| source (string, lengths 3-86) | python (string, lengths 75-1.04M) |
|---|---|
evaler.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import threading, sys, logging
from collections.abc import Iterator
from .lv_types import EventData
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
# pylint: disable=unused-import
from functools import *
from itertools import *
from statistics import *
import numpy as np
from .evaler_utils import *
class Evaler:
class EvalReturn:
def __init__(self, result=None, is_valid=False, exception=None):
self.result, self.exception, self.is_valid = \
result, exception, is_valid
def reset(self):
self.result, self.exception, self.is_valid = \
None, None, False
class PostableIterator:
def __init__(self, eval_wait):
self.eval_wait = eval_wait
self.post_wait = threading.Event()
self.event_data, self.ended = None, None # define attributes in init
self.reset()
def reset(self):
self.event_data, self.ended = None, False
self.post_wait.clear()
def abort(self):
self.ended = True
self.post_wait.set()
def post(self, event_data:EventData=None, ended=False):
self.event_data, self.ended = event_data, ended
self.post_wait.set()
def get_vals(self):
while True:
self.post_wait.wait()
self.post_wait.clear()
if self.ended:
break
else:
yield self.event_data
# below will cause result=None, is_valid=False when
# expression has reduce
self.eval_wait.set()
def __init__(self, expr):
self.eval_wait = threading.Event()
self.reset_wait = threading.Event()
self.g = Evaler.PostableIterator(self.eval_wait)
self.expr = expr
self.eval_return, self.continue_thread = None, None # define in __init__
self.reset()
self.th = threading.Thread(target=self._runner, daemon=True, name='evaler')
self.th.start()
self.running = True
def reset(self):
self.g.reset()
self.eval_wait.clear()
self.reset_wait.clear()
self.eval_return = Evaler.EvalReturn()
self.continue_thread = True
def _runner(self):
while True:
# this var will be used by eval
l = self.g.get_vals() # pylint: disable=unused-variable
try:
result = eval(self.expr) # pylint: disable=eval-used
if isinstance(result, Iterator):
for item in result:
self.eval_return = Evaler.EvalReturn(item, True)
else:
self.eval_return = Evaler.EvalReturn(result, True)
except Exception as ex: # pylint: disable=broad-except
logging.exception('Exception occurred while evaluating expression: ' + self.expr)
self.eval_return = Evaler.EvalReturn(None, True, ex)
self.eval_wait.set()
self.reset_wait.wait()
if not self.continue_thread:
break
self.reset()
self.running = False
utils.debug_log('eval runner ended!')
def abort(self):
utils.debug_log('Evaler Aborted')
self.continue_thread = False
self.g.abort()
self.eval_wait.set()
self.reset_wait.set()
def post(self, event_data:EventData=None, ended=False, continue_thread=True):
if not self.running:
utils.debug_log('post was called when Evaler is not running')
return None, False
self.eval_return.reset()
self.g.post(event_data, ended)
self.eval_wait.wait()
self.eval_wait.clear()
# save result before it would get reset
eval_return = self.eval_return
self.reset_wait.set()
self.continue_thread = continue_thread
if isinstance(eval_return.result, Iterator):
eval_return.result = list(eval_return.result)
return eval_return
def join(self):
self.th.join()
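# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# The expression passed to Evaler is evaluated with `l` bound to a generator
# of the posted EventData objects, so typical expressions map or filter over
# `l`. The attribute `d.x` and the keyword construction of EventData below
# are assumptions made purely for illustration.
#
#   evaler = Evaler('map(lambda d: d.x * 2, l)')
#   ret = evaler.post(event_data=EventData(x=1))   # ret is an EvalReturn
#   if ret.is_valid and ret.exception is None:
#       print(ret.result)
#   evaler.post(ended=True)    # signal end of the event stream
#   evaler.abort()
#   evaler.join()
# ---------------------------------------------------------------------------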
|
human_controller.py
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, Pose
import sys, select, os
import math
from gazebo_msgs.msg import ModelStates, ModelState, LinkState
import time
import threading
import numpy as np
class HumanController():
"""Human navigation based on social forces model"""
def __init__(self):
rospy.init_node('human_controller')
self.human_speed = 1 #typically 1.5 m/s
self.pub = rospy.Publisher('/gazebo/set_model_state', ModelState, queue_size=10)
# self.pub = rospy.Publisher('/gazebo/set_link_state', LinkState, queue_size=10)
self.pose_subscriber = rospy.Subscriber('/gazebo/model_states', ModelStates, self.update_pose)
p1_state = ModelState()
# self.p1_state = LinkState()
p1_state.model_name = 'person_1'
p2_state = ModelState()
p2_state.model_name = 'person_2'
p3_state = ModelState()
p3_state.model_name = 'person_3'
p4_state = ModelState()
p4_state.model_name = 'person_4'
self.rate = rospy.Rate(10)
self.states = [p1_state, p2_state, p3_state, p4_state]
self.final_goals = [(7, -2), (7, 2), (-7, -2), (-7, 2)]
self.poses = None
self.positions = None
self.x_bound = [-7.5, 7.5]
self.y_bound = [-2.5, 2.5]
print('Human Controller launched and initialized')
def update_pose(self, data):
"""Callback function which is called when a new message of type Pose is
received by the subscriber. This function is to unpack the poses of objects"""
try:
all_names = data.name
# print(all_names)
p1_ind = all_names.index('person_1')
p1_pose = data.pose[p1_ind]
p1_position = p1_pose.position
p2_ind = all_names.index('person_2')
p2_pose = data.pose[p2_ind]
p2_position = p2_pose.position
p3_ind = all_names.index('person_3')
p3_pose = data.pose[p3_ind]
p3_position = p3_pose.position
p4_ind = all_names.index('person_4')
p4_pose = data.pose[p4_ind]
p4_position = p4_pose.position
robot_ind = all_names.index('turtlebot3_burger')
robot_pose = data.pose[robot_ind]
robot_twist = data.twist[robot_ind]
self.robot_linvel = robot_twist.linear.x
self.robot_angvel = robot_twist.angular.z
self.robot_position = robot_pose.position
self.positions = [p1_position, p2_position, p3_position, p4_position]
self.poses = [p1_pose, p2_pose, p3_pose, p4_pose]
except Exception as e:
print(e)
time.sleep(1) # probably the publisher not started yet
pass
def get_distance(self, position, goal):
position = np.array([position.x, position.y])
return np.linalg.norm(position-goal)
def go_to_goal(self, goal, person, tolerance):
"""Get the person to go to the defined goal,
goal should be a [x, y] array type.
person index in [0, 1, 2, 3]
Try on person 1 first, probably need to add the specific person as argument"""
# tolerance = 0.05
print("STARTING HUMAN NAVIGATION")
goal = np.array(goal)
# curr_time = time.time()
while self.get_distance(self.positions[person], goal)>tolerance:
# get potential gradient and set velocity
curr_position = np.array([self.positions[person].x, self.positions[person].y])
grad = self.get_point_force(curr_position, goal, person)
xVel = -grad[0]
yVel = -grad[1]
self.states[person].pose.orientation = self.poses[person].orientation
#normalize to max vel
scale = self.human_speed/np.linalg.norm(np.array([xVel, yVel]))
self.states[person].pose.position.x = self.positions[person].x+scale*xVel*0.01
self.states[person].pose.position.y = self.positions[person].y+scale*yVel*0.01
self.states[person].twist.linear.x = scale*xVel
self.states[person].twist.linear.y = scale*yVel
self.pub.publish(self.states[person])
print("done moving to goal")
def control_human(self, goal):
"""Main function to control all humans"""
while self.poses is None:
time.sleep(1)
threads = []
for i in range(4):
x = threading.Thread(target=self.go_to_goal, args=(goal, i, 0.3))
threads.append(x)
x.start()
time.sleep(2) # move 2 secs apart
threads2 = []
for i, t in enumerate(threads):
t.join()
y = threading.Thread(target=self.go_to_goal, args=(self.final_goals[i], i, 0.05))
threads2.append(y)
y.start()
for t in threads2:
t.join()
def get_point_force(self, point, goal, person):
"""At the given point in the map, get the gradient of the
potential function. point and goal should both be 1x2 np arrays"""
wall_rep_const = 5
human_rep_const = 15 #stay further away from humans than to walls
att_const = 5
human_radius = 1 # influence range of other humans
wall_radius = 0.5 # influence range of the wall
# the robot repulsion parameters should be dynamic
robot_rep_const = 5*abs(self.robot_linvel+self.robot_angvel)+1
# print(robot_rep_const)
robot_radius = 1.5
#get components of gradients and then sum
att = att_const*(point-goal) #attraction to the goal
human_rep = np.array([0, 0])
for i in range(4):
h_position = self.positions[i]
dist_to_person = np.linalg.norm(point-[h_position.x, h_position.y])
if i!=person and dist_to_person<human_radius: #only consider other humans
nabla = ([h_position.x, h_position.y]-point)/dist_to_person
human_rep = human_rep+human_rep_const*nabla*(1/dist_to_person)
# robot repulsion
dist_to_robot = np.linalg.norm(point-[self.robot_position.x, self.robot_position.y])
if dist_to_robot<robot_radius:
nabla = ([self.robot_position.x, self.robot_position.y]-point)/dist_to_robot
robot_rep = robot_rep_const*nabla*(1/dist_to_robot)
else:
robot_rep = [0,0]
# get gradients due to walls
wall_rep = np.array([0,0])
if abs(point[0])<wall_radius and (point[1]>0.3 or point[1]<-0.3): #close to the middle wall
wall_rep = wall_rep+np.array([wall_rep_const*(-1/point[0]), 0])
elif abs(point[0]-(-7.5))<wall_radius:
wall_rep = wall_rep+np.array([wall_rep_const*(1/(-7.5-point[0])), 0])
elif abs(point[0]-7.5)<wall_radius:
wall_rep = wall_rep+np.array([wall_rep_const*(1/(7.5-point[0])), 0])
if abs(point[1]-(-2.5))<wall_radius: #top and bottom wall
wall_rep = wall_rep+np.array([0, wall_rep_const*(1/(-2.5-point[1]))])
elif abs(point[1]-2.5)<wall_radius: #top and bottom wall
wall_rep = wall_rep+np.array([0, wall_rep_const*(1/(2.5-point[1]))])
# print("att: ", att)
# print("wall rep: ", wall_rep)
# print("human_rep: ", human_rep)
try:
final_grad = att + wall_rep + human_rep + robot_rep
except Exception:
# log the operands that caused the failure, then re-raise so the error
# is not masked by returning an undefined final_grad
print("att: ", att)
print("wall rep: ", wall_rep)
print("human_rep: ", human_rep)
raise
return final_grad
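# ---------------------------------------------------------------------------
# Summary of the potential-field gradient computed above (as implemented):
#   grad = c_att * (p - g)                              attraction to goal g
#        + sum_i c_rep * (p_i - p) / ||p_i - p||^2      repulsion from other
#                                                       humans and the robot
#                                                       inside their radius
#        + wall terms of the form c_wall / (x_wall - x) near each wall (the
#                                                       middle wall has a
#                                                       door gap around y=0)
# go_to_goal() then moves each person along -grad, rescaled to human_speed.
# ---------------------------------------------------------------------------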
if __name__=="__main__":
try:
hcontrol = HumanController()
door_pose = [0,0]
hcontrol.control_human(door_pose)
except rospy.ROSInterruptException:
pass
# t = 0
# while not rospy.is_shutdown():
# x_vel = 2*math.sin(t)
# # x_vel = 2
# ang_vel = 0.5
# p1_state.twist.linear.x = x_vel
# p1_state.twist.angular.x = ang_vel
# pub.publish(p1_state)
# t+=1
# rate.sleep()
|
conftest.py
|
"""pytest fixtures for kubespawner"""
import base64
import inspect
import io
import logging
import os
import sys
import tarfile
import time
from distutils.version import LooseVersion as V
from functools import partial
from threading import Thread
import kubernetes
import pytest
from jupyterhub.app import JupyterHub
from jupyterhub.objects import Hub
from kubernetes.client import V1ConfigMap
from kubernetes.client import V1Namespace
from kubernetes.client import V1Pod
from kubernetes.client import V1PodSpec
from kubernetes.client import V1Secret
from kubernetes.client import V1Service
from kubernetes.client import V1ServicePort
from kubernetes.client import V1ServiceSpec
from kubernetes.client.rest import ApiException
from kubernetes.config import load_kube_config
from kubernetes.stream import stream
from kubernetes.watch import Watch
from traitlets.config import Config
from kubespawner.clients import shared_client
here = os.path.abspath(os.path.dirname(__file__))
jupyterhub_config_py = os.path.join(here, "jupyterhub_config.py")
@pytest.fixture(autouse=True)
def traitlets_logging():
"""Ensure traitlets default logging is enabled
so KubeSpawner logs are captured by pytest.
By default, there is a "NullHandler" so no logs are produced.
"""
logger = logging.getLogger('traitlets')
logger.setLevel(logging.DEBUG)
logger.handlers = []
@pytest.fixture(scope="session")
def kube_ns():
"""Fixture for the kubernetes namespace"""
return os.environ.get("KUBESPAWNER_TEST_NAMESPACE") or "kubespawner-test"
@pytest.fixture
def config(kube_ns):
"""Return a traitlets Config object
The base configuration for testing.
Use when constructing Spawners for tests
"""
cfg = Config()
cfg.KubeSpawner.namespace = kube_ns
cfg.KubeSpawner.cmd = ["jupyterhub-singleuser"]
cfg.KubeSpawner.start_timeout = 180
# prevent spawners from exiting early due to missing env
cfg.KubeSpawner.environment = {
"JUPYTERHUB_API_TOKEN": "test-secret-token",
"JUPYTERHUB_CLIENT_ID": "ignored",
}
return cfg
@pytest.fixture(scope="session")
def ssl_app(tmpdir_factory, kube_ns):
"""Partially instantiate a JupyterHub instance to generate ssl certificates
Generates ssl certificates on the host,
which will then be staged into the hub pod (via a Secret) for internal_ssl tests.
This is not a fully instantiated Hub,
but it will have internal_ssl-related attributes such as
.internal_trust_bundles and .internal_certs_location initialized.
"""
tmpdir = tmpdir_factory.mktemp("ssl")
tmpdir.chdir()
config = Config()
config.JupyterHub.internal_ssl = True
tmpdir.mkdir("internal-ssl")
# use relative path for ssl certs
config.JupyterHub.internal_certs_location = "internal-ssl"
config.JupyterHub.trusted_alt_names = [
"DNS:hub-ssl",
f"DNS:hub-ssl.{kube_ns}",
f"DNS:hub-ssl.{kube_ns}.svc",
f"DNS:hub-ssl.{kube_ns}.svc.cluster.local",
]
app = JupyterHub(config=config)
app.init_internal_ssl()
return app
def watch_logs(kube_client, pod_info):
"""Stream a single pod's logs
pod logs are streamed directly to sys.stderr,
so that pytest capture can deal with it.
Blocking, should be run in a thread.
Called for each new pod from watch_kubernetes
"""
watch = Watch()
while True:
try:
for event in watch.stream(
func=kube_client.read_namespaced_pod_log,
namespace=pod_info.namespace,
name=pod_info.name,
):
print(f"[{pod_info.name}]: {event}")
except ApiException as e:
if e.status == 400:
# 400 can occur if the container is not yet ready
# wait and retry
time.sleep(1)
continue
elif e.status == 404:
# pod is gone, we are done
return
else:
# unexpected error
print(f"Error watching logs for {pod_info.name}: {e}", file=sys.stderr)
raise
else:
break
def watch_kubernetes(kube_client, kube_ns):
"""Stream kubernetes events to stdout
so that pytest io capturing can include k8s events and logs
All events are streamed to stdout
When a new pod is started, spawn an additional thread to watch its logs
"""
log_threads = {}
watch = Watch()
for event in watch.stream(
func=kube_client.list_namespaced_event,
namespace=kube_ns,
):
resource = event['object']
obj = resource.involved_object
print(f"k8s event ({event['type']} {obj.kind}/{obj.name}): {resource.message}")
# new pod appeared, start streaming its logs
if (
obj.kind == "Pod"
and event["type"] == "ADDED"
and obj.name not in log_threads
):
log_threads[obj.name] = t = Thread(
target=watch_logs, args=(kube_client, obj), daemon=True
)
t.start()
@pytest.fixture(scope="session")
def kube_client(request, kube_ns):
"""fixture for the Kubernetes client object.
skips tests that require kubernetes if kubernetes cannot be contacted
- Ensures kube_ns namespace exists
- Hooks up kubernetes events and logs to pytest capture
- Cleans up kubernetes namespace on exit
"""
load_kube_config()
client = shared_client("CoreV1Api")
try:
namespaces = client.list_namespace(_request_timeout=3)
except Exception as e:
pytest.skip("Kubernetes not found: %s" % e)
if not any(ns.metadata.name == kube_ns for ns in namespaces.items):
print("Creating namespace %s" % kube_ns)
client.create_namespace(V1Namespace(metadata=dict(name=kube_ns)))
else:
print("Using existing namespace %s" % kube_ns)
# begin streaming all logs and events in our test namespace
t = Thread(target=watch_kubernetes, args=(client, kube_ns), daemon=True)
t.start()
# delete the test namespace when we finish
def cleanup_namespace():
client.delete_namespace(kube_ns, body={}, grace_period_seconds=0)
for i in range(3):
try:
ns = client.read_namespace(kube_ns)
except ApiException as e:
if e.status == 404:
return
else:
raise
else:
print("waiting for %s to delete" % kube_ns)
time.sleep(1)
# allow opting out of namespace cleanup, for post-mortem debugging
if not os.environ.get("KUBESPAWNER_DEBUG_NAMESPACE"):
request.addfinalizer(cleanup_namespace)
return client
def wait_for_pod(kube_client, kube_ns, pod_name, timeout=90):
"""Wait for a pod to be ready"""
conditions = {}
for i in range(int(timeout)):
pod = kube_client.read_namespaced_pod(namespace=kube_ns, name=pod_name)
for condition in pod.status.conditions or []:
conditions[condition.type] = condition.status
if conditions.get("Ready") != "True":
print(
f"Waiting for pod {kube_ns}/{pod_name}; current status: {pod.status.phase}; {conditions}"
)
time.sleep(1)
else:
break
if conditions.get("Ready") != "True":
raise TimeoutError(f"pod {kube_ns}/{pod_name} failed to start: {pod.status}")
return pod
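# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original fixtures): from
# a test, combine the kube_client/kube_ns fixtures with wait_for_pod to block
# until a pod is Ready. The pod name "hub" matches the default used by
# create_hub_pod below; the test name itself is hypothetical.
#
#   def test_hub_ready(kube_client, kube_ns, hub_pod):
#       pod = wait_for_pod(kube_client, kube_ns, "hub", timeout=90)
#       assert pod.status.phase == "Running"
# ---------------------------------------------------------------------------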
def ensure_not_exists(kube_client, kube_ns, name, resource_type, timeout=30):
"""Ensure an object doesn't exist
Request deletion and wait for it to be gone
"""
delete = getattr(kube_client, "delete_namespaced_{}".format(resource_type))
read = getattr(kube_client, "read_namespaced_{}".format(resource_type))
try:
delete(namespace=kube_ns, name=name)
except ApiException as e:
if e.status != 404:
raise
while True:
# wait for delete
try:
read(namespace=kube_ns, name=name)
except ApiException as e:
if e.status == 404:
# deleted
break
else:
raise
else:
print("waiting for {}/{} to delete".format(resource_type, name))
time.sleep(1)
def create_resource(kube_client, kube_ns, resource_type, manifest, delete_first=True):
"""Create a kubernetes resource
handling 409 errors and others that can occur due to rapid startup
(typically: default service account doesn't exist yet
"""
name = manifest.metadata["name"]
if delete_first:
ensure_not_exists(kube_client, kube_ns, name, resource_type)
print(f"Creating {resource_type} {name}")
create = getattr(kube_client, f"create_namespaced_{resource_type}")
error = None
for i in range(10):
try:
create(
body=manifest,
namespace=kube_ns,
)
except ApiException as e:
if e.status == 409:
break
error = e
# need to retry since this can fail if run too soon after namespace creation
print(e, file=sys.stderr)
time.sleep(int(e.headers.get("Retry-After", 1)))
else:
break
else:
raise error
def create_hub_pod(kube_client, kube_ns, pod_name="hub", ssl=False):
config_map_name = pod_name + "-config"
secret_name = pod_name + "-secret"
with open(jupyterhub_config_py) as f:
config = f.read()
config_map_manifest = V1ConfigMap(
metadata={"name": config_map_name}, data={"jupyterhub_config.py": config}
)
config_map = create_resource(
kube_client,
kube_ns,
"config_map",
config_map_manifest,
delete_first=True,
)
volumes = [{"name": "config", "configMap": {"name": config_map_name}}]
volume_mounts = [
{
"mountPath": "/etc/jupyterhub/jupyterhub_config.py",
"subPath": "jupyterhub_config.py",
"name": "config",
}
]
if ssl:
volumes.append({"name": "secret", "secret": {"secretName": secret_name}})
volume_mounts.append(
{
"mountPath": "/etc/jupyterhub/secret",
"name": "secret",
}
)
pod_manifest = V1Pod(
metadata={
"name": pod_name,
"labels": {"component": "hub", "hub-name": pod_name},
},
spec=V1PodSpec(
volumes=volumes,
containers=[
{
"image": "jupyterhub/jupyterhub:1.3",
"name": "hub",
"volumeMounts": volume_mounts,
"args": [
"jupyterhub",
"-f",
"/etc/jupyterhub/jupyterhub_config.py",
],
"env": [{"name": "PYTHONUNBUFFERED", "value": "1"}],
"readinessProbe": {
"tcpSocket": {
"port": 8081,
},
"periodSeconds": 1,
},
}
],
),
)
pod = create_resource(kube_client, kube_ns, "pod", pod_manifest)
return wait_for_pod(kube_client, kube_ns, pod_name)
@pytest.fixture(scope="session")
def hub_pod(kube_client, kube_ns):
"""Create and return a pod running jupyterhub"""
return create_hub_pod(kube_client, kube_ns)
@pytest.fixture
def hub(hub_pod):
"""Return the jupyterhub Hub object for passing to Spawner constructors
Ensures the hub_pod is running
"""
return Hub(ip=hub_pod.status.pod_ip, port=8081)
@pytest.fixture(scope="session")
def hub_pod_ssl(kube_client, kube_ns, ssl_app):
"""Start a hub pod with internal_ssl enabled"""
# load ssl dir to tarfile
buf = io.BytesIO()
tf = tarfile.TarFile(fileobj=buf, mode="w")
tf.add(ssl_app.internal_certs_location, arcname="internal-ssl", recursive=True)
# store tarfile in a secret
b64_certs = base64.b64encode(buf.getvalue()).decode("ascii")
secret_name = "hub-ssl-secret"
secret_manifest = V1Secret(
metadata={"name": secret_name}, data={"internal-ssl.tar": b64_certs}
)
create_resource(kube_client, kube_ns, "secret", secret_manifest)
name = "hub-ssl"
service_manifest = V1Service(
metadata=dict(name=name),
spec=V1ServiceSpec(
type="ClusterIP",
ports=[V1ServicePort(port=8081, target_port=8081)],
selector={"hub-name": name},
),
)
create_resource(kube_client, kube_ns, "service", service_manifest)
return create_hub_pod(
kube_client,
kube_ns,
pod_name=name,
ssl=True,
)
@pytest.fixture
def hub_ssl(kube_ns, hub_pod_ssl):
"""Return the Hub object for connecting to a running hub pod with internal_ssl enabled"""
return Hub(
proto="https",
ip=f"{hub_pod_ssl.metadata.name}.{kube_ns}",
port=8081,
base_url="/hub/",
)
class ExecError(Exception):
"""Error raised when a kubectl exec fails"""
def __init__(self, exit_code, message="", command="exec"):
self.exit_code = exit_code
self.message = message
self.command = command
def __str__(self):
return "{command} exited with status {exit_code}: {message}".format(
command=self.command,
exit_code=self.exit_code,
message=self.message,
)
def _exec_python_in_pod(kube_client, kube_ns, pod_name, code, kwargs=None, _retries=0):
"""Run simple Python code in a pod
code can be a str of code, or a 'simple' Python function,
where source can be extracted (i.e. self-contained imports, etc.)
kwargs are passed to the function, if it is given.
"""
if V(kubernetes.__version__) < V("11"):
pytest.skip(
f"exec tests require kubernetes >= 11, got {kubernetes.__version__}"
)
pod = wait_for_pod(kube_client, kube_ns, pod_name)
original_code = code
if not isinstance(code, str):
# allow simple self-contained (no globals or args) functions
func = code
code = "\n".join(
[
inspect.getsource(func),
"_kw = %r" % (kwargs or {}),
"{}(**_kw)".format(func.__name__),
"",
]
)
elif kwargs:
raise ValueError("kwargs can only be passed to functions, not code strings.")
exec_command = [
"python3",
"-c",
code,
]
print("Running {} in {}".format(code, pod_name))
# need to create ws client to get returncode,
# see https://github.com/kubernetes-client/python/issues/812
client = stream(
kube_client.connect_get_namespaced_pod_exec,
pod_name,
namespace=kube_ns,
command=exec_command,
stderr=True,
stdin=False,
stdout=True,
tty=False,
_preload_content=False,
)
client.run_forever(timeout=60)
# let pytest capture stderr
stderr = client.read_stderr()
print(stderr, file=sys.stderr)
returncode = client.returncode
if returncode:
print(client.read_stdout())
if _retries == 0:
raise ExecError(exit_code=returncode, message=stderr, command=code)
else:
# retry
time.sleep(1)
return _exec_python_in_pod(
kube_client,
kube_ns,
pod_name,
code,
_retries=_retries - 1,
)
else:
return client.read_stdout().rstrip()
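# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): _exec_python_in_pod accepts either a
# code string or a self-contained function plus kwargs. Both forms below
# assume the exec pod created by the exec_python fixture is already running;
# the `hello` helper is hypothetical.
#
#   def hello(name="world"):
#       print(f"hello {name}")
#
#   exec_python("print(1 + 1)")                  # code string -> "2"
#   exec_python(hello, {"name": "kubespawner"})  # function + kwargs
# ---------------------------------------------------------------------------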
@pytest.fixture
def exec_python_pod(kube_client, kube_ns):
"""Fixture to return callable to execute python in a pod by name
Used as a fixture to contain references to client, namespace
"""
return partial(_exec_python_in_pod, kube_client, kube_ns)
@pytest.fixture(scope="session")
def exec_python(kube_ns, kube_client):
"""Return a callable to execute Python code in a pod in the test namespace
This fixture creates a dedicated pod for executing commands
"""
# note: this was created when there were only single-user pods running,
# but now there's always a hub pod where we could be running,
# and the ssl case *must* run from the hub pod for access to certs
# Note: we could do without this feature if we always ran
pod_name = "kubespawner-test-exec"
pod_manifest = V1Pod(
metadata={"name": pod_name},
spec=V1PodSpec(
containers=[
{
"image": "python:3.8",
"name": "python",
"args": ["/bin/sh", "-c", "while true; do sleep 5; done"],
}
],
termination_grace_period_seconds=0,
),
)
pod = create_resource(kube_client, kube_ns, "pod", pod_manifest)
yield partial(_exec_python_in_pod, kube_client, kube_ns, pod_name)
|
gpu_usage.py
|
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-01-2021 #
# Author(s): Vincenzo Lomonaco, Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
import GPUtil
from threading import Thread
import time
import warnings
from typing import Optional, TYPE_CHECKING, List
from avalanche.evaluation import Metric, PluginMetric, GenericPluginMetric
from avalanche.evaluation.metric_results import MetricResult
if TYPE_CHECKING:
from avalanche.training import BaseStrategy
class MaxGPU(Metric[float]):
"""
The standalone GPU usage metric.
Important: this metric approximates the real maximum GPU percentage
usage since it samples the GPU values at discrete time intervals.
Instances of this metric keep the maximum GPU usage percentage detected.
The `start_thread` method starts the usage tracking.
The `stop_thread` method stops the tracking.
The result, obtained using the `result` method, is the maximum GPU usage
percentage detected so far.
The reset method will bring the metric to its initial state. By default
this metric in its initial state will return a usage value of 0.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the GPU usage metric.
:param gpu_id: GPU device ID.
:param every: seconds between consecutive updates of the maximum GPU usage
"""
self.every = every
self.gpu_id = gpu_id
n_gpus = len(GPUtil.getGPUs())
if n_gpus == 0:
warnings.warn("Your system has no GPU!")
self.gpu_id = None
elif gpu_id < 0:
warnings.warn("GPU metric called with negative GPU id."
"GPU logging disabled")
self.gpu_id = None
else:
if gpu_id >= n_gpus:
warnings.warn(f"GPU {gpu_id} not found. Using GPU 0.")
self.gpu_id = 0
self.thread = None
"""
Thread executing GPU monitoring code
"""
self.stop_f = False
"""
Flag to stop the thread
"""
self.max_usage = 0
"""
Main metric result. Max GPU usage.
"""
def _f(self):
"""
Until a stop signal is encountered, this function samples the GPU load
every `every` seconds and records the maximum value observed.
"""
start_time = time.monotonic()
while not self.stop_f:
# GPU percentage
gpu_perc = GPUtil.getGPUs()[self.gpu_id].load * 100
if gpu_perc > self.max_usage:
self.max_usage = gpu_perc
time.sleep(self.every - ((time.monotonic() - start_time)
% self.every))
def start_thread(self):
if self.gpu_id is not None:
assert not self.thread, "Trying to start thread " \
"without joining the previous."
self.thread = Thread(target=self._f, daemon=True)
self.thread.start()
def stop_thread(self):
if self.thread:
self.stop_f = True
self.thread.join()
self.stop_f = False
self.thread = None
def reset(self) -> None:
"""
Resets the metric.
:return: None.
"""
self.max_usage = 0
def result(self) -> Optional[float]:
"""
Returns the max GPU percentage value.
:return: The percentage GPU usage as a float value in range [0, 100].
"""
return self.max_usage
def update(self):
pass
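# ---------------------------------------------------------------------------
# Hedged usage sketch of the standalone metric (illustrative only): sample
# GPU 0 every 0.5 s around a hypothetical `train()` call and read back the
# maximum observed load percentage.
#
#   gpu_metric = MaxGPU(gpu_id=0, every=0.5)
#   gpu_metric.start_thread()
#   train()                      # hypothetical workload
#   gpu_metric.stop_thread()
#   print(gpu_metric.result())   # max GPU load observed, in [0, 100]
#   gpu_metric.reset()
# ---------------------------------------------------------------------------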
class GPUPluginMetric(GenericPluginMetric[float]):
def __init__(self, gpu_id, every, reset_at, emit_at, mode):
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
super(GPUPluginMetric, self).__init__(
self._gpu, reset_at=reset_at, emit_at=emit_at,
mode=mode)
def update(self, strategy):
self._gpu.update()
class MinibatchMaxGPU(GPUPluginMetric):
"""
The Minibatch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the Minibatch Max GPU metric
:param gpu_id: GPU device ID.
:param every: seconds between consecutive updates of the maximum GPU usage
"""
super(MinibatchMaxGPU, self).__init__(
gpu_id, every,
reset_at='iteration', emit_at='iteration', mode='train')
def before_training(self, strategy: 'BaseStrategy') \
-> None:
super().before_training(strategy)
self._gpu.start_thread()
def after_training(self, strategy: 'BaseStrategy') -> None:
super().after_training(strategy)
self._gpu.stop_thread()
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_MB"
class EpochMaxGPU(GPUPluginMetric):
"""
The Epoch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the epoch Max GPU metric.
:param gpu_id: GPU device ID.
:param every: seconds between consecutive updates of the maximum GPU usage
"""
super(EpochMaxGPU, self).__init__(
gpu_id, every,
reset_at='epoch', emit_at='epoch', mode='train')
def before_training(self, strategy: 'BaseStrategy'):
super().before_training(strategy)
self._gpu.start_thread()
def after_training(self, strategy: 'BaseStrategy') -> None:
self._gpu.stop_thread()
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Epoch"
class ExperienceMaxGPU(GPUPluginMetric):
"""
The Experience Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the Experience Max GPU metric.
:param gpu_id: GPU device ID.
:param every: seconds between consecutive updates of the maximum GPU usage
"""
super(ExperienceMaxGPU, self).__init__(
gpu_id, every,
reset_at='experience', emit_at='experience', mode='eval')
def before_eval(self, strategy: 'BaseStrategy'):
super().before_eval(strategy)
self._gpu.start_thread()
def after_eval(self, strategy: 'BaseStrategy'):
super().after_eval(strategy)
self._gpu.stop_thread()
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Experience"
class StreamMaxGPU(GPUPluginMetric):
"""
The Stream Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the Stream Max GPU metric.
:param gpu_id: GPU device ID.
:param every: seconds between consecutive updates of the maximum GPU usage
"""
super(StreamMaxGPU, self).__init__(
gpu_id, every,
reset_at='stream', emit_at='stream', mode='eval')
def before_eval(self, strategy):
super().before_eval(strategy)
self._gpu.start_thread()
def after_eval(self, strategy: 'BaseStrategy') \
-> MetricResult:
packed = super().after_eval(strategy)
self._gpu.stop_thread()
return packed
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Stream"
def gpu_usage_metrics(gpu_id, every=0.5, minibatch=False, epoch=False,
experience=False, stream=False) -> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param gpu_id: GPU device ID.
:param every: seconds between consecutive updates of the maximum GPU usage
:param minibatch: If True, will return a metric able to log the minibatch
max GPU usage.
:param epoch: If True, will return a metric able to log the epoch
max GPU usage.
:param experience: If True, will return a metric able to log the experience
max GPU usage.
:param stream: If True, will return a metric able to log the evaluation
max stream GPU usage.
:return: A list of plugin metrics.
"""
metrics = []
if minibatch:
metrics.append(MinibatchMaxGPU(gpu_id, every))
if epoch:
metrics.append(EpochMaxGPU(gpu_id, every))
if experience:
metrics.append(ExperienceMaxGPU(gpu_id, every))
if stream:
metrics.append(StreamMaxGPU(gpu_id, every))
return metrics
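# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): build the plugin metrics for GPU 0 and
# hand them to an Avalanche evaluation plugin. The EvaluationPlugin import
# and its constructor usage below are assumptions for illustration.
#
#   from avalanche.training.plugins import EvaluationPlugin
#   metrics = gpu_usage_metrics(gpu_id=0, every=0.5,
#                               minibatch=True, epoch=True,
#                               experience=True, stream=True)
#   eval_plugin = EvaluationPlugin(*metrics)
# ---------------------------------------------------------------------------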
__all__ = [
'MaxGPU',
'MinibatchMaxGPU',
'EpochMaxGPU',
'ExperienceMaxGPU',
'StreamMaxGPU',
'gpu_usage_metrics'
]
|
test_socket_manager.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import time
import uuid
from unittest import mock
from parlai.mturk.core.socket_manager import Packet, SocketManager
from parlai.mturk.core.agents import AssignState
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.shared_utils as shared_utils
import threading
from websocket_server import WebsocketServer
import json
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_WORKER_ID_2 = 'TEST_WORKER_ID_2'
TEST_ASSIGNMENT_ID_2 = 'TEST_ASSIGNMENT_ID_2'
TEST_HIT_ID_2 = 'TEST_HIT_ID_2'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
MESSAGE_ID_3 = 'MESSAGE_ID_3'
MESSAGE_ID_4 = 'MESSAGE_ID_4'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_MESSAGE
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE, AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING, AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE, AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED, AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
TASK_GROUP_ID_1 = 'TASK_GROUP_ID_1'
SocketManager.DEF_MISSED_PONGS = 3
SocketManager.HEARTBEAT_RATE = 0.6
SocketManager.DEF_DEAD_TIME = 0.6
SocketManager.ACK_TIME = {Packet.TYPE_ALIVE: 0.4,
Packet.TYPE_MESSAGE: 0.2}
shared_utils.THREAD_SHORT_SLEEP = 0.05
shared_utils.THREAD_MEDIUM_SLEEP = 0.15
class TestPacket(unittest.TestCase):
"""Various unit tests for the AssignState class"""
ID = 'ID'
SENDER_ID = 'SENDER_ID'
RECEIVER_ID = 'RECEIVER_ID'
ASSIGNMENT_ID = 'ASSIGNMENT_ID'
DATA = 'DATA'
CONVERSATION_ID = 'CONVERSATION_ID'
REQUIRES_ACK = True
BLOCKING = False
ACK_FUNCTION = 'ACK_FUNCTION'
def setUp(self):
self.packet_1 = Packet(self.ID, Packet.TYPE_MESSAGE, self.SENDER_ID,
self.RECEIVER_ID, self.ASSIGNMENT_ID, self.DATA,
conversation_id=self.CONVERSATION_ID,
requires_ack=self.REQUIRES_ACK,
blocking=self.BLOCKING,
ack_func=self.ACK_FUNCTION)
self.packet_2 = Packet(self.ID, Packet.TYPE_HEARTBEAT, self.SENDER_ID,
self.RECEIVER_ID, self.ASSIGNMENT_ID, self.DATA)
self.packet_3 = Packet(self.ID, Packet.TYPE_ALIVE, self.SENDER_ID,
self.RECEIVER_ID, self.ASSIGNMENT_ID, self.DATA)
def tearDown(self):
pass
def test_packet_init(self):
'''Test proper initialization of packet fields'''
self.assertEqual(self.packet_1.id, self.ID)
self.assertEqual(self.packet_1.type, Packet.TYPE_MESSAGE)
self.assertEqual(self.packet_1.sender_id, self.SENDER_ID)
self.assertEqual(self.packet_1.receiver_id, self.RECEIVER_ID)
self.assertEqual(self.packet_1.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(self.packet_1.data, self.DATA)
self.assertEqual(self.packet_1.conversation_id, self.CONVERSATION_ID)
self.assertEqual(self.packet_1.requires_ack, self.REQUIRES_ACK)
self.assertEqual(self.packet_1.blocking, self.BLOCKING)
self.assertEqual(self.packet_1.ack_func, self.ACK_FUNCTION)
self.assertEqual(self.packet_1.status, Packet.STATUS_INIT)
self.assertEqual(self.packet_2.id, self.ID)
self.assertEqual(self.packet_2.type, Packet.TYPE_HEARTBEAT)
self.assertEqual(self.packet_2.sender_id, self.SENDER_ID)
self.assertEqual(self.packet_2.receiver_id, self.RECEIVER_ID)
self.assertEqual(self.packet_2.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(self.packet_2.data, self.DATA)
self.assertIsNone(self.packet_2.conversation_id)
self.assertFalse(self.packet_2.requires_ack)
self.assertFalse(self.packet_2.blocking)
self.assertIsNone(self.packet_2.ack_func)
self.assertEqual(self.packet_2.status, Packet.STATUS_INIT)
self.assertEqual(self.packet_3.id, self.ID)
self.assertEqual(self.packet_3.type, Packet.TYPE_ALIVE)
self.assertEqual(self.packet_3.sender_id, self.SENDER_ID)
self.assertEqual(self.packet_3.receiver_id, self.RECEIVER_ID)
self.assertEqual(self.packet_3.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(self.packet_3.data, self.DATA)
self.assertIsNone(self.packet_3.conversation_id)
self.assertTrue(self.packet_3.requires_ack)
self.assertTrue(self.packet_3.blocking)
self.assertIsNone(self.packet_3.ack_func)
self.assertEqual(self.packet_3.status, Packet.STATUS_INIT)
def test_dict_conversion(self):
'''Ensure packets can be converted to and from a representative dict'''
converted_packet = Packet.from_dict(self.packet_1.as_dict())
self.assertEqual(self.packet_1.id, converted_packet.id)
self.assertEqual(self.packet_1.type, converted_packet.type)
self.assertEqual(
self.packet_1.sender_id, converted_packet.sender_id)
self.assertEqual(
self.packet_1.receiver_id, converted_packet.receiver_id)
self.assertEqual(
self.packet_1.assignment_id, converted_packet.assignment_id)
self.assertEqual(self.packet_1.data, converted_packet.data)
self.assertEqual(
self.packet_1.conversation_id, converted_packet.conversation_id)
packet_dict = self.packet_1.as_dict()
self.assertDictEqual(
packet_dict, Packet.from_dict(packet_dict).as_dict())
def test_connection_ids(self):
'''Ensure that connection ids are reported as we expect them'''
sender_conn_id = '{}_{}'.format(self.SENDER_ID, self.ASSIGNMENT_ID)
receiver_conn_id = '{}_{}'.format(self.RECEIVER_ID, self.ASSIGNMENT_ID)
self.assertEqual(
self.packet_1.get_sender_connection_id(), sender_conn_id)
self.assertEqual(
self.packet_1.get_receiver_connection_id(), receiver_conn_id)
def test_packet_conversions(self):
'''Ensure that packet copies and acts are produced properly'''
# Copy important packet
message_packet_copy = self.packet_1.new_copy()
self.assertNotEqual(message_packet_copy.id, self.ID)
self.assertNotEqual(message_packet_copy, self.packet_1)
self.assertEqual(message_packet_copy.type, self.packet_1.type)
self.assertEqual(
message_packet_copy.sender_id, self.packet_1.sender_id)
self.assertEqual(
message_packet_copy.receiver_id, self.packet_1.receiver_id)
self.assertEqual(
message_packet_copy.assignment_id, self.packet_1.assignment_id)
self.assertEqual(message_packet_copy.data, self.packet_1.data)
self.assertEqual(
message_packet_copy.conversation_id, self.packet_1.conversation_id)
self.assertEqual(
message_packet_copy.requires_ack, self.packet_1.requires_ack)
self.assertEqual(
message_packet_copy.blocking, self.packet_1.blocking)
self.assertIsNone(message_packet_copy.ack_func)
self.assertEqual(message_packet_copy.status, Packet.STATUS_INIT)
# Copy non-important packet
hb_packet_copy = self.packet_2.new_copy()
self.assertNotEqual(hb_packet_copy.id, self.ID)
self.assertNotEqual(hb_packet_copy, self.packet_2)
self.assertEqual(hb_packet_copy.type, self.packet_2.type)
self.assertEqual(hb_packet_copy.sender_id, self.packet_2.sender_id)
self.assertEqual(hb_packet_copy.receiver_id, self.packet_2.receiver_id)
self.assertEqual(
hb_packet_copy.assignment_id, self.packet_2.assignment_id)
self.assertEqual(hb_packet_copy.data, self.packet_2.data)
self.assertEqual(
hb_packet_copy.conversation_id, self.packet_2.conversation_id)
self.assertEqual(
hb_packet_copy.requires_ack, self.packet_2.requires_ack)
self.assertEqual(hb_packet_copy.blocking, self.packet_2.blocking)
self.assertIsNone(hb_packet_copy.ack_func)
self.assertEqual(hb_packet_copy.status, Packet.STATUS_INIT)
# ack important packet
ack_packet = self.packet_1.get_ack()
self.assertEqual(ack_packet.id, self.ID)
self.assertEqual(ack_packet.type, Packet.TYPE_ACK)
self.assertEqual(ack_packet.sender_id, self.RECEIVER_ID)
self.assertEqual(ack_packet.receiver_id, self.SENDER_ID)
self.assertEqual(ack_packet.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(ack_packet.data, '')
self.assertEqual(ack_packet.conversation_id, self.CONVERSATION_ID)
self.assertFalse(ack_packet.requires_ack)
self.assertFalse(ack_packet.blocking)
self.assertIsNone(ack_packet.ack_func)
self.assertEqual(ack_packet.status, Packet.STATUS_INIT)
def test_packet_modifications(self):
'''Ensure that packet copies and acts are produced properly'''
# All operations return the packet
self.assertEqual(self.packet_1.swap_sender(), self.packet_1)
self.assertEqual(
self.packet_1.set_type(Packet.TYPE_ACK), self.packet_1)
self.assertEqual(self.packet_1.set_data(None), self.packet_1)
# Ensure all of the operations worked
self.assertEqual(self.packet_1.sender_id, self.RECEIVER_ID)
self.assertEqual(self.packet_1.receiver_id, self.SENDER_ID)
self.assertEqual(self.packet_1.type, Packet.TYPE_ACK)
self.assertIsNone(self.packet_1.data)
class MockSocket():
def __init__(self):
self.last_messages = {}
self.connected = False
self.disconnected = False
self.closed = False
self.ws = None
self.should_heartbeat = True
self.fake_workers = []
self.port = None
self.launch_socket()
self.handlers = {}
while self.ws is None:
time.sleep(0.05)
time.sleep(1)
def send(self, packet):
self.ws.send_message_to_all(packet)
def close(self):
if not self.closed:
self.ws.server_close()
self.ws.shutdown()
self.closed = True
def do_nothing(self, *args):
pass
def launch_socket(self):
def on_message(client, server, message):
if self.closed:
raise Exception('Socket is already closed...')
if message == '':
return
packet_dict = json.loads(message)
if packet_dict['content']['id'] == 'WORLD_ALIVE':
self.ws.send_message(
client, json.dumps({'type': 'conn_success'}))
self.connected = True
elif packet_dict['content']['type'] == 'heartbeat':
pong = packet_dict['content'].copy()
pong['type'] = 'pong'
self.ws.send_message(client, json.dumps({
'type': data_model.SOCKET_ROUTE_PACKET_STRING,
'content': pong,
}))
if 'receiver_id' in packet_dict['content']:
receiver_id = packet_dict['content']['receiver_id']
use_func = self.handlers.get(receiver_id, self.do_nothing)
use_func(packet_dict['content'])
def on_connect(client, server):
pass
def on_disconnect(client, server):
self.disconnected = True
def run_socket(*args):
port = 3030
while self.port is None:
try:
self.ws = WebsocketServer(port, host='127.0.0.1')
self.port = port
except OSError:
port += 1
self.ws.set_fn_client_left(on_disconnect)
self.ws.set_fn_new_client(on_connect)
self.ws.set_fn_message_received(on_message)
self.ws.run_forever()
self.listen_thread = threading.Thread(
target=run_socket,
name='Fake-Socket-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
class MockAgent(object):
"""Class that pretends to be an MTurk agent interacting through the
webpage by simulating the same commands that are sent from the core.html
file. Exposes methods to use for testing and checking status
"""
def __init__(self, hit_id, assignment_id, worker_id,
task_group_id):
self.conversation_id = None
self.id = None
self.assignment_id = assignment_id
self.hit_id = hit_id
self.worker_id = worker_id
self.some_agent_disconnected = False
self.disconnected = False
self.task_group_id = task_group_id
self.ws = None
self.always_beat = True
self.send_acks = True
self.ready = False
self.wants_to_send = False
def send_packet(self, packet):
def callback(*args):
pass
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
self.ws.send(json.dumps({
'type': event_name,
'content': packet.as_dict(),
}))
def register_to_socket(self, ws, on_ack, on_hb, on_msg):
handler = self.make_packet_handler(on_ack, on_hb, on_msg)
self.ws = ws
self.ws.handlers[self.worker_id] = handler
def make_packet_handler(self, on_ack, on_hb, on_msg):
"""A packet handler that properly sends heartbeats"""
def handler_mock(pkt):
if pkt['type'] == Packet.TYPE_ACK:
self.ready = True
packet = Packet.from_dict(pkt)
on_ack(packet)
elif pkt['type'] == Packet.TYPE_HEARTBEAT:
packet = Packet.from_dict(pkt)
on_hb(packet)
if self.always_beat:
self.send_heartbeat()
elif pkt['type'] == Packet.TYPE_MESSAGE:
packet = Packet.from_dict(pkt)
if self.send_acks:
self.send_packet(packet.get_ack())
on_msg(packet)
elif pkt['type'] == Packet.TYPE_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception('Invalid Packet type {} received in {}'.format(
pkt['type'],
pkt
))
return handler_mock
def build_and_send_packet(self, packet_type, data):
msg = {
'id': str(uuid.uuid4()),
'type': packet_type,
'sender_id': self.worker_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'receiver_id': '[World_' + self.task_group_id + ']',
'data': data
}
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
if (packet_type == Packet.TYPE_ALIVE):
event_name = data_model.SOCKET_AGENT_ALIVE_STRING
self.ws.send(json.dumps({
'type': event_name,
'content': msg,
}))
return msg['id']
def send_message(self, text):
data = {
'text': text,
'id': self.id,
'message_id': str(uuid.uuid4()),
'episode_done': False
}
self.wants_to_send = False
return self.build_and_send_packet(Packet.TYPE_MESSAGE, data)
def send_alive(self):
data = {
'hit_id': self.hit_id,
'assignment_id': self.assignment_id,
'worker_id': self.worker_id,
'conversation_id': self.conversation_id
}
return self.build_and_send_packet(Packet.TYPE_ALIVE, data)
def send_heartbeat(self):
"""Sends a heartbeat to the world"""
hb = {
'id': str(uuid.uuid4()),
'receiver_id': '[World_' + self.task_group_id + ']',
'assignment_id': self.assignment_id,
'sender_id': self.worker_id,
'conversation_id': self.conversation_id,
'type': Packet.TYPE_HEARTBEAT,
'data': None
}
self.ws.send(json.dumps({
'type': data_model.SOCKET_ROUTE_PACKET_STRING,
'content': hb,
}))
def wait_for_alive(self):
last_time = time.time()
while not self.ready:
self.send_alive()
time.sleep(0.5)
assert time.time() - last_time < 10, \
'Timed out waiting for server to acknowledge {} alive'.format(
self.worker_id
)
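# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): how the mocks above are typically wired
# together in the tests below. A MockAgent registers its packet handler on a
# MockSocket, announces itself, and then heartbeats automatically; the
# on_ack/on_hb/on_msg callbacks are assumed to be defined by the test.
#
#   fake_socket = MockSocket()
#   agent = MockAgent(TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1,
#                     TEST_WORKER_ID_1, TASK_GROUP_ID_1)
#   agent.register_to_socket(fake_socket, on_ack, on_hb, on_msg)
#   agent.send_alive()       # acked (and `ready` set) once a SocketManager
#   agent.send_heartbeat()   # acting as the world is also connected
# ---------------------------------------------------------------------------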
class TestSocketManagerSetupAndFunctions(unittest.TestCase):
"""Unit/integration tests for starting up a socket"""
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(1)
def tearDown(self):
self.fake_socket.close()
def test_init_and_reg_shutdown(self):
'''Test initialization of a socket manager'''
self.assertFalse(self.fake_socket.connected)
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
socket_manager = SocketManager('https://127.0.0.1',
self.fake_socket.port, nop, nop,
nop, TASK_GROUP_ID_1, 0.3, nop)
self.assertTrue(self.fake_socket.connected)
self.assertFalse(nop_called)
# Test shutdown
self.assertFalse(self.fake_socket.disconnected)
self.assertFalse(socket_manager.is_shutdown)
self.assertTrue(socket_manager.alive)
socket_manager.shutdown()
self.assertTrue(self.fake_socket.disconnected)
self.assertTrue(socket_manager.is_shutdown)
self.assertFalse(nop_called)
def assertEqualBy(self, val_func, val, max_time):
start_time = time.time()
while val_func() != val:
assert time.time() - start_time < max_time, \
"Value was not attained in specified time, was {} rather " \
"than {}".format(val_func(), val)
time.sleep(0.1)
def test_init_and_socket_shutdown(self):
'''Test initialization of a socket manager with a failed shutdown'''
self.assertFalse(self.fake_socket.connected)
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
server_death_called = False
def server_death(*args):
nonlocal server_death_called
server_death_called = True
socket_manager = SocketManager('https://127.0.0.1',
self.fake_socket.port, nop, nop,
nop, TASK_GROUP_ID_1, 0.4, server_death)
self.assertTrue(self.fake_socket.connected)
self.assertFalse(nop_called)
self.assertFalse(server_death_called)
# Test shutdown
self.assertFalse(self.fake_socket.disconnected)
self.assertFalse(socket_manager.is_shutdown)
self.assertTrue(socket_manager.alive)
self.fake_socket.close()
self.assertEqualBy(lambda: socket_manager.alive, False,
8 * socket_manager.HEARTBEAT_RATE)
self.assertEqualBy(lambda: server_death_called, True,
4 * socket_manager.HEARTBEAT_RATE)
self.assertFalse(nop_called)
socket_manager.shutdown()
def test_init_and_socket_shutdown_then_restart(self):
'''Test restoring connection to a socket'''
self.assertFalse(self.fake_socket.connected)
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
server_death_called = False
def server_death(*args):
nonlocal server_death_called
server_death_called = True
socket_manager = SocketManager('https://127.0.0.1',
self.fake_socket.port, nop, nop,
nop, TASK_GROUP_ID_1, 0.4, server_death)
self.assertTrue(self.fake_socket.connected)
self.assertFalse(nop_called)
self.assertFalse(server_death_called)
# Test shutdown
self.assertFalse(self.fake_socket.disconnected)
self.assertFalse(socket_manager.is_shutdown)
self.assertTrue(socket_manager.alive)
self.fake_socket.close()
self.assertEqualBy(lambda: socket_manager.alive, False,
8 * socket_manager.HEARTBEAT_RATE)
self.assertFalse(socket_manager.alive)
self.fake_socket = MockSocket()
self.assertEqualBy(lambda: socket_manager.alive, True,
4 * socket_manager.HEARTBEAT_RATE)
self.assertFalse(nop_called)
self.assertFalse(server_death_called)
socket_manager.shutdown()
def test_init_world_dead(self):
'''Test initialization of a socket manager with a failed startup'''
self.assertFalse(self.fake_socket.connected)
self.fake_socket.close()
# Callbacks should never trigger during proper setup and shutdown
nop_called = False
def nop(*args):
nonlocal nop_called # noqa 999 we don't support py2
nop_called = True
server_death_called = False
def server_death(*args):
nonlocal server_death_called
server_death_called = True
with self.assertRaises(ConnectionRefusedError):
socket_manager = SocketManager('https://127.0.0.1',
self.fake_socket.port, nop, nop,
nop, TASK_GROUP_ID_1, 0.4,
server_death)
self.assertIsNone(socket_manager)
self.assertFalse(nop_called)
self.assertTrue(server_death_called)
class TestSocketManagerRoutingFunctionality(unittest.TestCase):
ID = 'ID'
SENDER_ID = 'SENDER_ID'
ASSIGNMENT_ID = 'ASSIGNMENT_ID'
DATA = 'DATA'
CONVERSATION_ID = 'CONVERSATION_ID'
REQUIRES_ACK = True
BLOCKING = False
ACK_FUNCTION = 'ACK_FUNCTION'
WORLD_ID = '[World_{}]'.format(TASK_GROUP_ID_1)
def on_alive(self, packet):
self.alive_packet = packet
def on_message(self, packet):
self.message_packet = packet
def on_worker_death(self, worker_id, assignment_id):
self.dead_worker_id = worker_id
self.dead_assignment_id = assignment_id
def on_server_death(self):
self.server_died = True
def setUp(self):
self.AGENT_HEARTBEAT_PACKET = Packet(
self.ID, Packet.TYPE_HEARTBEAT, self.SENDER_ID, self.WORLD_ID,
self.ASSIGNMENT_ID, self.DATA, self.CONVERSATION_ID)
self.AGENT_ALIVE_PACKET = Packet(
MESSAGE_ID_1, Packet.TYPE_ALIVE, self.SENDER_ID, self.WORLD_ID,
self.ASSIGNMENT_ID, self.DATA, self.CONVERSATION_ID)
self.MESSAGE_SEND_PACKET_1 = Packet(
MESSAGE_ID_2, Packet.TYPE_MESSAGE, self.WORLD_ID, self.SENDER_ID,
self.ASSIGNMENT_ID, self.DATA, self.CONVERSATION_ID)
self.MESSAGE_SEND_PACKET_2 = Packet(
MESSAGE_ID_3, Packet.TYPE_MESSAGE, self.WORLD_ID, self.SENDER_ID,
self.ASSIGNMENT_ID, self.DATA, self.CONVERSATION_ID,
requires_ack=False)
self.MESSAGE_SEND_PACKET_3 = Packet(
MESSAGE_ID_4, Packet.TYPE_MESSAGE, self.WORLD_ID, self.SENDER_ID,
self.ASSIGNMENT_ID, self.DATA, self.CONVERSATION_ID,
blocking=False)
self.fake_socket = MockSocket()
time.sleep(0.3)
self.alive_packet = None
self.message_packet = None
self.dead_worker_id = None
self.dead_assignment_id = None
self.server_died = False
self.socket_manager = SocketManager(
'https://127.0.0.1', self.fake_socket.port, self.on_alive,
self.on_message, self.on_worker_death, TASK_GROUP_ID_1, 1,
self.on_server_death)
def tearDown(self):
self.socket_manager.shutdown()
self.fake_socket.close()
def test_init_state(self):
'''Ensure all of the initial state of the socket_manager is ready'''
self.assertEqual(self.socket_manager.server_url, 'https://127.0.0.1')
self.assertEqual(self.socket_manager.port, self.fake_socket.port)
self.assertEqual(self.socket_manager.alive_callback, self.on_alive)
self.assertEqual(self.socket_manager.message_callback, self.on_message)
self.assertEqual(self.socket_manager.socket_dead_callback,
self.on_worker_death)
self.assertEqual(self.socket_manager.task_group_id, TASK_GROUP_ID_1)
self.assertEqual(self.socket_manager.missed_pongs,
1 + (1 / SocketManager.HEARTBEAT_RATE))
self.assertIsNotNone(self.socket_manager.ws)
self.assertTrue(self.socket_manager.keep_running)
self.assertIsNotNone(self.socket_manager.listen_thread)
self.assertDictEqual(self.socket_manager.queues, {})
self.assertDictEqual(self.socket_manager.threads, {})
self.assertDictEqual(self.socket_manager.run, {})
self.assertDictEqual(self.socket_manager.last_sent_heartbeat_time, {})
self.assertDictEqual(self.socket_manager.last_received_heartbeat, {})
self.assertDictEqual(self.socket_manager.pongs_without_heartbeat, {})
self.assertDictEqual(self.socket_manager.packet_map, {})
self.assertTrue(self.socket_manager.alive)
self.assertFalse(self.socket_manager.is_shutdown)
self.assertEqual(self.socket_manager.get_my_sender_id(), self.WORLD_ID)
def test_needed_heartbeat(self):
'''Ensure needed heartbeat sends heartbeats at the right time'''
self.socket_manager._safe_send = mock.MagicMock()
connection_id = self.AGENT_HEARTBEAT_PACKET.get_sender_connection_id()
# Ensure no failure under uninitialized cases
self.socket_manager._send_needed_heartbeat(connection_id)
self.socket_manager.last_received_heartbeat[connection_id] = None
self.socket_manager._send_needed_heartbeat(connection_id)
self.socket_manager._safe_send.assert_not_called()
# assert not called when called too recently
self.socket_manager.last_received_heartbeat[connection_id] = \
self.AGENT_HEARTBEAT_PACKET
self.socket_manager.last_sent_heartbeat_time[connection_id] = \
time.time() + 10
self.socket_manager._send_needed_heartbeat(connection_id)
self.socket_manager._safe_send.assert_not_called()
# Assert called when supposed to
self.socket_manager.last_sent_heartbeat_time[connection_id] = \
time.time() - SocketManager.HEARTBEAT_RATE
self.assertGreater(
time.time() -
self.socket_manager.last_sent_heartbeat_time[connection_id],
SocketManager.HEARTBEAT_RATE)
self.socket_manager._send_needed_heartbeat(connection_id)
self.assertLess(
time.time() -
self.socket_manager.last_sent_heartbeat_time[connection_id],
SocketManager.HEARTBEAT_RATE)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING)
used_packet = Packet.from_dict(used_packet_dict['content'])
self.assertNotEqual(self.AGENT_HEARTBEAT_PACKET.id, used_packet.id)
self.assertEqual(used_packet.type, Packet.TYPE_HEARTBEAT)
self.assertEqual(used_packet.sender_id, self.WORLD_ID)
self.assertEqual(used_packet.receiver_id, self.SENDER_ID)
self.assertEqual(used_packet.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(used_packet.data, '')
self.assertEqual(used_packet.conversation_id, self.CONVERSATION_ID)
self.assertEqual(used_packet.requires_ack, False)
self.assertEqual(used_packet.blocking, False)
def test_ack_send(self):
'''Ensure acks are being properly created and sent'''
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._send_ack(self.AGENT_ALIVE_PACKET)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING)
used_packet = Packet.from_dict(used_packet_dict['content'])
self.assertEqual(self.AGENT_ALIVE_PACKET.id, used_packet.id)
self.assertEqual(used_packet.type, Packet.TYPE_ACK)
self.assertEqual(used_packet.sender_id, self.WORLD_ID)
self.assertEqual(used_packet.receiver_id, self.SENDER_ID)
self.assertEqual(used_packet.assignment_id, self.ASSIGNMENT_ID)
self.assertEqual(used_packet.conversation_id, self.CONVERSATION_ID)
self.assertEqual(used_packet.requires_ack, False)
self.assertEqual(used_packet.blocking, False)
self.assertEqual(self.AGENT_ALIVE_PACKET.status, Packet.STATUS_SENT)
def _send_packet_in_background(self, packet, send_time):
'''creates a thread to handle waiting for a packet send'''
def do_send():
self.socket_manager._send_packet(
packet, packet.get_receiver_connection_id(), send_time
)
self.sent = True
send_thread = threading.Thread(target=do_send, daemon=True)
send_thread.start()
time.sleep(0.02)
def test_blocking_ack_packet_send(self):
'''Checks to see if ack'ed blocking packets are working properly'''
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._safe_put = mock.MagicMock()
self.sent = False
# Test a blocking acknowledged packet
send_time = time.time()
self.assertEqual(self.MESSAGE_SEND_PACKET_1.status, Packet.STATUS_INIT)
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_1, send_time)
self.assertEqual(self.MESSAGE_SEND_PACKET_1.status, Packet.STATUS_SENT)
self.socket_manager._safe_send.assert_called_once()
connection_id = self.MESSAGE_SEND_PACKET_1.get_receiver_connection_id()
self.socket_manager._safe_put.assert_called_once_with(
connection_id, (send_time, self.MESSAGE_SEND_PACKET_1))
self.assertTrue(self.sent)
self.socket_manager._safe_send.reset_mock()
self.socket_manager._safe_put.reset_mock()
# Send it again - end outcome should be a call to send only
# with sent set
self.MESSAGE_SEND_PACKET_1.status = Packet.STATUS_ACK
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_1, send_time)
self.socket_manager._safe_send.assert_not_called()
self.socket_manager._safe_put.assert_not_called()
def test_non_blocking_ack_packet_send(self):
'''Checks to see if ack'ed non-blocking packets are working'''
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._safe_put = mock.MagicMock()
self.sent = False
        # Test a non-blocking acknowledged packet
send_time = time.time()
self.assertEqual(self.MESSAGE_SEND_PACKET_3.status, Packet.STATUS_INIT)
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_3, send_time)
self.assertEqual(self.MESSAGE_SEND_PACKET_3.status, Packet.STATUS_SENT)
self.socket_manager._safe_send.assert_called_once()
self.socket_manager._safe_put.assert_called_once()
self.assertTrue(self.sent)
call_args = self.socket_manager._safe_put.call_args[0]
connection_id = call_args[0]
queue_item = call_args[1]
self.assertEqual(
connection_id,
self.MESSAGE_SEND_PACKET_3.get_receiver_connection_id())
expected_send_time = \
send_time + SocketManager.ACK_TIME[self.MESSAGE_SEND_PACKET_3.type]
self.assertAlmostEqual(queue_item[0], expected_send_time, places=2)
self.assertEqual(queue_item[1], self.MESSAGE_SEND_PACKET_3)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING)
self.assertDictEqual(used_packet_dict['content'],
self.MESSAGE_SEND_PACKET_3.as_dict())
def test_non_ack_packet_send(self):
'''Checks to see if non-ack'ed packets are working'''
self.socket_manager._safe_send = mock.MagicMock()
self.socket_manager._safe_put = mock.MagicMock()
self.sent = False
        # Test a non-acknowledged packet
send_time = time.time()
self.assertEqual(self.MESSAGE_SEND_PACKET_2.status, Packet.STATUS_INIT)
self._send_packet_in_background(self.MESSAGE_SEND_PACKET_2, send_time)
self.assertEqual(self.MESSAGE_SEND_PACKET_2.status, Packet.STATUS_SENT)
self.socket_manager._safe_send.assert_called_once()
self.socket_manager._safe_put.assert_not_called()
self.assertTrue(self.sent)
used_packet_json = self.socket_manager._safe_send.call_args[0][0]
used_packet_dict = json.loads(used_packet_json)
self.assertEqual(
used_packet_dict['type'], data_model.SOCKET_ROUTE_PACKET_STRING)
self.assertDictEqual(used_packet_dict['content'],
self.MESSAGE_SEND_PACKET_2.as_dict())
def test_simple_packet_channel_management(self):
'''Ensure that channels are created, managed, and then removed
as expected
'''
self.socket_manager._safe_put = mock.MagicMock()
use_packet = self.MESSAGE_SEND_PACKET_1
worker_id = use_packet.receiver_id
assignment_id = use_packet.assignment_id
# Open a channel and assert it is there
self.socket_manager.open_channel(worker_id, assignment_id)
time.sleep(0.1)
connection_id = use_packet.get_receiver_connection_id()
self.assertTrue(self.socket_manager.run[connection_id])
self.assertIsNotNone(self.socket_manager.queues[connection_id])
self.assertEqual(
self.socket_manager.last_sent_heartbeat_time[connection_id], 0)
self.assertEqual(
self.socket_manager.pongs_without_heartbeat[connection_id], 0)
self.assertIsNone(
self.socket_manager.last_received_heartbeat[connection_id])
self.assertTrue(self.socket_manager.socket_is_open(connection_id))
self.assertFalse(self.socket_manager.socket_is_open(FAKE_ID))
# Send a bad packet, ensure it is ignored
resp = self.socket_manager.queue_packet(self.AGENT_ALIVE_PACKET)
self.socket_manager._safe_put.assert_not_called()
self.assertFalse(resp)
self.assertNotIn(self.AGENT_ALIVE_PACKET.id,
self.socket_manager.packet_map)
# Send a packet to an open socket, ensure it got queued
resp = self.socket_manager.queue_packet(use_packet)
self.socket_manager._safe_put.assert_called_once()
self.assertIn(use_packet.id, self.socket_manager.packet_map)
self.assertTrue(resp)
# Assert we can get the status of a packet in the map, but not
# existing doesn't throw an error
self.assertEqual(self.socket_manager.get_status(use_packet.id),
use_packet.status)
self.assertEqual(self.socket_manager.get_status(FAKE_ID),
Packet.STATUS_NONE)
# Assert that closing a thread does the correct cleanup work
self.socket_manager.close_channel(connection_id)
time.sleep(0.2)
self.assertFalse(self.socket_manager.run[connection_id])
self.assertNotIn(connection_id, self.socket_manager.queues)
self.assertNotIn(connection_id, self.socket_manager.threads)
self.assertNotIn(use_packet.id, self.socket_manager.packet_map)
# Assert that opening multiple threads and closing them is possible
self.socket_manager.open_channel(worker_id, assignment_id)
self.socket_manager.open_channel(worker_id + '2', assignment_id)
time.sleep(0.1)
self.assertEqual(len(self.socket_manager.queues), 2)
self.socket_manager.close_all_channels()
time.sleep(0.1)
self.assertEqual(len(self.socket_manager.queues), 0)
def test_safe_put(self):
'''Test safe put and queue retrieval mechanisms'''
self.socket_manager._send_packet = mock.MagicMock()
use_packet = self.MESSAGE_SEND_PACKET_1
worker_id = use_packet.receiver_id
assignment_id = use_packet.assignment_id
connection_id = use_packet.get_receiver_connection_id()
# Open a channel and assert it is there
self.socket_manager.open_channel(worker_id, assignment_id)
send_time = time.time()
self.socket_manager._safe_put(connection_id, (send_time, use_packet))
# Wait for the sending thread to try to pull the packet from the queue
time.sleep(0.3)
# Ensure the right packet was popped and sent.
self.socket_manager._send_packet.assert_called_once()
call_args = self.socket_manager._send_packet.call_args[0]
self.assertEqual(use_packet, call_args[0])
self.assertEqual(connection_id, call_args[1])
self.assertEqual(send_time, call_args[2])
self.socket_manager.close_all_channels()
time.sleep(0.1)
self.socket_manager._safe_put(connection_id, (send_time, use_packet))
self.assertEqual(use_packet.status, Packet.STATUS_FAIL)
class TestSocketManagerMessageHandling(unittest.TestCase):
'''Test sending messages to the world and then to each of two agents,
along with failure cases for each
'''
def on_alive(self, packet):
self.alive_packet = packet
self.socket_manager.open_channel(
packet.sender_id, packet.assignment_id)
def on_message(self, packet):
self.message_packet = packet
def on_worker_death(self, worker_id, assignment_id):
self.dead_worker_id = worker_id
self.dead_assignment_id = assignment_id
def on_server_death(self):
self.server_died = True
def assertEqualBy(self, val_func, val, max_time):
start_time = time.time()
while val_func() != val:
assert time.time() - start_time < max_time, \
"Value was not attained in specified time"
time.sleep(0.1)
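    # Usage sketch (illustrative, mirrors calls made later in this class): assertEqualBy
    # polls a thread-updated value until it matches or the timeout expires, e.g.
    #   self.assertEqualBy(lambda: self.message_packet is None, False, 8)
    # waits up to 8 seconds for another thread to populate self.message_packet.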
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(0.3)
self.agent1 = MockAgent(TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1, TASK_GROUP_ID_1)
self.agent2 = MockAgent(TEST_HIT_ID_2, TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2, TASK_GROUP_ID_1)
self.alive_packet = None
self.message_packet = None
self.dead_worker_id = None
self.dead_assignment_id = None
self.server_died = False
self.socket_manager = SocketManager(
'https://127.0.0.1', 3030, self.on_alive, self.on_message,
self.on_worker_death, TASK_GROUP_ID_1, 1, self.on_server_death)
def tearDown(self):
self.socket_manager.shutdown()
self.fake_socket.close()
def test_alive_send_and_disconnect(self):
acked_packet = None
incoming_hb = None
message_packet = None
hb_count = 0
def on_ack(*args):
nonlocal acked_packet
acked_packet = args[0]
def on_hb(*args):
nonlocal incoming_hb, hb_count
incoming_hb = args[0]
hb_count += 1
def on_msg(*args):
nonlocal message_packet
message_packet = args[0]
self.agent1.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.assertIsNone(acked_packet)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertEqual(hb_count, 0)
# Assert alive is registered
alive_id = self.agent1.send_alive()
self.assertEqualBy(lambda: acked_packet is None, False, 8)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertIsNone(self.message_packet)
self.assertEqualBy(lambda: self.alive_packet is None, False, 8)
self.assertEqual(self.alive_packet.id, alive_id)
self.assertEqual(acked_packet.id, alive_id, 'Alive was not acked')
acked_packet = None
# assert sending heartbeats actually works, and that heartbeats don't
# get acked
self.agent1.send_heartbeat()
self.assertEqualBy(lambda: incoming_hb is None, False, 8)
self.assertIsNone(acked_packet)
self.assertGreater(hb_count, 0)
# Test message send from agent
test_message_text_1 = 'test_message_text_1'
msg_id = self.agent1.send_message(test_message_text_1)
self.assertEqualBy(lambda: self.message_packet is None, False, 8)
self.assertEqualBy(lambda: acked_packet is None, False, 8)
self.assertEqual(self.message_packet.id, acked_packet.id)
self.assertEqual(self.message_packet.id, msg_id)
self.assertEqual(self.message_packet.data['text'], test_message_text_1)
# Test message send to agent
manager_message_id = 'message_id_from_manager'
test_message_text_2 = 'test_message_text_2'
message_send_packet = Packet(
manager_message_id, Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(), TEST_WORKER_ID_1,
TEST_ASSIGNMENT_ID_1, test_message_text_2, 't2')
self.socket_manager.queue_packet(message_send_packet)
self.assertEqualBy(lambda: message_packet is None, False, 8)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertEqualBy(
lambda: self.socket_manager.packet_map[manager_message_id].status,
Packet.STATUS_ACK,
6,
)
# Test agent disconnect
self.agent1.always_beat = False
self.assertEqualBy(lambda: self.dead_worker_id, TEST_WORKER_ID_1, 8)
self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_1)
self.assertGreater(hb_count, 1)
def test_failed_ack_resend(self):
'''Ensures when a message from the manager is dropped, it gets
retried until it works as long as there hasn't been a disconnect
'''
acked_packet = None
incoming_hb = None
message_packet = None
hb_count = 0
def on_ack(*args):
nonlocal acked_packet
acked_packet = args[0]
def on_hb(*args):
nonlocal incoming_hb, hb_count
incoming_hb = args[0]
hb_count += 1
def on_msg(*args):
nonlocal message_packet
message_packet = args[0]
self.agent1.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.assertIsNone(acked_packet)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertEqual(hb_count, 0)
# Assert alive is registered
alive_id = self.agent1.send_alive()
self.assertEqualBy(lambda: acked_packet is None, False, 8)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertIsNone(self.message_packet)
self.assertEqualBy(lambda: self.alive_packet is None, False, 8)
self.assertEqual(self.alive_packet.id, alive_id)
self.assertEqual(acked_packet.id, alive_id, 'Alive was not acked')
acked_packet = None
# assert sending heartbeats actually works, and that heartbeats don't
# get acked
self.agent1.send_heartbeat()
self.assertEqualBy(lambda: incoming_hb is None, False, 8)
self.assertIsNone(acked_packet)
self.assertGreater(hb_count, 0)
# Test message send to agent
manager_message_id = 'message_id_from_manager'
test_message_text_2 = 'test_message_text_2'
self.agent1.send_acks = False
message_send_packet = Packet(
manager_message_id, Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(), TEST_WORKER_ID_1,
TEST_ASSIGNMENT_ID_1, test_message_text_2, 't2')
self.socket_manager.queue_packet(message_send_packet)
self.assertEqualBy(lambda: message_packet is None, False, 8)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertNotEqual(
self.socket_manager.packet_map[manager_message_id].status,
Packet.STATUS_ACK,
)
message_packet = None
self.agent1.send_acks = True
self.assertEqualBy(lambda: message_packet is None, False, 8)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertEqualBy(
lambda: self.socket_manager.packet_map[manager_message_id].status,
Packet.STATUS_ACK,
6,
)
def test_one_agent_disconnect_other_alive(self):
acked_packet = None
incoming_hb = None
message_packet = None
hb_count = 0
def on_ack(*args):
nonlocal acked_packet
acked_packet = args[0]
def on_hb(*args):
nonlocal incoming_hb, hb_count
incoming_hb = args[0]
hb_count += 1
def on_msg(*args):
nonlocal message_packet
message_packet = args[0]
self.agent1.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.agent2.register_to_socket(self.fake_socket, on_ack, on_hb, on_msg)
self.assertIsNone(acked_packet)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
self.assertEqual(hb_count, 0)
# Assert alive is registered
self.agent1.send_alive()
self.agent2.send_alive()
self.assertEqualBy(lambda: acked_packet is None, False, 8)
self.assertIsNone(incoming_hb)
self.assertIsNone(message_packet)
# Start sending heartbeats
self.agent1.send_heartbeat()
self.agent2.send_heartbeat()
# Kill second agent
self.agent2.always_beat = False
self.assertEqualBy(lambda: self.dead_worker_id, TEST_WORKER_ID_2, 8)
self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_2)
# Run rest of tests
# Test message send from agent
test_message_text_1 = 'test_message_text_1'
msg_id = self.agent1.send_message(test_message_text_1)
self.assertEqualBy(lambda: self.message_packet is None, False, 8)
self.assertEqualBy(lambda: acked_packet is None, False, 8)
self.assertEqual(self.message_packet.id, acked_packet.id)
self.assertEqual(self.message_packet.id, msg_id)
self.assertEqual(self.message_packet.data['text'], test_message_text_1)
# Test message send to agent
manager_message_id = 'message_id_from_manager'
test_message_text_2 = 'test_message_text_2'
message_send_packet = Packet(
manager_message_id, Packet.TYPE_MESSAGE,
self.socket_manager.get_my_sender_id(), TEST_WORKER_ID_1,
TEST_ASSIGNMENT_ID_1, test_message_text_2, 't2')
self.socket_manager.queue_packet(message_send_packet)
self.assertEqualBy(lambda: message_packet is None, False, 8)
self.assertEqual(message_packet.id, manager_message_id)
self.assertEqual(message_packet.data, test_message_text_2)
self.assertIn(manager_message_id, self.socket_manager.packet_map)
self.assertEqualBy(
lambda: self.socket_manager.packet_map[manager_message_id].status,
Packet.STATUS_ACK,
6,
)
# Test agent disconnect
self.agent1.always_beat = False
self.assertEqualBy(lambda: self.dead_worker_id, TEST_WORKER_ID_1, 8)
self.assertEqual(self.dead_assignment_id, TEST_ASSIGNMENT_ID_1)
if __name__ == '__main__':
unittest.main(buffer=True)
|
HydrusPaths.py
|
import os
import psutil
import re
import send2trash
import shlex
import shutil
import stat
import subprocess
import threading
import traceback
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusThreading
def AppendPathUntilNoConflicts( path ):
( path_absent_ext, ext ) = os.path.splitext( path )
good_path_absent_ext = path_absent_ext
i = 0
while os.path.exists( good_path_absent_ext + ext ):
good_path_absent_ext = path_absent_ext + '_' + str( i )
i += 1
return good_path_absent_ext + ext
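# Illustrative example (file names are made up): if 'dupe.jpg' and 'dupe_0.jpg'
# already exist on disk, AppendPathUntilNoConflicts( 'dupe.jpg' ) keeps counting
# and returns 'dupe_1.jpg'.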
def ConvertAbsPathToPortablePath( abs_path, base_dir_override = None ):
try:
if base_dir_override is None:
base_dir = HG.controller.GetDBDir()
else:
base_dir = base_dir_override
portable_path = os.path.relpath( abs_path, base_dir )
if portable_path.startswith( '..' ):
portable_path = abs_path
except:
portable_path = abs_path
if HC.PLATFORM_WINDOWS:
portable_path = portable_path.replace( '\\', '/' ) # store seps as /, to maintain multiplatform uniformity
return portable_path
def ConvertPortablePathToAbsPath( portable_path, base_dir_override = None ):
portable_path = os.path.normpath( portable_path ) # collapses .. stuff and converts / to \\ for windows only
if os.path.isabs( portable_path ):
abs_path = portable_path
else:
if base_dir_override is None:
base_dir = HG.controller.GetDBDir()
else:
base_dir = base_dir_override
abs_path = os.path.normpath( os.path.join( base_dir, portable_path ) )
if not HC.PLATFORM_WINDOWS and not os.path.exists( abs_path ):
abs_path = abs_path.replace( '\\', '/' )
return abs_path
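# Hedged round-trip sketch (the base dir below is a made-up example): portable paths
# are stored relative to the db dir with '/' separators, so
#   ConvertAbsPathToPortablePath( '/opt/hydrus/db/client_files', '/opt/hydrus/db' )  # -> 'client_files'
#   ConvertPortablePathToAbsPath( 'client_files', '/opt/hydrus/db' )                 # -> '/opt/hydrus/db/client_files'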
def CopyAndMergeTree( source, dest ):
pauser = HydrusData.BigJobPauser()
MakeSureDirectoryExists( dest )
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
dest_root = root.replace( source, dest )
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory copy abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
ok = MirrorFile( source_path, dest_path )
if not ok:
num_errors += 1
def CopyFileLikeToFileLike( f_source, f_dest ):
for block in ReadFileLikeAsBlocks( f_source ): f_dest.write( block )
def DeletePath( path ):
if HG.file_report_mode:
HydrusData.ShowText( 'Deleting {}'.format( path ) )
HydrusData.ShowText( ''.join( traceback.format_stack() ) )
if os.path.exists( path ):
MakeFileWriteable( path )
try:
if os.path.isdir( path ):
shutil.rmtree( path )
else:
os.remove( path )
except Exception as e:
if 'Error 32' in str( e ):
# file in use by another process
HydrusData.DebugPrint( 'Trying to delete ' + path + ' failed because it was in use by another process.' )
else:
HydrusData.ShowText( 'Trying to delete ' + path + ' caused the following error:' )
HydrusData.ShowException( e )
def DirectoryIsWriteable( path ):
# testing access bits on directories to see if we can make new files is multiplatform hellmode
# so, just try it and see what happens
while not os.path.exists( path ):
try:
path = os.path.dirname( path )
except:
return False
temp_path = os.path.join( path, 'hydrus_temp_test_top_jej' )
if os.path.exists( temp_path ):
try:
os.unlink( temp_path )
except:
return False
try:
# using tempfile.TemporaryFile actually loops on PermissionError from Windows lmaaaooooo, thinking this is an already existing file
# so, just do it manually!
f = open( temp_path, 'wb' )
f.close()
os.unlink( temp_path )
return True
except:
return False
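# Minimal usage sketch (not part of the original module): because the function walks
# up to the nearest existing ancestor before probing with a temp file, it can be asked
# about a directory that has not been created yet, e.g.
#   if DirectoryIsWriteable( os.path.join( base_dir, 'client_files' ) ):
#       MakeSureDirectoryExists( os.path.join( base_dir, 'client_files' ) )
# where base_dir is a hypothetical existing location.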
def FilterFreePaths( paths ):
free_paths = []
for path in paths:
HydrusThreading.CheckIfThreadShuttingDown()
if PathIsFree( path ):
free_paths.append( path )
return free_paths
def GetDefaultLaunchPath():
if HC.PLATFORM_WINDOWS:
return 'windows is called directly'
elif HC.PLATFORM_MACOS:
return 'open "%path%"'
elif HC.PLATFORM_LINUX:
return 'xdg-open "%path%"'
elif HC.PLATFORM_HAIKU:
return 'open "%path%"'
def GetDevice( path ):
path = path.lower()
try:
for scan_network in ( False, True ):
partition_infos = psutil.disk_partitions( all = scan_network )
def sort_descending_mountpoint( partition_info ): # i.e. put '/home' before '/'
return - len( partition_info.mountpoint )
partition_infos.sort( key = sort_descending_mountpoint )
for partition_info in partition_infos:
if path.startswith( partition_info.mountpoint.lower() ):
return partition_info.device
except UnicodeDecodeError: # wew lad psutil on some russian lad's fun filesystem
return None
return None
def GetFreeSpace( path ):
disk_usage = psutil.disk_usage( path )
return disk_usage.free
def LaunchDirectory( path ):
def do_it():
if HC.PLATFORM_WINDOWS:
os.startfile( path )
else:
if HC.PLATFORM_MACOS:
cmd = [ 'open', path ]
elif HC.PLATFORM_LINUX:
cmd = [ 'xdg-open', path ]
elif HC.PLATFORM_HAIKU:
cmd = [ 'open', path ]
# setsid call un-childs this new process
sbp_kwargs = HydrusData.GetSubprocessKWArgs()
preexec_fn = getattr( os, 'setsid', None )
HydrusData.CheckProgramIsNotShuttingDown()
process = subprocess.Popen( cmd, preexec_fn = preexec_fn, **sbp_kwargs )
HydrusThreading.SubprocessCommunicate( process )
thread = threading.Thread( target = do_it )
thread.daemon = True
thread.start()
def LaunchFile( path, launch_path = None ):
def do_it( launch_path ):
if HC.PLATFORM_WINDOWS and launch_path is None:
os.startfile( path )
else:
if launch_path is None:
launch_path = GetDefaultLaunchPath()
complete_launch_path = launch_path.replace( '%path%', path )
hide_terminal = False
if HC.PLATFORM_WINDOWS:
cmd = complete_launch_path
preexec_fn = None
else:
cmd = shlex.split( complete_launch_path )
preexec_fn = getattr( os, 'setsid', None )
if HG.subprocess_report_mode:
message = 'Attempting to launch ' + path + ' using command ' + repr( cmd ) + '.'
HydrusData.ShowText( message )
try:
sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = hide_terminal, text = True )
HydrusData.CheckProgramIsNotShuttingDown()
process = subprocess.Popen( cmd, preexec_fn = preexec_fn, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, **sbp_kwargs )
( stdout, stderr ) = HydrusThreading.SubprocessCommunicate( process )
if HG.subprocess_report_mode:
if stdout is None and stderr is None:
HydrusData.ShowText( 'No stdout or stderr came back.' )
if stdout is not None:
HydrusData.ShowText( 'stdout: ' + repr( stdout ) )
if stderr is not None:
HydrusData.ShowText( 'stderr: ' + repr( stderr ) )
except Exception as e:
HydrusData.ShowText( 'Could not launch a file! Command used was:' + os.linesep + str( cmd ) )
HydrusData.ShowException( e )
thread = threading.Thread( target = do_it, args = ( launch_path, ) )
thread.daemon = True
thread.start()
def MakeSureDirectoryExists( path ):
os.makedirs( path, exist_ok = True )
def MakeFileWriteable( path ):
if not os.path.exists( path ):
return
try:
stat_result = os.stat( path )
current_bits = stat_result.st_mode
if HC.PLATFORM_WINDOWS:
# this is actually the same value as S_IWUSR, but let's not try to second guess ourselves
desired_bits = stat.S_IREAD | stat.S_IWRITE
else:
# guarantee 644 for regular files m8
desired_bits = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if not ( desired_bits & current_bits ) == desired_bits:
os.chmod( path, current_bits | desired_bits )
except Exception as e:
HydrusData.Print( 'Wanted to add write permission to "{}", but had an error: {}'.format( path, str( e ) ) )
def safe_copy2( source, dest ):
copy_metadata = True
if HC.PLATFORM_WINDOWS:
mtime = os.path.getmtime( source )
# this is 1980-01-01 UTC, before which Windows can have trouble copying lmaoooooo
if mtime < 315532800:
copy_metadata = False
if copy_metadata:
# this overwrites on conflict without hassle
shutil.copy2( source, dest )
else:
shutil.copy( source, dest )
def MergeFile( source, dest ):
# this can merge a file, but if it is given a dir it will just straight up overwrite not merge
if not os.path.isdir( source ):
MakeFileWriteable( source )
if PathsHaveSameSizeAndDate( source, dest ):
DeletePath( source )
return True
try:
# this overwrites on conflict without hassle
shutil.move( source, dest, copy_function = safe_copy2 )
except Exception as e:
HydrusData.ShowText( 'Trying to move ' + source + ' to ' + dest + ' caused the following problem:' )
HydrusData.ShowException( e )
return False
return True
def MergeTree( source, dest, text_update_hook = None ):
pauser = HydrusData.BigJobPauser()
if not os.path.exists( dest ):
try:
shutil.move( source, dest, copy_function = safe_copy2 )
except OSError:
# if there were read only files in source and this was partition to partition, the copy2 goes ok but the subsequent source unlink fails
# so, if it seems this has happened, let's just try a walking mergetree, which should be able to deal with these readonlies on a file-by-file basis
if os.path.exists( dest ):
MergeTree( source, dest, text_update_hook = text_update_hook )
else:
# I had a thing here that tried to optimise if dest existed but was empty, but it wasn't neat
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
if text_update_hook is not None:
text_update_hook( 'Copying ' + root + '.' )
dest_root = root.replace( source, dest )
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory move abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
ok = MergeFile( source_path, dest_path )
if not ok:
num_errors += 1
if num_errors == 0:
DeletePath( source )
def MirrorFile( source, dest ):
if not PathsHaveSameSizeAndDate( source, dest ):
try:
MakeFileWriteable( dest )
safe_copy2( source, dest )
except Exception as e:
HydrusData.ShowText( 'Trying to copy ' + source + ' to ' + dest + ' caused the following problem:' )
HydrusData.ShowException( e )
return False
return True
def MirrorTree( source, dest, text_update_hook = None, is_cancelled_hook = None ):
pauser = HydrusData.BigJobPauser()
MakeSureDirectoryExists( dest )
num_errors = 0
for ( root, dirnames, filenames ) in os.walk( source ):
if is_cancelled_hook is not None and is_cancelled_hook():
return
if text_update_hook is not None:
text_update_hook( 'Copying ' + root + '.' )
dest_root = root.replace( source, dest )
surplus_dest_paths = { os.path.join( dest_root, dest_filename ) for dest_filename in os.listdir( dest_root ) }
for dirname in dirnames:
pauser.Pause()
source_path = os.path.join( root, dirname )
dest_path = os.path.join( dest_root, dirname )
surplus_dest_paths.discard( dest_path )
MakeSureDirectoryExists( dest_path )
shutil.copystat( source_path, dest_path )
for filename in filenames:
if num_errors > 5:
raise Exception( 'Too many errors, directory copy abandoned.' )
pauser.Pause()
source_path = os.path.join( root, filename )
dest_path = os.path.join( dest_root, filename )
surplus_dest_paths.discard( dest_path )
ok = MirrorFile( source_path, dest_path )
if not ok:
num_errors += 1
for dest_path in surplus_dest_paths:
pauser.Pause()
DeletePath( dest_path )
def OpenFileLocation( path ):
def do_it():
if HC.PLATFORM_WINDOWS:
cmd = [ 'explorer', '/select,', path ]
elif HC.PLATFORM_MACOS:
cmd = [ 'open', '-R', path ]
elif HC.PLATFORM_LINUX:
raise NotImplementedError( 'Linux cannot open file locations!' )
elif HC.PLATFORM_HAIKU:
raise NotImplementedError( 'Haiku cannot open file locations!' )
sbp_kwargs = HydrusData.GetSubprocessKWArgs( hide_terminal = False )
HydrusData.CheckProgramIsNotShuttingDown()
process = subprocess.Popen( cmd, **sbp_kwargs )
HydrusThreading.SubprocessCommunicate( process )
thread = threading.Thread( target = do_it )
thread.daemon = True
thread.start()
def PathsHaveSameSizeAndDate( path1, path2 ):
if os.path.exists( path1 ) and os.path.exists( path2 ):
same_size = os.path.getsize( path1 ) == os.path.getsize( path2 )
same_modified_time = int( os.path.getmtime( path1 ) ) == int( os.path.getmtime( path2 ) )
if same_size and same_modified_time:
return True
return False
def PathIsFree( path ):
try:
stat_result = os.stat( path )
current_bits = stat_result.st_mode
if not current_bits & stat.S_IWRITE:
# read-only file, cannot do the rename check
return True
os.rename( path, path ) # rename a path to itself
return True
except OSError as e: # 'already in use by another process' or an odd filename too long error
HydrusData.Print( 'Already in use/inaccessible: ' + path )
return False
def ReadFileLikeAsBlocks( f ):
next_block = f.read( HC.READ_BLOCK_SIZE )
while len( next_block ) > 0:
yield next_block
next_block = f.read( HC.READ_BLOCK_SIZE )
def RecyclePath( path ):
if HG.file_report_mode:
HydrusData.ShowText( 'Recycling {}'.format( path ) )
HydrusData.ShowText( ''.join( traceback.format_stack() ) )
if os.path.exists( path ):
MakeFileWriteable( path )
try:
send2trash.send2trash( path )
except:
HydrusData.Print( 'Trying to recycle ' + path + ' created this error:' )
HydrusData.DebugPrint( traceback.format_exc() )
HydrusData.Print( 'It has been fully deleted instead.' )
DeletePath( path )
def SanitizeFilename( filename ):
if HC.PLATFORM_WINDOWS:
# \, /, :, *, ?, ", <, >, |
filename = re.sub( r'\\|/|:|\*|\?|"|<|>|\|', '_', filename )
else:
filename = re.sub( '/', '_', filename )
return filename
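# Illustrative behaviour (example filename is made up): on Windows the reserved
# characters \ / : * ? " < > | all become underscores, elsewhere only '/' does.
#   SanitizeFilename( 'a:b*c?.txt' )  # -> 'a_b_c_.txt' on Windows, unchanged on Linux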
|
DLProcessor.py
|
import socket, ssl
import threading
import time
from .DLInfos import Target
import logging
import traceback
import sys
# urllib's split helpers moved to urllib.parse in Python 3
if sys.version_info[0] < 3:
    from urllib import splitvalue, splitquery, urlencode
else:
    from urllib.parse import splitvalue, splitquery, urlencode
logger = logging.getLogger('nbdler')
TMP_BUFFER_SIZE = 1024 * 1024 * 1
socket.setdefaulttimeout(3)
ssl._create_default_https_context = ssl._create_unverified_context
class OpaReq:
def __init__(self):
self.cut = []
self.pause = False
self.switch = False
self.wait = 0
def clear(self):
self.cut = []
self.pause = False
self.switch = False
self.wait = 0
class ErrorCounter(object):
_404__THRESHOLD = 5
_302__THRESHOLD = 20
RECV_TIMEOUT_THRESHOLD = 10
SOCKET_ERROR_THRESHOLD = 10
def __init__(self):
self._404_ = 0
self._302_ = 0
self.recv_error = 0
self.socket_error = 0
self.error_occur = False
def __setattr__(self, key, value):
object.__setattr__(self, key, value)
if key != 'error_occur':
for i, j in self.check().items():
if getattr(self, i, 0) > getattr(ErrorCounter, j, 0):
self.error_occur = True
break
else:
self.error_occur = False
def isError(self):
return self.error_occur
def clear(self):
self._404_ = self._302_ = self.recv_error = self.socket_error = 0
def check(self):
return {
'_404_': '_404__THRESHOLD',
'_302_': '_302__THRESHOLD',
'recv_error': 'RECV_TIMEOUT_THRESHOLD',
'socket_error': 'SOCKET_ERROR_THRESHOLD',
}
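# Rough usage sketch (illustrative only): every attribute assignment re-runs the
# threshold check, so error_occur flips as soon as any counter exceeds its
# *_THRESHOLD and resets once clear() zeroes them all.
#   counter = ErrorCounter()
#   for _ in range(ErrorCounter.SOCKET_ERROR_THRESHOLD + 1):
#       counter.socket_error += 1
#   counter.isError()   # True
#   counter.clear()
#   counter.isError()   # False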
class Processor(object):
def __init__(self, Progress, Urlid):
self.progress = Progress
self.url = None
self.urlid = Urlid
self.buff = []
self.buff_inc = 0
self.opareq = OpaReq()
self.target = Target()
self.__thread__ = None
self.__opa_lock__ = threading.Lock()
self.__run_lock__ = threading.Lock()
self.__buff_lock__ = threading.Lock()
self.error_counter = ErrorCounter()
def _Thread(self, *args, **kwargs):
return self.getHandler().thrpool.Thread(*args, **kwargs)
def loadUrl(self, Urlid):
urls = self.getHandler().url.getAllUrl()
if Urlid in urls:
self.url = urls[Urlid]
self.target.load(self.url.url)
else:
self.url = None
self.urlid = Urlid
def isReady(self):
return self.progress.isReady()
def isRunning(self):
        return self.__thread__ and self.__thread__._started.is_set() and self.__thread__.is_alive()
def isPause(self):
return self.progress.isPause()
def isEnd(self):
return self.progress.isEnd()
def isGoEnd(self):
return self.progress.isGoEnd()
def getHandler(self):
return self.progress.globalprog.handler
def selfCheck(self):
if self.opareq.pause:
self.getPause()
return False
if not self.url:
self.loadUrl(self.urlid)
if not self.url or not self.getHandler().url.hasUrl(self.urlid):
self.getSwitch()
if self.isReady():
if not self.isRunning():
if self.error_counter.isError():
self.getSwitch()
if self.opareq.cut:
self.getCut()
if self.opareq.pause:
self.getPause()
return False
return True
else:
self.close()
return False
def run(self):
with self.__run_lock__:
if self.selfCheck():
thr = self._Thread(target=self.__getdata__, name='Nbdler-Processor')
self.__thread__ = thr
thr.start()
def __getdata__(self):
if self.opareq.cut:
self.getCut()
if self.opareq.pause:
self.getPause()
return
sock, buff = self.makeSocket()
if not sock:
self.error_counter.socket_error += 1
time.sleep(0.5)
self.run()
return
else:
status, _headers = parse_headers(buff[:(buff.index(b'\r\n\r\n'))])
self.target.update(headers=_headers, code=status)
if status == 200:
self.__200__(sock, buff)
elif status == 206:
self.__206__(sock, buff)
elif status == 302:
self.__302__(sock)
elif status == 405:
self.__405__(sock)
elif status == 404:
self.__404__(sock)
# elif status == 416:
# self.__416__(sock)
elif status != 206 and status != 200:
self.__404__(sock)
try:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
except socket.error:
pass
def makeSocket(self):
sock = None
buff = b''
try:
ip = socket.gethostbyname(self.target.host)
            if self.target.protocol == 'https':
                # wrap with SNI so the server sees the intended hostname; avoids the
                # SSLV3_ALERT_HANDSHAKE_FAILURE seen when no server_hostname is sent
                context = ssl._create_unverified_context()
                sock = context.wrap_socket(socket.socket(), server_hostname=self.target.host)
elif self.target.protocol == 'http':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if sock is None:
                self.error_counter.socket_error += 1
                return None, b''
sock.connect((ip, self.target.port))
packet = self.makeSocketPacket()
sock.send(packet)
buff = sock.recv(1024)
except Exception as e:
# print(e.args)
# traceback.print_exc()
self.error_counter.socket_error += 1
sock = None
else:
if not buff:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
sock = None
else:
while b'\r\n\r\n' not in buff:
buff += sock.recv(512)
if b'HTTP' not in buff:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
sock = None
break
return sock, buff
def __302__(self, sock):
if self.target.headers.get('location', None):
self.target.load(self.target.headers.get('location'))
self.run()
def __206__(self, sock, buff):
self.__200__(sock, buff)
def __200__(self, sock, buff):
self.error_counter.clear()
buff = buff[(buff.index(b'\r\n\r\n') + 4):]
self.progress.go(len(buff))
self.__recv_loop__(sock, buff)
def __405__(self, sock):
self.getSwitch()
time.sleep(0.1)
self.run()
def __404__(self, sock):
self.error_counter._404_ += 1
if self.error_counter._404_ > 3:
self.url.reload()
time.sleep(0.3)
self.run()
def __other__(self, sock):
pass
def __416__(self, sock):
self.error_counter._404_ += 1
self.progress.go_in = 0
self.progress.done_inc = 0
time.sleep(0.1)
self.run()
def __recv_loop__(self, sock, buff):
while True:
if self.opareq.cut:
self.getCut()
if self.opareq.pause:
self.buffer(buff)
self.getPause()
break
# if self.opareq.wait:
# self.getWait()
last_len = len(buff)
rest = self.progress.length - self.progress.go_inc
try:
if rest == 0:
if len(buff) != 0:
self.buffer(buff)
buff = []
self.close()
break
elif rest < 4096:
buff += sock.recv(rest)
else:
buff += sock.recv(4096)
except:
self.error_counter.recv_error += 1
self.buffer(buff[:last_len])
return
if len(buff) == last_len:
self.error_counter.recv_error += 1
if len(buff) != 0:
self.buffer(buff)
return
if len(buff) - last_len > rest:
self.error_counter.recv_error += 1
return
self.progress.go(len(buff) - last_len)
if self.progress.go_inc >= self.progress.length:
self.buffer(buff[:self.progress.length - self.progress.done_inc - self.buff_inc])
self.close()
break
elif len(buff) >= TMP_BUFFER_SIZE:
self.buffer(buff)
buff = b''
def close(self):
self.progress.globalprog.checkAllGoEnd()
self.opareq.clear()
def pause(self):
self.opareq.pause = True
def getPause(self):
self.progress.status.pause()
self.opareq.pause = False
def makeSocketPacket(self):
range_format = self.url.range_format
Range = (self.progress.begin + self.progress.go_inc, self.progress.end)
add_headers = {
'Host': self.target.host,
'Connection': 'keep-alive',
}
if range_format[0] == '&':
path, query = splitquery(self.target.path)
query_dict = extract_query(query)
range_format = range_format % Range
for i in range_format[1:].split('&'):
param_key, param_value = splitvalue(i)
query_dict[param_key] = param_value
new_query = urlencode(query_dict)
http_head_top = 'GET %s HTTP/1.1\r\n' % ('%s?%s' % (path, new_query))
packet = http_head_top + '%s\r\n\r\n'
add_headers = {
'Host': self.target.host,
'Connection': 'keep-alive'
}
else:
http_head_top = 'GET %s HTTP/1.1\r\n' % self.target.path
packet = http_head_top + '%s\r\n\r\n'
range_field = range_format % Range
key_value = [i.strip() for i in range_field.split(':')]
key = key_value[0]
value = key_value[1]
add_headers[key] = value
add_headers['Accept-Ranges'] = 'bytes'
request_headers = dict(self.url.headers.items())
request_headers.update(add_headers)
request_headers_str = []
for i in request_headers.items():
request_headers_str.append(': '.join(i))
packet = packet % '\r\n'.join(request_headers_str)
return str.encode(str(packet))
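    # Illustrative note (header values below are assumptions, not taken from a real
    # capture): for a header-style range_format such as 'Range: bytes=%d-%d' the
    # packet built above is a raw HTTP request along the lines of
    #   GET /some/path HTTP/1.1\r\n
    #   Host: example.com\r\nConnection: keep-alive\r\nRange: bytes=0-1023\r\n
    #   Accept-Ranges: bytes\r\n\r\n
    # with the url's own headers merged in before encoding.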
def getWait(self):
time.sleep(self.opareq.wait)
def getSwitch(self):
next_urlid = self.getHandler().url.getNextId(self.urlid)
self.loadUrl(next_urlid)
self.error_counter.clear()
def buffer(self, buff):
with self.__buff_lock__:
self.buff.append(buff)
self.buff_inc += len(buff)
self.progress.globalprog.checkBuffer(len(buff))
def clearBuffer(self):
self.buff = []
self.buff_inc = 0
def releaseBuffer(self, f):
with self.__buff_lock__:
f.seek(self.progress.begin + self.progress.done_inc)
total_buff = 0
for block in self.buff:
f.write(block)
total_buff += len(block)
self.progress.done(total_buff)
self.clearBuffer()
def cutRequest(self, Range):
last_range = [self.progress.begin, self.progress.end]
self.opareq.cut = [Range[0], Range[1]]
while True:
if self.isEnd() or ((self.isReady() and not self.isRunning() and
not self.getHandler().thrpool.getThreadsFromName('Nbdler-SelfCheck')) or \
not self.opareq.cut):
break
time.sleep(0.1)
return [self.progress.end, last_range[1]] if last_range[1] != self.progress.end else []
def getCut(self):
while self.progress.begin + self.progress.go_inc >= self.opareq.cut[0]:
self.opareq.cut[0] += self.progress.globalprog.handler.file.BLOCK_SIZE
if self.opareq.cut[0] >= self.opareq.cut[1]:
retrange = []
else:
retrange = self.opareq.cut
if retrange:
self.progress.globalprog.cut(self.progress, retrange)
self.opareq.cut = []
# def __str__(self):
# return
def parse_headers(http_msg):
http_msg = bytes.decode(http_msg)
status_bar = http_msg[:http_msg.index('\r\n') + 2]
status = int(status_bar.split(' ')[1])
header = http_msg[http_msg.index('\r\n') + 2:]
res_headers = []
for i in header.split('\r\n'):
if i:
name = i[:i.index(':')].lower().strip()
value = i[i.index(':') + 1:].lstrip()
res_headers.append((name, value))
return status, res_headers
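# Minimal usage sketch (made-up response bytes): parse_headers takes the raw response
# up to the blank line and returns the status code plus lower-cased (name, value)
# pairs, e.g.
#   parse_headers(b'HTTP/1.1 206 Partial Content\r\nContent-Length: 1024')
#   # -> (206, [('content-length', '1024')])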
def extract_query(query_str):
querys = {}
if query_str:
for i in query_str.split('&'):
key_value = splitvalue(i)
querys[key_value[0]] = key_value[1]
return querys
|
SatadishaModule_final_trie.py
|
# coding: utf-8
# In[298]:
import sys
import re
import string
import csv
import random
import time
#import binascii
#import shlex
import numpy as np
import pandas as pd
from itertools import groupby
from operator import itemgetter
from collections.abc import Iterable
from collections import OrderedDict
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from scipy import stats
#from datasketch import MinHash, MinHashLSH
import NE_candidate_module as ne
import Mention
import threading, queue
import time
import datetime
import copy
import trie as trie
# In[324]:
#---------------------Existing Lists--------------------
cachedStopWords = stopwords.words("english")
tempList=["i","and","or","other","another","across","were","you","then","still","is","while","till","nor","perhaps","otherwise","until","sometimes","sometime","seem","cannot","seems","because","can","like","into","able","unable","either","neither","if","we","it","else","elsewhere","how","not","what","who","when","where","where's","where’s","where'd","where’d","where'll","where’ll","who's","who’s","he's","he’s","he’d","he'd","she's","she’s","she’d","she'd","let","today","tomorrow","tonight","let's","let’s","lets","know","make","oh","via","i","yet","must","mustnt","mustn't","mustn’t","i'll","i’ll","you'll","you’ll","we'll","we’ll","done","doesnt","doesn't","doesn’t","dont","don't","don’t","did","didnt","didn't","didn’t","much","without","could","couldn't","couldn’t","would","wouldn't","wouldn’t","should","shouldn't","shouldn’t","shall","isn't","isn’t","hasn't","hasn’t","was","wasn't","wasn’t","also","let's","let’s","let","well","just","everyone","anyone","noone","none","someone","theres","there's","there’s","everybody","nobody","somebody","anything","else","elsewhere","something","nothing","everything","i'd","i’d","i’m","won't","won’t","i’ve","i've","they're","they’re","we’re","we're","we'll","we’ll","we’ve","we've","they’ve","they've","they’d","they'd","they’ll","they'll","again","you're","you’re","you've","you’ve","thats","that's",'that’s','here’s',"here's","what's","what’s","i’m","i'm","a","so","except","arn't","aren't","arent","this","when","it","it’s","it's","he's","she's","she'd","he'd","he'll","she'll","she’ll","many","can't","cant","can’t","werent","weren't","were’t","even","yes","no","these","here","there","to","maybe","<hashtag>","<hashtag>.","ever","every","never","there's","there’s","whenever","wherever","however","whatever","always"]
prep_list=["in","at","of","on","with","by","&;"] #includes common conjunction as well
article_list=["a","an","the"]
day_list=["sunday","monday","tuesday","wednesday","thursday","friday","saturday","mon","tues","wed","thurs","fri","sat","sun"]
month_list=["january","february","march","april","may","june","july","august","september","october","november","december","jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"]
for item in tempList:
if item not in cachedStopWords:
cachedStopWords.append(item)
cachedStopWords.remove("don")
#cachedStopWords.remove("may")
cachedTitles = ["mr.","mr","mrs.","mrs","miss","ms","sen.","dr","dr.","prof.","president","congressman"]
chat_word_list=["please","4get","ooh","idk","oops","yup","stfu","uhh","2b","dear","yay","btw","ahhh","b4","ugh","ty","cuz","coz","sorry","yea","asap","ur","bs","rt","lfmao","slfmao","u","r","nah","umm","ummm","thank","thanks","congrats","whoa","rofl","ha","ok","okay","hey","hi","huh","ya","yep","yeah","fyi","duh","damn","lol","omg","congratulations","fuck","wtf","wth","aka","wtaf","xoxo","rofl","imo","wow","fck","haha","hehe","hoho"]
#string.punctuation.extend('“','’','”')
#---------------------Existing Lists--------------------
# In[300]:
class SatadishaModule():
def __init__(self):
print("hello")
#self.batch=batch
#self.batch=self.batch[:3000:]
self.counter=0
#self.extract()
def flatten(self,mylist, outlist,ignore_types=(str, bytes, int, ne.NE_candidate)):
if mylist !=[]:
for item in mylist:
#print not isinstance(item, ne.NE_candidate)
if isinstance(item, list) and not isinstance(item, ignore_types):
self.flatten(item, outlist)
else:
if isinstance(item,ne.NE_candidate):
item.phraseText=item.phraseText.strip(' \t\n\r')
item.reset_length()
else:
if type(item)!= int:
item=item.strip(' \t\n\r')
outlist.append(item)
return outlist
def normalize(self,word):
strip_op=word
strip_op=(((strip_op.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()).lower()
strip_op=(strip_op.lstrip('“‘’”')).rstrip('“‘’”')
#strip_op= self.rreplace(self.rreplace(self.rreplace(strip_op,"'s","",1),"’s","",1),"’s","",1)
if strip_op.endswith("'s"):
li = strip_op.rsplit("'s", 1)
return ''.join(li)
elif strip_op.endswith("’s"):
li = strip_op.rsplit("’s", 1)
return ''.join(li)
else:
return strip_op
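    # Behaviour sketch (example tokens are made up): normalize() strips surrounding
    # ASCII punctuation and smart quotes, lower-cases, and drops a trailing
    # possessive, e.g.
    #   normalize('“Obama’s”')  # -> 'obama'
    #   normalize('(NASA),')    # -> 'nasa'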
#@profile
def extract(self,batch,batch_number):
#df = read_csv('eric_trump.csv', index_col='ID', header=0, encoding='utf-8')
print("Phase I extracting now")
time_in=time.time()
self.batch=batch
#output.csv
#df_out= DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'usertype', 'TweetSentence', 'phase1Candidates'))
self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','start_time','entry_batch','annotation'))
if(self.counter==0):
#self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','correct_candidates_tweet'))
#dict1 = {'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}
self.CTrie=trie.Trie("ROOT")
self.ME_EXTR=Mention.Mention_Extraction()
self.phase2stopWordList=[]
#self.df_out= pd.DataFrame({'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}, index=[0,])
#%%timeit -o
#module_capital_punct.main:
        # Note: this loop was originally run over 100 tuples for testing purposes;
        # it is not needed when tuples are processed one at a time.
#if(self.counter==0):
#initializing candidateBase with a dummy node
#self.interCWSGap={}
#candidateBase={}
#NE_container=DataFrame(columns=('candidate', 'frequency', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'))
count=0
ne_count=0
userMention_count=0
#token_count=0
NE_list_phase1=[]
UserMention_list=[]
df_holder=[]
#--------------------------------------PHASE I---------------------------------------------------
for index, row in self.batch.iterrows():
#print(index)
now = datetime.datetime.now()
#now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
#hashtags=str(row['Discussion'])
hashtags=str(row['HashTags'])
user=str(row['User'])
#userType=str(row['User Type'])
tweetText=str(row['TweetText'])
#correct_candidates_tweet=str(row['Mentions'])
#print(str(index))
annot_raw=str(row['mentions_other'])
split_list=annot_raw.split(";")
#split_listFilter=list(filter(lambda element: element.strip()!='', split_list))
split_listFilter=list(filter(None, split_list))
#annotations in list of list structure
filtered_2_times=list(map(lambda element: list(filter(None, element.split(','))), split_list))
#capitalization module
#if all words are capitalized:
# print(index)
# if tweetText.isupper():
# print(index,tweetText)
# dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
# df_holder.append(dict1)
# elif tweetText.islower():
# print(index,tweetText)
# print("",end="")
# dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
# df_holder.append(dict1)
#else:
ne_List_final=[]
userMention_List_final=[]
#pre-modification: returns word list split at whitespaces; retains punctuation
tweetSentences=list(filter (lambda sentence: len(sentence)>1, tweetText.split('\n')))
tweetSentenceList_inter=self.flatten(list(map(lambda sentText: sent_tokenize(sentText.lstrip().rstrip()),tweetSentences)),[])
tweetSentenceList=list(filter (lambda sentence: len(sentence)>1, tweetSentenceList_inter))
#filtering nan values
if(len(filtered_2_times[0])==1):
if(filtered_2_times[0][0]=='nan'):
filtered_2_times[0]=[]
# print(index,filtered_2_times,tweetSentenceList)
for sen_index in range(len(tweetSentenceList)):
sentence=tweetSentenceList[sen_index]
modified_annotations=[self.normalize(candidate)for candidate in filtered_2_times[sen_index]]
annotation=[]
for candidate in modified_annotations:
if(candidate=="nan"):
pass
else:
annotation.append(candidate)
# for i in filtered_2_times[sen_index]:
# if(i=="nan"):
#print(sentence)
#print(sen_index)
#tweetWordList= list(filter(lambda word:(word.strip(string.punctuation))!="",sentence.split()))
phase1Out=""
if((not tweetText.isupper()) &(not tweetText.islower())):
tempList=[]
tempWordList=sentence.split()
#print(tempWordList)
for word in tempWordList:
temp=[]
if "..." in word:
#print("here")
temp=list(filter(lambda elem: elem!='',word.split("...")))
# if(temp1):
# temp=list(map(lambda elem: elem+'...', temp1[:-1]))
# temp.append(temp1[-1])
elif ".." in word:
temp=list(filter(lambda elem: elem!='',word.split("..")))
# if(temp1):
# temp=list(map(lambda elem: elem+'..', temp1[:-1]))
# temp.append(temp1[-1])
elif (("?" in word)&(not word.endswith("?"))):
temp1=list(filter(lambda elem: elem!='',word.split("?")))
if(temp1):
temp=list(map(lambda elem: elem+'?', temp1[:-1]))
temp.append(temp1[-1])
elif ((":" in word)&(not word.endswith(":"))):
temp1=list(filter(lambda elem: elem!='',word.split(":")))
if(temp1):
temp=list(map(lambda elem: elem+':', temp1[:-1]))
temp.append(temp1[-1])
elif (("," in word)&(not word.endswith(","))):
#temp=list(filter(lambda elem: elem!='',word.split(",")))
temp1=list(filter(lambda elem: elem!='',word.split(",")))
if(temp1):
temp=list(map(lambda elem: elem+',', temp1[:-1]))
temp.append(temp1[-1])
elif (("/" in word)&(not word.endswith("/"))):
temp1=list(filter(lambda elem: elem!='',word.split("/")))
if(temp1):
temp=list(map(lambda elem: elem+'/', temp1[:-1]))
temp.append(temp1[-1])
#print(index, temp)
else:
#if word not in string.punctuation:
temp=[word]
if(temp):
tempList.append(temp)
tweetWordList=self.flatten(tempList,[])
#print(tweetWordList)
#token_count+=len(tweetWordList)
#returns position of words that are capitalized
#print(tweetWordList)
tweetWordList_cappos = list(map(lambda element : element[0], filter(lambda element : self.capCheck(element[1]), enumerate(tweetWordList))))
#print(tweetWordList_cappos)
#returns list of stopwords in tweet sentence
combined_list_here=([]+cachedStopWords+article_list+prep_list+chat_word_list)
#combined_list_here.remove("the")
tweetWordList_stopWords=list(filter(lambda word: ((word[0].islower()) & (((word.strip()).strip(string.punctuation)).lower() in combined_list_here))|(word.strip() in string.punctuation)|(word.startswith('@')), tweetWordList))
#returns list of @userMentions
userMentionswPunct=list(filter(lambda phrase: phrase.startswith('@'), tweetWordList))
userMentions=list(map(lambda mention: mention.rstrip(string.punctuation), userMentionswPunct))
userMention_count+=len(userMentions)
userMention_List_final+=userMentions
'''#function to process and store @ user mentions---- thread 1
#print(userMention_List_final)
threading.Thread(target=self.ME_EXTR.ComputeAll, args=(userMention_List_final,)).start()'''
#non @usermentions are processed in this function to find non @, non hashtag Entities---- thread 2
ne_List_allCheck=[]
#if(len(tweetWordList)>len(tweetWordList_cappos)):
#print(len(tweetWordList),str(len(tweetWordList_cappos)),str(len(tweetWordList_stopWords)))
if((len(tweetWordList))>(len(tweetWordList_cappos))):
#q = queue.Queue()
#threading.Thread(target=self.trueEntity_process, args=(tweetWordList_cappos,tweetWordList,q)).start()
ne_List_allCheck= self.trueEntity_process(tweetWordList_cappos,tweetWordList)
#ne_List_allCheck= q.get()
ne_count+=len(ne_List_allCheck)
ne_List_final+=ne_List_allCheck
#write row to output dataframe
if(len(tweetWordList)==len(tweetWordList_cappos)):
phase1Out="nan"
if(len(ne_List_allCheck)>0):
for candidate in ne_List_allCheck:
position = '*'+'*'.join(str(v) for v in candidate.position)
position=position+'*'
candidate.set_sen_index(sen_index)
phase1Out+=(((candidate.phraseText).lstrip(string.punctuation)).strip())+ '::'+str(position)+"||"
else:
phase1Out="nan"
#print(self.df_out.columns)
dict1 = {'tweetID':str(index), 'sentID':str(sen_index), 'hashtags':hashtags, 'user':user, 'TweetSentence':sentence, 'phase1Candidates':phase1Out,'start_time':now,'entry_batch':batch_number,'annotation':annotation}
df_holder.append(dict1)
#self.df_out.append(outrow)
#self.df_out=self.df_out.append(outrow,ignore_index=True)
for candidate in ne_List_final:
#self.insert_dict (candidate,self.NE_container,candidateBase,index,candidate.sen_index,batch_number)
candidateText=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
candidateText=(candidateText.lstrip('“‘’”')).rstrip('“‘’”')
candidateText= self.rreplace(self.rreplace(self.rreplace(candidateText,"'s","",1),"’s","",1),"’s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
if not ((candidateText in combined)|(candidateText.isdigit())|(self.is_float(candidateText))):
self.CTrie.__setitem__(candidateText.split(),len(candidateText.split()),candidate.features,batch_number)
#self.printList(ne_List_final)
#if(userMention_List_final):
# print(userMention_List_final)
NE_list_phase1+=ne_List_final
UserMention_list+=userMention_List_final
#print ("\n")
#fieldnames=['candidate','freq','length','cap','start_of_sen','abbrv','all_cap','is_csl','title','has_no','date','is_apostrp','has_inter_punct','ends_verb','ends_adverb','change_in_cap','topic_ind','entry_time','entry_batch','@mention']
#updated_NE_container=[]
'''#Updating trie with @mention info
self.CTrie.updateTrie("",self.ME_EXTR)'''
time_out=time.time()
#for display purposes Iterating through the trie
'''candidateBase= self.CTrie.__iter__()
for node in candidateBase:
print(node)'''
'''for key in self.NE_container.keys():
val=self.NE_container[key]+[str(ME_EXTR.checkInDictionary(key))]
#index+=1
#updated_NE_container[key]=val
dict1 = {'candidate':key, 'freq':val[0],'length':val[1],'cap':val[2],'start_of_sen':val[3],'abbrv':val[4],'all_cap':val[5],'is_csl':val[6],'title':val[7],'has_no':val[8],'date':val[9],'is_apostrp':val[10],'has_inter_punct':val[11],'ends_verb':val[12],'ends_adverb':val[13],'change_in_cap':val[14],'topic_ind':val[15],'entry_time':val[16],'entry_batch':val[17],'@mention':val[18]}
updated_NE_container.append(dict1)'''
'''with open('candidate_base.csv', 'w') as output_candidate:
#with open('candidates.csv', 'w') as output_candidate:
writer = csv.writer(output_candidate)
writer.writerow(fieldnames)
for k, v in updated_NE_container.items():
writer.writerow([k] + v)'''
#print("Total number of tokens processed: "+str(token_count))
#print ("Total number of candidate NEs extracted: "+str(len(candidateBase)))
#print(self.NE_container.items())
#freqs=pd.read_csv('candidate_base.csv', encoding = 'utf-8',delimiter=',')
#freqs = pd.DataFrame(updated_NE_container, columns=fieldnames)
#freqs = pd.DataFrame()
#freqs=pd.DataFrame(list(self.NE_container.items()), orient='index')#columns=fieldnames)
self.append_rows(df_holder)
self.counter=self.counter+1
#return (copy.deepcopy(self.df_out),copy.deepcopy(freqs),time_in,time_out)
return (self.df_out,self.CTrie,time_in,time_out)
#return sorted_candidateBase
#@profile
def append_rows(self,df_holder):
df = pd.DataFrame(df_holder)
self.df_out=self.df_out.append(df)
self.df_out.to_csv('tweet_base.csv' ,sep=',', encoding='utf-8')
def rreplace(self,s, old, new, occurrence):
if s.endswith(old):
li = s.rsplit(old, occurrence)
return new.join(li)
else:
return s
def stopwordReplace(self, candidate):
if(candidate.features[ne.is_quoted]):
return candidate
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
wordlist=list(filter(lambda word: word!='', candidate.phraseText.split()))
pos=candidate.position
#print(candidate.phraseText,wordlist,pos)
start=0
flag=False
while(start!=len(pos)):
if(wordlist[start].lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined):
#flag=True
break
start+=1
end=len(pos)-1
while(end>=0):
#print(wordlist[end])
if(wordlist[end].lstrip(string.punctuation).rstrip(string.punctuation).strip() not in combined):
#flag=True
break
end-=1
#print(start,end)
updated_pos=pos[start:(end+1)]
updated_phrase=' '.join(wordlist[start:(end+1)])
#print(updated_pos,updated_phrase)
candidate.phraseText=updated_phrase
candidate.position=updated_pos
return candidate
# In[301]:
#candidate: 'frequency','length', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'
def is_float(self,string):
try:
f=float(string)
if(f==0.0):
return True
else:
return ((f) and (string.count(".")==1))
#return True# True if string is a number with a dot
except ValueError: # if string is not a number
return False
def insert_dict(self,candidate,NE_container,candidateBase,tweetID,sentenceID,batch):
key=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
key=(key.lstrip('“‘’”')).rstrip('“‘’”')
key= self.rreplace(self.rreplace(self.rreplace(key,"'s","",1),"’s","",1),"’s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
try:
if ((key in combined)|(key.isdigit())|(self.is_float(key))):
return
except TypeError:
print(key)
tweetID=str(tweetID)
sentenceID=str(sentenceID)
if key in self.NE_container:
feature_list=self.NE_container[key]
feature_list[0]+=1
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
else:
now = datetime.datetime.now()
now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
feature_list=[0]*17
feature_list[0]+=1
feature_list[1]=candidate.length
#call background process to check for non capitalized occurences
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
feature_list.append(now)
feature_list.append(batch)
self.NE_container[key] = feature_list
#insert in candidateBase
'''if key in candidateBase.keys():
#candidateBase[key]=candidateBase[key]+[str(tweetID)+":"+str(sentenceID)]
if(tweetID in candidateBase[key]):
if(sentenceID in candidateBase[key][tweetID] ):
candidateBase[key][tweetID][sentenceID]=candidateBase[key][tweetID][sentenceID]+1
else:
candidateBase[key][tweetID][sentenceID]=1
else:
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1
#c=[(y,str(idx)) for idx,y in enumerate( a) if y not in b]
#candidateBase[key]
else:
#candidateBase[key]=[str(tweetID)+":"+str(sentenceID)]
candidateBase[key]={}
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1'''
return
# In[302]:
def printList(self,mylist):
print("["),
#print "[",
for item in mylist:
if item != None:
if isinstance(item,ne.NE_candidate):
item.print_obj()
#print (item.phraseText)
else:
print (item+",", end="")
#print item+",",
#print "]"
print("]")
return
# In[303]:
# In[304]:
def consecutive_cap(self,tweetWordList_cappos,tweetWordList):
output=[]
#identifies consecutive numbers in the sequence
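#e.g. capitalized positions [0, 1, 2, 5, 6] group into [[0, 1, 2], [5, 6]] before the preposition/article merge below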
#print(tweetWordList_cappos)
for k, g in groupby(enumerate(tweetWordList_cappos), lambda element: element[0]-element[1]):
output.append(list(map(itemgetter(1), g)))
count=0
if output:
final_output=[output[0]]
for first, second in (zip(output,output[1:])):
#print(first,second)
#print(tweetWordList[first[-1]])
if ((not (tweetWordList[first[-1]]).endswith('"'))&((second[0]-first[-1])==2) & (tweetWordList[first[-1]+1].lower() in prep_list)):
(final_output[-1]).extend([first[-1]+1]+second)
elif((not (tweetWordList[first[-1]].endswith('"')))&((second[0]-first[-1])==3) & (tweetWordList[first[-1]+1].lower() in prep_list)& (tweetWordList[first[-1]+2].lower() in article_list)):
(final_output[-1]).extend([first[-1]+1]+[first[-1]+2]+second)
else:
final_output.append(second)
#merge_positions.append(False)
else:
final_output=[]
return final_output
# In[305]:
#basically splitting the original NE_candidate text and building individual object from each text snippet
def build_custom_NE(self,phrase,pos,prototype,feature_index,feature_value):
#print("Enters")
position=pos
custom_NE= ne.NE_candidate(phrase,position)
for i in range(15):
custom_NE.set_feature(i,prototype.features[i])
custom_NE.set_feature(feature_index,feature_value)
if (feature_index== ne.is_csl) & (feature_value== True):
custom_NE.set_feature(ne.start_of_sentence, False)
custom_NE=self.entity_info_check(custom_NE)
return custom_NE
# In[306]:
def abbrv_algo(self,ne_element):
'''Abbreviation detection:
trailing punctuation present:
period: several letter-period pairs in a short phrase -> abbreviation;
otherwise the trailing period is stripped
other punctuation: stripped (unless preceded by a space)
no trailing punctuation:
short, all-caps, punctuation-free phrase -> abbreviation
internal uppercase-period-uppercase pattern with no spaces -> abbreviation
otherwise left unchanged
'''
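#illustrative behaviour: "U.S." and "NASA" are flagged as abbreviations; "Trump." keeps its text but the trailing period is stripped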
phrase= ne_element.phraseText
#print("=>"+phrase)
#since no further split occurs we can set remaining features now
ne_element.set_feature(ne.capitalized, True)
if ne_element.phraseText.isupper():
ne_element.set_feature(ne.all_capitalized, True)
else:
ne_element.set_feature(ne.all_capitalized, False)
abbreviation_flag=False
p=re.compile(r'[^a-zA-Z\d\s]$')
match_list = p.findall(phrase)
if len(match_list)>0:
#print("Here")
if phrase.endswith('.'):
#print("Here")
p1= re.compile(r'([a-zA-Z][\.]\s*)')
match_list = p1.findall(phrase)
if ((len(match_list)>1) & (len(phrase)<6)):
#print ("1. Found abbreviation: "+phrase)
abbreviation_flag= True
else:
if (phrase[-2]!=' '):
phrase= phrase[:-1]
else:
#if phrase.endswith(string.punctuation):
if (phrase[-2]!=' '):
phrase= phrase[:-1]
#if not (phrase.endswith('?')|phrase.endswith('!')|phrase.endswith(')')|phrase.endswith('>')):
#phrase= phrase[:-1]
else:
p2=re.compile(r'([^a-zA-Z0-9_\s])')
match_list = p2.findall(phrase)
if ((len(match_list)==0) & (phrase.isupper()) & (len(phrase)<7)& (len(phrase)>1)):
#print ("2. Found abbreviation!!: "+phrase)
abbreviation_flag= True
else:
#print("Here-> "+phrase)
p3= re.compile(r'([A-Z][.][A-Z])')
p4= re.compile(r'\s')
match_list = p3.findall(phrase)
match_list1 = p4.findall(phrase)
if ((len(match_list)>0) & (len(match_list1)==0)):
abbreviation_flag= True
#print ("3. Found abbreviation!!: "+phrase)
#element= ne.NE_candidate(phrase.strip())
ne_element.phraseText=phrase
ne_element.reset_length()
ne_element.set_feature(ne.abbreviation, abbreviation_flag)
return ne_element
# In[307]:
def punct_clause(self,NE_phrase_in):
NE_phrases=self.entity_info_check(NE_phrase_in)
cap_phrases=NE_phrases.phraseText.strip()
final_lst=[]
#print (cap_phrases,NE_phrases.features[ne.date_indicator])
if (re.compile(r'[^a-zA-Z0-9_\s]')).findall(cap_phrases):
#case of intermediate punctuations: handles abbreviations
p1= re.compile(r'(?:[a-zA-Z0-9][^a-zA-Z0-9_\s]\s*)')
match_lst = p1.findall(cap_phrases)
#print(match_lst)
if match_lst:
index= (list( p1.finditer(cap_phrases) )[-1]).span()[1]
p= re.compile(r'[^a-zA-Z\d\s]')
match_list = p.findall(cap_phrases)
p2=re.compile(r'[^a-zA-Z\d\s]$') #ends with punctuation
if ((len(match_list)>0)&(len(match_lst)>0)&((len(match_list)-len(match_lst))>0)):
if (p2.findall(cap_phrases)):
#only strips trailing punctuations, not intermediate ones following letters
cap_phrases = cap_phrases[0:index]+re.sub(p, '', cap_phrases[index:])
NE_phrases.phraseText= cap_phrases
#comma separated NEs
#lst=filter(lambda(word): word!="", re.split('[,]', cap_phrases))
#print ("=>"+ cap_phrases)
start_of_sentence_fix=NE_phrases.features[ne.start_of_sentence]
#temp=re.split("\...", cap_phrases)
#inter=self.flatten(list(map(lambda elem: re.split('[,:!…]',elem),temp)),[])
#print("'''",inter)
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
splitList=re.split('["‘’“”()/,;:!?…]',cap_phrases)
splitList=list(filter(lambda word: ((word!="")&(word.lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined)), splitList))
#print("==",splitList)
wordlstU=list(map(lambda word: word.strip().strip(string.punctuation), splitList))
wordlstU=list(filter(lambda word: word!="", wordlstU))
wordlst=list(filter(lambda word: ((word.strip().strip(string.punctuation))[0].isupper()|(word.strip().strip(string.punctuation))[0].isdigit()), wordlstU))
#print(":::",wordlst)
if ((NE_phrases.features[ne.date_indicator]==False)):
#print("hehe")
if(len(splitList)>1):
if(len(wordlst)>0):
#print("here::")
pos=NE_phrases.position
#accumulate [segment, positions...] units without shadowing the stop-word list 'combined'
word_units=[]
prev=0
for i in range(len(wordlst)):
word=wordlst[i]
word_len=len(list(filter(lambda individual_word: individual_word!="", re.split('[ ]', word))))
word_pos=pos[(prev):(prev+word_len)]
prev=prev+word_len
word_units+=[[word]+word_pos]
lst_nsw=list(filter(lambda element: (((str(element[0])).strip(string.punctuation).lower() not in combined) & (not (str(element[0])).strip(string.punctuation).isdigit()) & (len(str(element[0]))>1)), word_units))
#print ("++",lst_nsw)
if(lst_nsw):
final_lst= list(map(lambda element:self.build_custom_NE(str(element[0]),element[1:],NE_phrases,ne.is_csl,True), lst_nsw))
final_lst[0].set_feature(ne.start_of_sentence, NE_phrases.features[ne.start_of_sentence])
else:
final_lst=[]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
#check abbreviation
#print("++",final_lst)
if(final_lst):
final_lst= list(map(lambda phrase: self.abbrv_algo(phrase), final_lst))
#print(lst)
return final_lst
# In[308]:
#%%timeit -o
def f(self,y,sflag,quoteFlag,tweetWordList):
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#print(sflag)
if sflag:
left=""
right=""
lp=(-1)
rp=(-1)
i=0
j=len(y)-1
flag1=False
flag2=False
x=[]
while (((flag1==False)|(flag2==False))&((j-i)>0)):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(flag1,flag2)
#if((flag1==False)|(flag2==False)):
# while (((j-i)!=0)|((flag1==False)|(flag2==False))):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print(left)
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(lp,rp)
if(lp==rp):
if(lp!=-1):
x=[y[lp]]
else:
x=y[lp:(rp+1)]
else:
x=y
#print(x)
if(x):
list1=list(map(lambda word: tweetWordList[word], x))
phrase=" ".join(e for e in list1)
#print(phrase)
phrase1="".join(list1)
#if not ((phrase[0].isdigit()) & (len(x)==1)):
if not (phrase1.strip().isdigit()):
NE_phrase= ne.NE_candidate(phrase.strip(),x)
if 0 in x:
NE_phrase.set_feature(ne.start_of_sentence,True)
else:
NE_phrase.set_feature(ne.start_of_sentence,False)
NE_phrase.set_feature(ne.is_quoted,quoteFlag)
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
#print("====>>",NE_phrase.phraseText)
return NE_phrase
# In[309]:
def capCheck(self,word):
combined_list=[]+cachedStopWords+prep_list+chat_word_list+article_list
if word.startswith('@'):
return False
elif "<Hashtag" in word:
return False
#elif (((word.strip('“‘’”')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower() in combined_list:
elif (((word.strip('“‘’”')).lstrip(string.punctuation)).rstrip(string.punctuation)) in combined_list:
# if((word=="The")|(word=="THE")):
# return True
# else:
return True
elif word[0].isdigit():
return True
else:
p=re.compile(r'^[\W]*[A-Z]')
l= p.match(word)
if l:
return True
else:
return False
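#illustrative examples: capCheck("@user") -> False, capCheck("2016") -> True, capCheck("Toronto") -> True (assuming "Toronto" is not in the stop-word lists)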
# In[310]:
def title_check(self,ne_phrase):
title_flag=False
words=ne_phrase.phraseText.split()
for word in words:
if word.lower() in cachedTitles:
title_flag= True
break
ne_phrase.set_feature(ne.title,title_flag)
return ne_phrase
# In[311]:
def entity_info_check(self,ne_phrase):
flag1=False #has number
flag3=False
flag_ind=[] #is number
month_ind=[]
date_num_holder=[]
words=ne_phrase.phraseText.split()
for word in words:
word=(word.strip()).rstrip(string.punctuation).lower()
punct_flag=False
for char in word:
if ((char in string.punctuation)|(char in ['“','‘','’','”','…'])):
punct_flag=True
break
#if ((not word.isalpha())& (not "'s" in word) & (not "’s" in word)):'‘“"’”
if ((not word.isalpha())& (not punct_flag)):
flag_ind+=[True]
if word.isdigit():
date_num_holder+=['num']
else:
date_num_holder+=['alpha']
else:
flag_ind+=[False]
if word in month_list:
month_ind+=[True]
date_num_holder+=['month']
elif word in day_list:
date_num_holder+=['day']
elif word in prep_list:
date_num_holder+=['preposition']
elif word in article_list:
date_num_holder+=['article']
else:
#print("=>"+word)
date_num_holder+=['string']
if True in flag_ind:
flag1=True
if True in month_ind:
flag3=True
ne_phrase.set_feature(ne.has_number,flag1)
ne_phrase.set_feature(ne.date_indicator,flag3)
ne_phrase.set_date_num_holder(date_num_holder)
return ne_phrase
# In[312]:
#removing commonly used expletives, enunciated chat words and other common words (like days of the week, common expressions)
def slang_remove(self,ne_phrase):
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
p1= re.compile(r'([A-Za-z]+)\1\1{1,}')
match_lst = p1.findall(phrase)
if phrase in article_list:
return True
elif phrase in day_list:
return True
#elif phrase in month_list:
#return True
elif match_lst:
return True
else:
return False
# In[313]:
def apostrope_check(self,ne_phrase):
apostrophe="'s"
bad_apostrophe="’s"
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
if (apostrophe in phrase):
if (phrase.endswith(apostrophe)):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(apostrophe))
elif (bad_apostrophe in phrase):
if phrase.endswith(bad_apostrophe):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(bad_apostrophe))
else:
ne_phrase.set_feature(ne.is_apostrophed,-1)
return ne_phrase
# In[314]:
def punctuation_check(self,ne_phrase):
holder=[]
punctuation_holder=[]
flag_holder=[]
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
for i in range(len(phrase)):
if (phrase[i] in string.punctuation):
holder+=[i]
for i in holder:
if ((i<(len(phrase)-1)) & (phrase[i]=="'") & (phrase[i+1]=="s")):
flag_holder+=[False]
elif ((i==(len(phrase)-1)) & (phrase[i]=="'")):
flag_holder+=[False]
else:
flag_holder+=[True]
punctuation_holder+=[i]
#print(flag_holder)
ne_phrase.set_punctuation_holder(punctuation_holder)
if True in flag_holder:
ne_phrase.set_feature(ne.has_intermediate_punctuation,True)
else:
ne_phrase.set_feature(ne.has_intermediate_punctuation,False)
return ne_phrase
# In[315]:
def tense_check(self,ne_phrase):
words=(((ne_phrase.phraseText.strip()).rstrip(string.punctuation)).lower()).split()
verb_flag=False
adverb_flag=False
if (len(words)==1):
if words[0].endswith("ing"):
verb_flag=True
if words[0].endswith("ly"):
adverb_flag=True
ne_phrase.set_feature(ne.ends_like_verb,verb_flag)
ne_phrase.set_feature(ne.ends_like_adverb,adverb_flag)
return ne_phrase
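#illustrative examples (single-word candidates only): "Running" -> ends_like_verb, "Suddenly" -> ends_like_adverb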
# In[316]:
def capitalization_change(self,ne_element):
phrase=((ne_element.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()
val=-1
topic_indicator=False
p1= re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+[A-Za-z]+') #BREAKING: Toronto Raptors
p2= re.compile(r'([A-Z]{1}[a-z]+)+[^A-Za-z]*\s+[A-Z]{4,}') #The DREAMIEST LAND
match_lst1 = p1.findall(phrase)
match_lst2 = p2.findall(phrase)
if (match_lst1):
if not phrase.isupper():
p3=re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+')
val=list(p3.finditer(phrase))[-1].span()[1]
if(":" in phrase):
topic_indicator=True
ne_element.set_feature(ne.change_in_capitalization,val)
elif (match_lst2):
#print ("GOTIT2: "+phrase)
p3=re.compile(r'([A-Z]{1}[a-z]+)+')
val=list(p3.finditer(phrase))[-1].span()[1]
ne_element.set_feature(ne.change_in_capitalization,val)
else:
ne_element.set_feature(ne.change_in_capitalization,val)
ne_element.set_feature(ne.has_topic_indicator,topic_indicator)
return ne_element
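#illustrative behaviour: for "BREAKING: Toronto Raptors" the position where the leading all-caps block ends is recorded
#and has_topic_indicator is set because of the ':'; when no such pattern is found, change_in_capitalization stays -1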
def quoteProcess(self,unitQuoted, tweetWordList):
candidateString=""
retList=[]
matches=[]
quoteMatch=[]
final=[]
flag=False
#print(tweetWordList)
list1=list(map(lambda index: tweetWordList[index], unitQuoted))
candidateString=" ".join(list1)
#print("=>",candidateString)
# candidateString=""
# for index in range(len(unitQuoted)-1):
# candidateString+=tweetWordList[unitQuoted[index]]+" "
# candidateString+=tweetWordList[unitQuoted[-1]]
# print("=>",candidateString)
flagOne=False
flagTwo=False
flagThree=False
flagFour=False
p= re.compile(r'[^\S]*([\'].*?[\'])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([\'].*?[\'])[^\s]*')
p2=re.compile(r'[^\s]*([\'].*?[\'])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagOne=True
if(not flagOne):
p= re.compile(r'[^\S]*([‘].*?[’])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([‘].*?[’])[^\s]*')
p2=re.compile(r'[^\s]*([‘].*?[’])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagTwo=True
if((not flagOne)&(not flagTwo)):
p= re.compile(r'[^\S]*([“].*?[”])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([“].*?[”])[^\s]*')
p2=re.compile(r'[^\s]*([“].*?[”])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagThree=True
if((not flagOne)&(not flagTwo)&(not flagThree)):
p= re.compile(r'[^\S]*([\"].*?[\"])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([\"].*?[\"])[^\s]*')
p2=re.compile(r'[^\s]*([\"].*?[\"])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagFour=True
if (flagOne|flagTwo|flagThree|flagFour):
flag=True
for index in indices:
span= list(index.span())
#print(span[0])
quoteMatch.append([int(span[0]),int(span[1])])
matches+=[int(span[0]),int(span[1])]
#print(matches)
final+=[(candidateString[0:matches[0]],False)]
for i in range(len(matches)-1):
if([matches[i],matches[i+1]] in quoteMatch):
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),True)]
else:
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),False)]
final+=[(candidateString[matches[-1]:],False)]
final=list(filter(lambda strin: strin[0]!="",final))
final=list(map(lambda strin: (strin[0].strip(),strin[1]),final))
#print(final)
for unit in final:
lst=[]
unitsplit=list(filter(lambda unitString: unitString!='',unit[0].split()))
for splitunit in unitsplit:
lst+=[tweetWordList.index(splitunit,unitQuoted[0])]
retList+=[(lst,unit[1])]
else:
retList+=[(unitQuoted,False)]
#print(retList)
return retList
# In[318]:
def trueEntity_process(self,tweetWordList_cappos,tweetWordList):
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#returns list with position of consecutively capitalized words
#print(tweetWordList_cappos, tweetWordList)
output_unfiltered = self.consecutive_cap(tweetWordList_cappos,tweetWordList)
#print("==>",output_unfiltered)
#splitting at quoted units
output_quoteProcessed=[]
start_quote=[]
end_quote=[]
for unitQuoted in output_unfiltered:
unitout=self.quoteProcess(unitQuoted, tweetWordList)
#print("==>",unitout)
for elem in unitout:
mod_out=[]
out=elem[0]
flag=elem[1]
sflag=False
# '’”"
#print(out,flag)
if not (flag):
#for id in range(len(out)):
temp=[]
#print("::",out)
for index in out:
#print(index,tweetWordList[index])
word=(((tweetWordList[index].strip().strip('"“‘’”"')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print("=>"+word)"“‘’”"
if (word):
if (word in combined):
if(len(out)==1):
temp.append(index)
else:
if (word not in prep_list)&(word not in article_list):
temp.append(index)
else:
sflag=True
#else:
#if ((index==0)||()):
#temp.append(index)
# else:
# print("here")
# else:
# print("here")
#print(temp)
for elem in temp:
out.remove(elem)
#out[id]=temp
lst=[]
for k, g in groupby(enumerate(out), lambda elem: elem[1]-elem[0]):
lst=list(map(itemgetter(1), g))
#print("==>",lst)
if(lst):
mod_out.append((lst,sflag,flag))
#print('==>',mod_out)
else:
mod_out=[(out,sflag,flag)]
#print(mod_out)
#print(mod_out)
if(mod_out):
output_quoteProcessed.extend(mod_out)
#print("=====>",output_quoteProcessed)
output= list(filter(lambda element: ((element[0]!=[0])&(element[0]!=[])), output_quoteProcessed))
#print(output)
#consecutive capitalized phrases
consecutive_cap_phrases1=list(map(lambda x: self.f(x[0],x[1],x[2],tweetWordList), output))
consecutive_cap_phrases=list(filter(lambda candidate:(candidate.phraseText!="JUST_DIGIT_ERROR"),consecutive_cap_phrases1))
#self.printList(consecutive_cap_phrases)
#implement the punctuation clause
ne_List_pc=self.flatten(list(map(lambda NE_phrase: self.punct_clause(NE_phrase), consecutive_cap_phrases)),[])
#self.printList(ne_List_pc)
#stopword removal and start-of-sentence
ne_List_pc_sr= list(map(lambda candidate: self.stopwordReplace(candidate), ne_List_pc))
ne_List_pc_checked= list(filter(lambda candidate: (candidate.position!=[0]), ne_List_pc_sr))
#implement title detection
#ne_List_titleCheck= list(map(lambda element: self.title_check(element), ne_List_pc_checked))
#implement slang check and remove
ne_List_slangCheck= list(filter(lambda element: not self.slang_remove(element), ne_List_pc_checked))
#implement apostrophe, tense and punctuation marker with final number check
#ne_List_apostropeCheck= list(map(lambda element: self.apostrope_check(element), ne_List_slangCheck))
#ne_List_punctuationCheck= list(map(lambda element: self.punctuation_check(element), ne_List_apostropeCheck))
ne_List_numCheck=list(filter(lambda candidate: not (candidate.phraseText.lstrip(string.punctuation).rstrip(string.punctuation).strip()).isdigit(), ne_List_slangCheck))
#ne_List_tenseCheck= list(map(lambda element: self.tense_check(element), ne_List_numCheck))
#tracking sudden change in capitalization pattern
#ne_List_capPatCheck= list(map(lambda element: self.capitalization_change(element), ne_List_tenseCheck))
#check on length
ne_List_lengthCheck= list(filter(lambda element: element.length<7, ne_List_numCheck))
ne_List_badWordCheck= list(filter(lambda element:((element.phraseText.strip().strip(string.punctuation).lstrip('“‘’”')).rstrip('“‘’”').lower()) not in combined, ne_List_lengthCheck))
ne_List_allCheck= list(filter(lambda element:(len((element.phraseText.strip().strip(string.punctuation).lstrip('“‘’”')).rstrip('“‘’”'))>1),ne_List_badWordCheck))
#ne_List_allCheck= list(filter(lambda element: (element.phraseText.lower() not in combined), ne_List_double_Check))
#q.put(ne_List_allCheck)
return ne_List_allCheck
#return ne_List_allCheck
# In[319]:
'''This is the main module. It is not written explicitly as a function because the argument being passed
is not fixed here. However, you can call this whole cell as a function and it will call the rest of the
functions in this module to extract candidates and features.
'''
'''#reads input from the database file and converts it to a dataframe. You can change this part accordingly and
#directly convert the argument tuple to the dataframe'''
#Inputs: Collection.csv 500Sample.csv 3.2KSample.csv eric_trump.csv
#df_out.to_csv('TweetBase500.csv')
#--------------------------------------PHASE I---------------------------------------------------
# In[ ]:
#--------------------------------------PHASE II---------------------------------------------------
'''set1 = set(['Melania','Trump'])
set2 = set(['Donald','Trump'])
set3 = set(['Jared','Kushner'])
m1 = MinHash(num_perm=200)
m2 = MinHash(num_perm=200)
m3 = MinHash(num_perm=200)
for d in set1:
m1.update(d.encode('utf8'))
for d in set2:
m2.update(d.encode('utf8'))
for d in set3:
m3.update(d.encode('utf8'))
# Create LSH index
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
lsh.insert("m3", m3)
result = lsh.query(m1)
print("Approximate neighbours with Jaccard similarity", result)
candidates=["donald trump","melania trump", "obama","barack obama","barack"]
listofMinhash=[]
m=MinHash(num_perm=200)
candidate0=set(candidates[0].split())
for d in candidate0:
m.update(d.encode('utf8'))
listofMinhash.append(m)
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
for candidate in candidates[1:]:'''
# In[ ]:
'''
print ("Shingling articles...")
# The current shingle ID value to assign to the next new shingle we
# encounter. When a shingle gets added to the dictionary, we'll increment this
# value.
curShingleID = 0
# Create a dictionary of the articles, mapping the article identifier (e.g.,
# "t8470") to the list of shingle IDs that appear in the document.
candidatesAsShingleSets = {};
candidateNames = []
t0 = time.time()
totalShingles = 0
for k in range(0, len(sorted_NE_container.keys())):
# Read all of the words (they are all on one line) and split them by white space.
words = list(sorted_NE_container.keys())[k].split(" ")
# Retrieve the article ID, which is the first word on the line.
candidateID = k
# Maintain a list of all document IDs.
candidateNames.append(candidateID)
# 'shinglesInDoc' will hold all of the unique shingle IDs present in the current document.
#If a shingle ID occurs multiple times in the document,
# it will only appear once in the set (this is a property of Python sets).
shinglesInCandidate = set()
# For each word in the document...
for index in range(0, len(words)):
# Construct the shingle text by combining three words together.
shingle = words[index]
# Hash the shingle to a 32-bit integer.
#crc = binascii.crc32("")
crc = binascii.crc32(bytes(shingle, encoding="UTF-8")) & (0xffffffff)
# Add the hash value to the list of shingles for the current document.
# Note that set objects will only add the value to the set if the set
# doesn't already contain it.
shinglesInCandidate.add(crc)
# Store the completed list of shingles for this document in the dictionary.
#print(str(words)+": ")
#for i in shinglesInCandidate:
# print('0x%08x' %i)
candidatesAsShingleSets[candidateID] = shinglesInCandidate
# Count the number of shingles across all documents.
totalShingles = totalShingles + (len(words))
# Report how long shingling took.
print ('\nShingling ' + str(str(len(sorted_NE_container.keys()))) + ' candidates took %.2f sec.' % (time.time() - t0))
print ('\nAverage shingles per doc: %.2f' % (totalShingles / len(sorted_NE_container.keys())))
'''
# In[ ]:
'''
# =============================================================================
# Generate MinHash Signatures
# =============================================================================
numHashes=20
numCandidates=len(sorted_NE_container.keys())
# Time this step.
t0 = time.time()
print ('Generating random hash functions...')
# Record the maximum shingle ID that we assigned.
maxShingleID = 2**32-1
nextPrime = 4294967311
# Our random hash function will take the form of:
# h(x) = (a*x + b) % c
# Where 'x' is the input value, 'a' and 'b' are random coefficients, and 'c' is
# a prime number just greater than maxShingleID.
# Generate a list of 'k' random coefficients for the random hash functions,
# while ensuring that the same value does not appear multiple times in the
# list.
def pickRandomCoeffs(k):
# Create a list of 'k' random values.
randList = []
while k > 0:
# Get a random shingle ID.
randIndex = random.randint(0, maxShingleID)
# Ensure that each random number is unique.
while randIndex in randList:
randIndex = random.randint(0, maxShingleID)
# Add the random number to the list.
randList.append(randIndex)
k = k - 1
return randList
# For each of the 'numHashes' hash functions, generate a different coefficient 'a' and 'b'.
coeffA = pickRandomCoeffs(numHashes)
coeffB = pickRandomCoeffs(numHashes)
print ('\nGenerating MinHash signatures for all candidates...')
# List of documents represented as signature vectors
signatures =np.ndarray(shape=(20, numCandidates))
# Rather than generating a random permutation of all possible shingles,
# we'll just hash the IDs of the shingles that are *actually in the document*,
# then take the lowest resulting hash code value. This corresponds to the index
# of the first shingle that you would have encountered in the random order.
# For each document...
for candidateID in candidateNames:
# Get the shingle set for this document.
shingleIDSet = candidatesAsShingleSets[candidateID]
# The resulting minhash signature for this document.
signature = []
# For each of the random hash functions...
for i in range(0, numHashes):
# For each of the shingles actually in the document, calculate its hash code
# using hash function 'i'.
# Track the lowest hash ID seen. Initialize 'minHashCode' to be greater than
# the maximum possible value output by the hash.
minHashCode = nextPrime + 1
# For each shingle in the document...
for shingleID in shingleIDSet:
# Evaluate the hash function.
hashCode = (coeffA[i] * shingleID + coeffB[i]) % nextPrime
# Track the lowest hash code seen.
if hashCode < minHashCode:
minHashCode = hashCode
# Add the smallest hash code value as component number 'i' of the signature.
signature.append(minHashCode)
# Store the MinHash signature for this document.
#signatures.append(signature)
signatures[:,candidateID]=signature
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print(list(np.shape(signatures)))
print ("\nGenerating MinHash signatures took %.2fsec" % elapsed)
#print ('\nsignatures stored in a numpy array...')
# Creates a N x N matrix initialized to 0.
# Time this step.
t0 = time.time()
# For each of the test documents...
for i in range(10, 11):
#for i in range(0, numCandidates):
print(list(sorted_NE_container.keys())[i]+": ",end="")
# Get the MinHash signature for document i.
signature1 = signatures[i]
# For each of the other test documents...
for j in range(0, numCandidates):
if(j!=i):
# Get the MinHash signature for document j.
signature2 = signatures[j]
count = 0
# Count the number of positions in the minhash signature which are equal.
for k in range(0, numHashes):
count = count + (signature1[k] == signature2[k])
# Record the percentage of positions which matched.
estJSim= (count / numHashes)
#print(estJSim)
if (estJSim>=0.5):
print("=>"+list(sorted_NE_container.keys())[j]+", ",end="")
print()
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print ("\nComparing MinHash signatures took %.2fsec" % elapsed)'''
# In[ ]:
'''cap_phrases="Trump:Russia,Afgha"
words=re.split('[,:]', cap_phrases)
print(words)
candidateString='"BS'
p= re.compile(r'(".*?")[^\s]*[\s]*')
indices= (list( p.finditer(candidateString) ))
matches=[]
final=[]
if(indices):
for index in indices:
span= list(index.span())
#print(span[0])
matches+=[int(span[0]),int(span[1])]
print(matches)
final+=[candidateString[0:matches[0]]]
for i in range(len(matches)-1):
final+=[(candidateString[matches[i]:matches[i+1]]).strip()]
final+=[candidateString[matches[-1]:]]
final=list(filter(lambda strin: strin!="",final))
final=list(map(lambda strin: strin.strip(),final))
print(final)'''
# tweets=pd.read_csv("deduplicated_test.csv", header=0, index_col = 0 ,encoding = 'utf-8',delimiter=';')
# tweets=tweets[:1000:]
# Phase1= SatadishaModule()
# for i in range(2):
# Phase1= SatadishaModule()
# Phase1.extract(tweets,1)
|
rfswitch.py
|
"""rf433 device proxy code."""
import collections
import logging
import threading
import Queue
import rcswitch
from pi import proxy
class RFSwitch(proxy.Proxy):
"""433mhz RF Switch proxy implementation."""
def __init__(self, pin, repeats=5):
self._switch = rcswitch.RCSwitch()
self._switch.enableTransmit(pin)
# We repeat commands to devices, as 433Mhz switches
# are not super reliable.
self._repeats = repeats
# We put commands (system code, device code, mode, repeat count)
# on a queue, and then process them on a background thread
# so we don't block the main event loop and can balance
# new commands and repeats more fairly.
self._command_queue = Queue.Queue()
self._exiting = False
self._command_thread = threading.Thread(target=self._command_thread_loop)
self._command_thread.daemon = True
self._command_thread.start()
# To prevent repeats for a given device competing
# with new states for the same device, we give commands
# generation numbers, and if we see a command for a device
# from an old generation, we ignore it.
self._device_command_generations = collections.defaultdict(int)
@proxy.command
def set_state(self, system_code, device_code, mode):
"""Handle rf swtich events - turn it on or off."""
system_code = str(system_code)
device_code = int(device_code)
self._device_command_generations[(system_code, device_code)] += 1
generation = self._device_command_generations[(system_code, device_code)]
self._command_queue.put((system_code, device_code, mode,
self._repeats, generation))
def _command_thread_loop(self):
while not self._exiting:
command = self._command_queue.get(True)
if command is None:
return
system_code, device_code, mode, repeats, generation = command
if generation < self._device_command_generations[
(system_code, device_code)]:
continue
logging.info('system_code = %s, device_code = %s, '
'mode = %s, repeats = %d, generation = %d',
system_code, device_code, mode, repeats,
generation)
if mode:
self._switch.switchOn(system_code, device_code)
else:
self._switch.switchOff(system_code, device_code)
# Put back on queue if needs repeating
repeats -= 1
if repeats > 0:
self._command_queue.put((system_code, device_code, mode,
repeats, generation))
def stop(self):
self._exiting = True
self._command_queue.put(None)
def join(self):
self._command_thread.join()
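# Minimal usage sketch (illustrative; assumes the proxy runtime wires up the GPIO pin):
#   switch = RFSwitch(pin=17)
#   switch.set_state("10101", 2, True)   # queue "on" for device 2 of system 10101 (repeated for reliability)
#   switch.set_state("10101", 2, False)  # a newer generation supersedes pending repeats for the same device
#   switch.stop(); switch.join()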
|
record.py
|
# coding: utf-8
import glob
import os
import shlex
import subprocess
import threading
import time
import traceback
import mss
from PIL import Image
from base.config.config import Config
from base.logs.log import Log4Kissenium
from base.tools.platform import Platform
from base.tools.sm_tools import SmallTools
class Record(threading.Thread):
stop_recording = False
scenario = ""
def __init__(self, scenario, test):
# Note: if we want to record remote execution of kissenium (not implemented for now), we could consider using
# vnc server on the remote executor
threading.Thread.__init__(self)
self.scenario = scenario
self.reports_folder = SmallTools.get_reports_folder(self.scenario)
self.test = test
self.cancelled = False
self.config = Config()
self.logger = Log4Kissenium.get_logger("Kissenium")
def start(self):
try:
t = threading.Thread(name='ScreenRecorder', target=self.record_screen)
t.start()
except Exception as e:
self.logger.error("Threading exception")
self.logger.error(e)
self.logger.error(traceback.format_exc())
def record_screen(self):
# The mac solution might be the nicest solution for every system
# To be tested further
if Platform.get_os() == "mac":
self.logger.info("Record video on mac")
self.ffmpeg_record_mac()
self.ffmpeg_merge_tmp_videos()
else:
self.logger.info("Record video on linux or windows")
with mss.mss() as sct:
i = 0
while not self.stop_recording:
# the capture itself is what paces the loop, so call it directly instead of wrapping
# an already-invoked callable in a Thread (the original Thread received the call's return value, not a callable)
self.take_captures(sct, i)
i += 1
self.generate_video()
self.clean_tmp()
def stop(self):
self.stop_recording = True
def generate_video(self):
try:
filelist = sorted(glob.glob("reports/tmp/" + self.test + "-*.png"))
last_image = max(filelist, key=os.path.getctime)
os.system('ffmpeg -loglevel panic -hide_banner -nostats -f image2 -framerate 8 -i reports/tmp/' + self.test
+ '-%06d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p reports/tmp/' + self.test + '_body.avi')
os.system('ffmpeg -loglevel panic -hide_banner -nostats -loop 1 -t 1 -i ' + last_image
+ ' -c:v libx264 -vf "format=yuv420p" reports/tmp/' + self.test + '_lastimg.mp4')
os.system('ffmpeg -loglevel panic -hide_banner -nostats -i "concat:reports/tmp/' + self.test
+ '_body.avi|reports/tmp/' + self.test + '_lastimg.mp4" -c copy ' + self.reports_folder
+ self.test + '.mp4')
except Exception as e:
self.logger.error(e)
self.logger.error(traceback.format_exc())
def take_captures(self, sct, i):
try:
sct.shot(output='reports/tmp/' + self.test + '-' + "{0:0=6d}".format(i) + '.png')
except Exception as e:
self.logger.error(e)
self.logger.error(traceback.format_exc())
def clean_tmp(self):
try:
g = glob.glob("reports/tmp/" + self.test + "*")
SmallTools.delete_from_glob(g)
except Exception as e:
self.logger.error(e)
self.logger.error(traceback.format_exc())
def ffmpeg_record_mac(self):
try:
i = 0
while not self.stop_recording:
command = 'ffmpeg -loglevel panic -hide_banner -nostats -f avfoundation -i "1" -c:v libx264 -vf '\
+ '"format=yuv420p" -r 25 -t 2 reports/tmp/' \
+ self.test + '-' + str("{0:0=4d}".format(i)) + '.avi'
arguments = shlex.split(command)
subprocess.Popen(arguments)
time.sleep(2)
i += 1
except Exception as e:
self.logger.error(e)
self.logger.error(traceback.format_exc())
def ffmpeg_merge_tmp_videos(self):
try:
if Platform.get_os() == "mac":
vl = ""
videolist = sorted(glob.glob('reports/tmp/' + self.test + '-*.avi'))
for video in videolist:
s = "" if vl == "" else "|"
vl += s + video
self.logger.debug(vl)
os.system('ffmpeg -loglevel panic -hide_banner -nostats -i "concat:' + vl + '" -c copy '
+ self.reports_folder + self.test + '.avi')
else:
self.logger.error('Not handled for now.')
except Exception as e:
self.logger.error(e)
self.logger.error(traceback.format_exc())
|
server_socket.py
|
from socket import gethostbyname, gethostname, SHUT_WR, socket, SOL_SOCKET, SO_REUSEADDR, AF_INET, SOCK_STREAM
from threading import Thread
import sys
class ServerSocket:
"""
A Class to represent Server Socket.
"""
def __init__(self, port: int,db,model) -> None:
# Creating an INET , STREAMing socket
self.SERVER = socket(AF_INET, SOCK_STREAM)
# make the server reusable
self.SERVER.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.IP = '' # if on linux or a hosting platform
if sys.platform == "win32" or sys.platform == "darwin": # if on windows or macOS
self.IP = gethostbyname(gethostname())
self.PORT = port
self.ADDR = (self.IP, self.PORT)
self.BUFF_SIZE = 32
self.FORMAT = 'utf-8'
self.clients = [] # list of clients connected to server at the moment
self.startServer(db,model)
def broadcast(self, msg: bytes) -> None:
"""
The message consists of a BUFF_SIZE (32) character header containing the length of the message, followed by the message itself
"""
message = (f"{len(msg):<{self.BUFF_SIZE}}" +
msg.decode(self.FORMAT, 'ignore'))
for client in self.clients:
try:
# send the length-prefixed message to this client
client.send(message.encode(self.FORMAT, 'ignore'))
except Exception as e:
print(f'[BROADCAST ERROR] {e}')
def handleClient(self, conn: socket,db,model) -> None:
"""
A function to be threaded for each client.
The function Receives a message from the respective client and broadcasts it to all the clients
Parameter
---------
conn : socket
client socket representative at server side
"""
print(f"[NEW CONNECTION]")
# try:
while True:
# receiving the length of message
msglen = conn.recv(self.BUFF_SIZE).decode(
self.FORMAT, 'ignore')
if msglen: # if received something
# try:
# receive rest of the message
msg = conn.recv(int(msglen))
if msg:
text=msg.decode(self.FORMAT,'ignore')
username = text[:text.index(':=>')]
message = text[text.index(':=>')+3:]
db.session.add(model(username=username,message=message))
db.session.commit()
self.broadcast(msg)
# except Exception as e:
# print(f'[HANDLECLIENT ERROR]{e}')
# except Exception as e:
# # Close the connection from erver side if client closes connection from client side
# print(f"[CONNECTION LOST] User: Connection Lost\n[EXCEPTION] {e}")
# conn.shutdown(SHUT_WR)
# conn.close()
# # remove client from clients list
# self.clients.remove(conn)
def accept_connections(self,db,model):
"""
A function to accept connections forever
"""
while True:
conn, addr = self.SERVER.accept()
self.clients.append(conn)
# making thread for each client and starting it
client = Thread(target=self.handleClient, args=[conn,db,model])
client.start()
def startServer(self,db,model):
self.SERVER.bind(self.ADDR)
self.SERVER.listen()
print(f"LISTENING FOR CONNECTIONS AT ({self.IP},{self.PORT})")
# Make a thead to accept connections and return
Thread(target=self.accept_connections,args=[db,model]).start()
# accepting conditions and other parts of program will occur simultaneously
|
multiprocess_iterator.py
|
from __future__ import division
from collections import namedtuple
import multiprocessing
from multiprocessing import sharedctypes
import signal
import sys
import threading
import warnings
import numpy
import six
from chainer.dataset import iterator
_response_time = 1.
_short_time = 0.001
_PrefetchState = namedtuple('_PrefetchState', (
'current_position', 'epoch', 'is_new_epoch',
'previous_epoch_detail', 'order'))
class MultiprocessIterator(iterator.Iterator):
"""Dataset iterator that loads examples in parallel.
This is an implementation of :class:`~chainer.dataset.Iterator` that loads
examples with worker processes. It uses the standard :mod:`multiprocessing`
module to parallelize the loading. The dataset is sent to the worker
processes in the standard way using pickle.
Note that this iterator effectively prefetches the examples for the next
batch asynchronously after the current batch is returned.
This iterator saves ``-1`` instead of ``None`` in snapshots since some
serializers do not support ``None``.
Args:
dataset (~chainer.dataset.Dataset): Dataset to iterate.
batch_size (int): Number of examples within each batch.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
shuffle (bool): If ``True``, the order of examples is shuffled at the
beginning of each epoch. Otherwise, examples are extracted in the
order of indexes.
n_processes (int): Number of worker processes. The number of CPUs is
used by default.
n_prefetch (int): Number of prefetch batches.
shared_mem (int): The size of using shared memory per data.
If ``None``, size is adjusted automatically.
"""
_interruption_testing = False # for testing
def __init__(self, dataset, batch_size, repeat=True, shuffle=True,
n_processes=None, n_prefetch=1, shared_mem=None):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.shuffle = shuffle
self.n_processes = n_processes or multiprocessing.cpu_count()
self.n_prefetch = max(n_prefetch, 1)
self.shared_mem = shared_mem
self._finalized = False
self._comm = _Communicator(self.n_prefetch)
self.reset()
self._prefetch_loop = _PrefetchLoop(
self.dataset, self.batch_size, self.repeat, self.shuffle,
self.n_processes, self.n_prefetch, self.shared_mem, self._comm,
self._interruption_testing)
# defer launching prefetch thread until creating the worker pool,
# not to leave a background thread in forked processes.
self._thread = None
def __next__(self):
measure_mode = False
if self._thread is None:
if self._prefetch_loop.measure_required():
measure_mode = True
batch, prefetch_state = self._prefetch_loop.measure()
self._thread = self._prefetch_loop.launch_thread()
del self._prefetch_loop
if not measure_mode:
batch, prefetch_state = self._comm.get()
(self.current_position, self.epoch, self.is_new_epoch,
self._previous_epoch_detail, self._order) = prefetch_state
if batch is None:
raise StopIteration
else:
return batch
next = __next__
def __del__(self):
# When `self.__del__()` is called, `self.__init__()` may not be
# finished. So some attributes may be undefined.
if not hasattr(self, '_finalized'):
# We don't know how to finalize this uninitialized object
return
if not hasattr(self, '_comm'):
self._comm = None
if not hasattr(self, '_thread'):
self._thread = None
if self._finalized:
return
self._finalized = True
if self._comm is None:
return
self._comm.terminate()
if self._thread is None:
return
while self._thread.is_alive():
self._thread.join(_response_time)
finalize = __del__
def __copy__(self):
other = MultiprocessIterator(
self.dataset, self.batch_size, self.repeat, self.shuffle,
self.n_processes, self.n_prefetch, self.shared_mem)
other.current_position = self.current_position
other.epoch = self.epoch
other.is_new_epoch = self.is_new_epoch
other._previous_epoch_detail = self._previous_epoch_detail
other._order = self._order
other._set_prefetch_state()
return other
@property
def epoch_detail(self):
return self.epoch + self.current_position / len(self.dataset)
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def serialize(self, serializer):
self.current_position = serializer('current_position',
self.current_position)
self.epoch = serializer('epoch', self.epoch)
self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
try:
serializer('order', self._order)
except KeyError:
serializer('_order', self._order)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
self._previous_epoch_detail = self.epoch + \
(self.current_position - self.batch_size) / len(self.dataset)
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
self._set_prefetch_state()
def reset(self):
if self._finalized:
raise NotImplementedError(
'Reset of finalized MultiProcessIterator is currently not '
'supported.')
self.current_position = 0
self.epoch = 0
self.is_new_epoch = False
# use -1 instead of None internally.
self._previous_epoch_detail = -1.
if self.shuffle:
self._order = numpy.random.permutation(len(self.dataset))
else:
self._order = None
self._set_prefetch_state()
def _set_prefetch_state(self):
prefetch_state = _PrefetchState(
current_position=self.current_position,
epoch=self.epoch,
is_new_epoch=self.is_new_epoch,
previous_epoch_detail=self._previous_epoch_detail,
order=self._order)
self._comm.reset(prefetch_state)
class _Communicator(object):
STATUS_CONTINUE = 0
STATUS_RESET = 1
STATUS_TERMINATE = 2
def __init__(self, n_prefetch):
self.n_prefetch = n_prefetch
self._lock = threading.Lock()
self._not_empty_cond = threading.Condition(self._lock)
self._not_full_cond = threading.Condition(self._lock)
self._batch_queue = []
self._status = _Communicator.STATUS_CONTINUE
self._reset_count = 0
@property
def is_terminated(self):
with self._lock:
return self._status == _Communicator.STATUS_TERMINATE
# called from iterator
def get(self):
with self._lock:
while len(self._batch_queue) == 0:
self._not_empty_cond.wait(_response_time)
batch, prefetch_state = self._batch_queue.pop(0)
self._not_full_cond.notify()
return batch, prefetch_state
# called from iterator
def reset(self, prefetch_state):
with self._lock:
self._status = _Communicator.STATUS_RESET
self._prefetch_state = prefetch_state
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from iterator
def terminate(self):
with self._lock:
self._status = _Communicator.STATUS_TERMINATE
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from thread
def check(self):
with self._lock:
status = self._status
self._status = _Communicator.STATUS_CONTINUE
prefetch_state = None
if status == _Communicator.STATUS_RESET:
prefetch_state = self._prefetch_state
return status, prefetch_state, self._reset_count
# called from thread
def put(self, batch, prefetch_state, reset_count):
with self._lock:
if len(self._batch_queue) == self.n_prefetch:
self._not_full_cond.wait()
if reset_count == self._reset_count:
self._batch_queue.append((batch, prefetch_state))
self._not_empty_cond.notify()
class _PrefetchLoop(object):
def __init__(self, dataset, batch_size, repeat, shuffle,
n_processes, n_prefetch, mem_size, comm,
_interruption_testing):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.shuffle = shuffle
self.n_processes = n_processes
self.mem_size = mem_size
self.comm = comm
self._allocate_shared_memory()
self._pool = None
# Use a distinct RandomState in the thread
# for deterministic random number generation.
# To support 32-bit platform and numpy < 1.11,
# the seed is taken in a verbose manner.
seed = numpy.asscalar(
numpy.random.randint(-(1 << 31), 1 << 31, 1).astype('uint32'))
self._random = numpy.random.RandomState(seed)
self._interruption_testing = _interruption_testing
def measure_required(self):
return self.mem_size is None
def measure(self):
status, prefetch_state, _ = self.comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
indices = self._proceed()
if indices is None: # stop iteration
batch = None
else:
batch = [self.dataset[idx] for idx in indices]
self.mem_size = max(map(_measure, batch))
self._allocate_shared_memory()
return batch, self.prefetch_state
def _allocate_shared_memory(self):
if self.measure_required():
self.mem_bulk = None
else:
self.mem_bulk = \
sharedctypes.RawArray('b', self.batch_size * self.mem_size)
def launch_thread(self):
self._pool = multiprocessing.Pool(
processes=self.n_processes,
initializer=_fetch_setup,
initargs=(self.dataset, self.mem_size, self.mem_bulk))
if self._interruption_testing:
pids = self._pool.map(_report_pid, range(self.n_processes))
print(' '.join(map(str, pids)))
sys.stdout.flush()
thread = threading.Thread(target=self._run, name='prefetch_loop')
thread.setDaemon(True)
thread.start()
return thread
def _run(self):
alive = True
try:
while alive:
alive = self._task()
finally:
self._pool.close()
self._pool.join()
def _task(self):
status, prefetch_state, reset_count = self.comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
elif status == _Communicator.STATUS_TERMINATE:
return False # stop loop
indices = self._proceed()
if indices is None: # stop iteration
batch = None
else:
future = self._pool.map_async(_fetch_run, enumerate(indices))
while True:
try:
data_all = future.get(_response_time)
except multiprocessing.TimeoutError:
if self.comm.is_terminated:
return False
else:
break
batch = [_unpack(data, self.mem_bulk) for data in data_all]
self.comm.put(batch, self.prefetch_state, reset_count)
return True
def _proceed(self):
n = len(self.dataset)
(pos, epoch, is_new_epoch,
previous_epoch_detail, order) = self.prefetch_state
if pos < self.batch_size and epoch > 0 and not self.repeat:
return None # stop iteration
previous_epoch_detail = epoch + pos / n
new_pos = pos + self.batch_size
if new_pos < n:
if order is None:
indices = numpy.arange(pos, new_pos)
else:
indices = order[pos:new_pos]
is_new_epoch = False
else:
new_pos = new_pos - n if self.repeat else 0
if order is None:
indices = numpy.arange(pos, n)
if self.repeat:
indices = \
numpy.concatenate((indices, numpy.arange(new_pos)))
else:
indices = order[pos:n]
if self.repeat:
order = self._random.permutation(n)
indices = \
numpy.concatenate((indices, order[:new_pos]))
epoch += 1
is_new_epoch = True
self.prefetch_state = _PrefetchState(
new_pos, epoch, is_new_epoch,
previous_epoch_detail, order)
return indices
# Using a parameterized function (e.g. a bound method) with Pool is tricky due to
# restrictions imposed by pickle; picklable types differ across versions.
# Just using a top-level function with globals seems to be safest.
# This does not mean thread safety is broken or that global variables are shared;
# note that each worker process uses a separate address space.
# To make static linter happy, we first initialize global variables.
_fetch_dataset = None
_fetch_mem_size = None
_fetch_mem_bulk = None
def _fetch_setup(dataset, mem_size, mem_bulk):
global _fetch_dataset, _fetch_mem_size, _fetch_mem_bulk
signal.signal(signal.SIGINT, signal.SIG_IGN)
_fetch_dataset = dataset
_fetch_mem_size = mem_size
_fetch_mem_bulk = mem_bulk
def _fetch_run(inputs):
i, index = inputs
data = _fetch_dataset[index]
if _fetch_mem_bulk is not None:
offset = i * _fetch_mem_size
limit = offset + _fetch_mem_size
data = _pack(data, _fetch_mem_bulk, offset, limit)
return data
def _report_pid(_): # for testing
return multiprocessing.current_process().pid
class _PackedNdarray(object):
def __init__(self, array, mem, offset):
self.shape = array.shape
self.dtype = array.dtype
self.nbytes = array.nbytes
self.size = array.size
self.offset = offset
total = self.offset + self.nbytes
if total > len(mem):
raise ValueError(
'Shared memory size is too small. expect:{}, actual:{}'.format(
total, len(mem)))
target = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
target[...] = array.ravel()
def unpack(self, mem):
ret = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
ret = ret.reshape(self.shape).copy()
return ret
def _measure(data):
expect = 0
t = type(data)
if t is tuple or t is list or t is dict:
for v in data:
if isinstance(v, numpy.ndarray):
expect += v.nbytes
return expect
def _pack(data, mem, offset, limit):
if len(mem) == 0:
return data
t = type(data)
over = False
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret[k] = v
data = ret
elif t is numpy.ndarray:
if data.nbytes + offset > limit:
over = True
else:
data = _PackedNdarray(data, mem, offset)
offset += data.nbytes
if over:
expect = _measure(data)
warnings.warn(
'Shared memory size is too small.\n' +
'Please set shared_mem option for MultiprocessIterator.\n' +
'Expect shared memory size: {} bytes.\n'.format(expect) +
'Actual shared memory size: {} bytes.'.format(limit - offset),
UserWarning)
return data
def _unpack(data, mem):
if len(mem) == 0:
return data
t = type(data)
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret[k] = v
data = ret
elif t is _PackedNdarray:
data = data.unpack(mem)
return data
|
video_recorder.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import signal
import subprocess
from tempfile import mktemp
from threading import Thread
import time
LOG = logging.getLogger(__name__)
class VideoRecorder(object):
def __init__(self, width, height, display='0.0', frame_rate=15):
self.is_launched = False
self.file_path = mktemp() + '.mp4'
# ffmpeg -video_size 1920x1080 -framerate 15 -f x11grab -i :0.0
self._cmd = ['ffmpeg',
'-video_size', '{}x{}'.format(width, height),
'-framerate', str(frame_rate),
'-f', 'x11grab',
'-i', ':{}'.format(display),
self.file_path]
def start(self):
if self.is_launched:
LOG.warning('Video recording is running already')
return
if not os.environ.get('FFMPEG_INSTALLED', False):
LOG.error("ffmpeg isn't installed. Video recording is skipped")
return
fnull = open(os.devnull, 'w')
LOG.info('Record video via %s', ' '.join(self._cmd))
self._popen = subprocess.Popen(self._cmd, stdout=fnull, stderr=fnull)
self.is_launched = True
def stop(self):
if not self.is_launched:
LOG.warning('Video recording is stopped already')
return
self._popen.send_signal(signal.SIGINT)
def terminate_avconv():
limit = time.time() + 10
while time.time() < limit:
time.sleep(0.1)
if self._popen.poll() is not None:
return
os.kill(self._popen.pid, signal.SIGTERM)
t = Thread(target=terminate_avconv)
t.start()
self._popen.communicate()
t.join()
self.is_launched = False
def clear(self):
if self.is_launched:
LOG.error("Video recording is running still")
return
if not os.path.isfile(self.file_path):
LOG.warning("%s is absent already", self.file_path)
return
os.remove(self.file_path)
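# Minimal usage sketch (illustrative; assumes ffmpeg is installed and FFMPEG_INSTALLED is set):
#   recorder = VideoRecorder(1920, 1080, display='0.0')
#   recorder.start()
#   ...               # exercise the UI under test
#   recorder.stop()   # sends SIGINT to ffmpeg and waits for it to finish
#   recorder.clear()  # delete the capture once it is no longer needed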
|
twitch.py
|
import os
import json
import webbrowser
from threading import Thread
import time
from PyQt5.QtWidgets import QFileDialog, QTextEdit, QHBoxLayout, QFormLayout, QHeaderView, QTabWidget, QCheckBox, QGridLayout, QComboBox, QLineEdit, QLabel, QApplication, QWidget, QPushButton, QVBoxLayout, QTableWidget, QTableWidgetItem
from modules.twitch import Commands, TwitchIntegration
CMD_NAMES = {
Commands.INFO: "Information (info)",
Commands.COMMANDS: "List Commands (commands)",
Commands.TOP_LOOTS: "Top Loots (toploots)",
Commands.ALL_RETURNS: "All Returns (allreturns)"
}
class TwitchTab(QWidget):
def __init__(self, app: "LootNanny", config, *args, **kwargs):
super().__init__(*args, **kwargs)
self.app = app
self.command_toggles = {}
self.create_layout()
# Bot
self.twitch_bot = None
self.twitch_bot_thread = None
# Finalize Initialization
self.validate_settings()
def to_config(self):
return {
"token": self.oauth_token,
"username": self.username,
"channel": self.channel,
"prefix": self.command_prefix,
"commands_enabled": list(map(lambda c: c.value, self.commands_enabled))
}
def create_layout(self):
layout = QVBoxLayout()
form_inputs = QFormLayout()
layout.addLayout(form_inputs)
# Chat Location
self.oauth_token_text = QLineEdit(self.app.config.twitch_token.ui_value)
self.oauth_token_text.editingFinished.connect(self.on_settings_changed)
form_inputs.addRow("OAuth Token:", self.oauth_token_text)
btn = QPushButton("Get New OAuth Token:")
btn.released.connect(lambda: webbrowser.open("https://twitchapps.com/tmi"))
form_inputs.addWidget(btn)
self.username_text = QLineEdit(self.app.config.twitch_username.ui_value, enabled=False)
self.username_text.editingFinished.connect(self.on_settings_changed)
form_inputs.addRow("Bot Name:", self.username_text)
self.channel_text = QLineEdit(self.app.config.twitch_channel.ui_value)
self.channel_text.editingFinished.connect(self.on_settings_changed)
form_inputs.addRow("Channel:", self.channel_text)
self.command_prefix_text = QLineEdit(self.app.config.twitch_prefix.ui_value)
self.command_prefix_text.editingFinished.connect(self.on_settings_changed)
form_inputs.addRow("Command Prefix:", self.command_prefix_text)
for i, cmd in enumerate(Commands):
widget = QCheckBox(CMD_NAMES[cmd.value], self)
widget.setChecked(cmd in self.app.config.twitch_commands_enabled.value)
layout.addWidget(widget)
widget.toggled.connect(self.on_commands_toggled)
self.command_toggles[cmd] = widget
layout.addStretch()
self.start_btn = QPushButton("Start Twitch Bot:", enabled=False)
self.start_btn.released.connect(self.start_twitch_bot)
form_inputs.addWidget(self.start_btn)
self.setLayout(layout)
def start_twitch_bot(self):
self.start_btn.setEnabled(False)
self.start_btn.setText("Restart App To Start Twitch Bot Again :( (Work in progress)")
if self.twitch_bot is not None:
# Kill old twitch bot
            return  # TODO: This is harder than I first intended to do cleanly, maybe need a daemon process :(
print("Starting twitch bot")
self.twitch_bot = TwitchIntegration(
self.app,
username=self.username,
token=self.oauth_token,
channel=self.channel,
command_prefix=self.command_prefix
)
self.twitch_bot_thread = Thread(target=self.twitch_bot.start, daemon=True)
self.twitch_bot_thread.start()
def on_settings_changed(self):
self.app.config.twitch_token = self.oauth_token_text.text()
self.app.config.twitch_username = self.username_text.text()
self.app.config.twitch_channel = self.channel_text.text()
self.app.config.twitch_prefix = self.command_prefix_text.text()
self.validate_settings()
self.app.save_config()
def validate_settings(self):
if all([
self.app.config.twitch_token.value,
self.app.config.twitch_username.value,
self.app.config.twitch_channel.value,
self.app.config.twitch_prefix.value
]):
self.start_btn.setEnabled(True)
else:
self.start_btn.setEnabled(False)
def on_commands_toggled(self):
for command, checkbox in self.command_toggles.items():
            checkbox: QCheckBox
if checkbox.isChecked():
self.app.config.twitch_commands_enabled.value.add(command)
else:
self.app.config.twitch_commands_enabled.value.discard(command)
self.app.save_config()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
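# --- Editor's illustrative sketch (not part of the original miner; Python 2, like the rest of this file) ---
# Miner.work above boils down to: append a little-endian nonce to the 76-byte
# header prefix, double-SHA256 it, quick-reject unless the last four digest
# bytes are zero, then read the digest as a little-endian 256-bit integer and
# compare it against the target. A compact, roughly equivalent check:
def check_nonce(blk_hdr, nonce, target):
	nonce_bin = struct.pack("<I", nonce)
	digest = hashlib.sha256(hashlib.sha256(blk_hdr + nonce_bin).digest()).digest()
	if digest[-4:] != '\0\0\0\0':
		return False  # high 32 bits of the hash are not zero
	# reversing the digest and reading it as hex mirrors bufreverse + wordreverse
	return long(digest[::-1].encode('hex'), 16) < target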
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 37014
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
ibuddy_usbapi.py
|
"""
USBAPI - a simple API for driving the USB attached iBuddy
USAGE
To use this you need to add pywinusb to your Python installation
- https://pypi.python.org/pypi/pywinusb/
Then you can drive this from the Python console as follows:
>>> import usbapi as usb
>>> devices = usb.usbapi().getDevices()
That gets you an array of iBuddy devices.
If you are in the lucky position of having multiple iBuddy devices attached, then you probably want to first do:
>>> devices.identify()
which will make the devices identify themselves in order, so that you can make their physical order match their logical order, either by physically moving them, or by using
>>> devices = devices.reorder([3, 1, 4, 2])
(Note that reorder takes the devices as being numbered from 1, not from zero as in the normal Python convention).
You can then give all devices commands as follows:
>>> devices.color("red")
>>> devices.color("green")
>>> devices.fly()
Actions can be strung together thus
>>> devices.color("red").delay().color("green").delay().color("blue")
All actions get queued up, and you may need to add specific delay calls between actions, so that you see all three colors above, not just the last one issued.
Of course you can issue different commands to different devices
>>> devices[0].color("white")
>>> devices[1].color("cyan")
If you want to execute a sequence, then the play command makes it easy, with its single character based command language:
>>> devices.play("rgbdpzcymn", 0.5)
which will run through the colors red, green, and blue, then do a dance, pulse the heart, create a buzz, and then try the other colors cyan, yellow, magenta, before ending up with the "nothing" color.
There's plenty more to the API - but I'm afraid the code is the docs. Happy hunting!
"""
import pywinusb.hid as hid
import time
import Queue
import threading
# Constants for use in the api
class Constants:
red = 6
green = 5
blue = 3
cyan = 1
magenta = 2
yellow = 4
white = 0
nothing = black = 7
twain = 0 # both left and right, or push and pull
left = pull = 1
right = push = 2
neutral = rest = 3
show = 0
hide = 1
colorInitials = "wcmbygrn"
class UsbDevices(list, Constants):
# List subclassing as described at http://stackoverflow.com/questions/8180014/how-to-subclass-python-list-without-type-problems
def __getslice__(self,i,j):
return UsbDevices(list.__getslice__(self, i, j))
def __add__(self,other):
return UsbDevices(list.__add__(self,other))
def __mul__(self,other):
return UsbDevices(list.__mul__(self,other))
def __getitem__(self, item):
result = list.__getitem__(self, item)
try:
return UsbDevices(result)
except TypeError:
return result
# This batch of methods are only used on list of devices
def list(self):
for usbdevice in self:
device = usbdevice.device
print "%s pID=0x%04x %s" \
% (device.product_name, device.product_id, device.device_path)
def identify(self):
"""Run through the devices in sequence to identity them"""
for i, device in enumerate(self, start = 1):
device.color(i).pulse(number = i, delay = 0.7)
for device in self:
device.wait()
for device in self:
device.heart(device.show).dance().wait().heart(device.hide)
def reorder(self, sequence):
reordered = UsbDevices()
for i in self:
reordered.append(None)
starter = sequence[0]
for i in sequence:
starter = min(starter, i)
for i, value in enumerate(sequence):
reordered[value - starter] = self[i]
return reordered
def chase(self):
limit = len(self)
colors = [self.white]
if limit > 2:
colors.append(self.cyan)
if limit > 3:
colors.append(self.blue)
colors.append(self.black)
colorMask = self[0].colorMask
colorShift = self[0].colorShift
when = time.time()
for i in range(100):
for c, color in enumerate(colors):
self[(i - c) % limit].queue.put((when, colorMask, color << colorShift))
when += 0.7
# This batch of methods mirror the methods on a single device
def color(self, color):
for device in self:
device.color(color)
return self
def heart(self, heart):
for device in self:
device.heart(heart)
return self
def turn(self, turn):
for device in self:
device.turn(turn)
return self
def flap(self, flap):
for device in self:
device.flap(flap)
return self
def dance(self, number = 10, delay = 0.2):
for device in self:
device.dance(number, delay)
return self
def fly(self, number = 10, delay = 0.1):
for device in self:
device.fly(number, delay)
return self
def buzz(self, number = 30, delay = 0.02, rise = 0):
for device in self:
device.buzz(number, delay, rise)
return self
def pulse(self, number = 30, delay = 0.1):
for device in self:
device.pulse(number, delay)
return self
def flash(self, number = 30, delay = 0.01, color = None):
for device in self:
device.flash(number, delay, color)
return self
def wait(self):
for device in self:
device.wait()
return self
def reset(self):
for device in self:
device.reset()
return self
def discard(self):
for device in self:
device.discard()
return self
def delay(self, delay = 1):
for device in self:
device.delay(delay)
return self
def play(self, string, delay = 0.1):
for c in string:
if self.colorInitials.find(c) != -1:
self.color(c)
elif c == '<':
# turn left
self.turn(self.left)
elif c == '>':
# turn right
self.turn(self.right)
elif c == 'v':
# pull wings in
self.flap(self.pull)
elif c == '^':
# push wings out
self.flap(self.push)
elif c == '!':
# show heart
self.heart(self.show)
elif c == '.':
# hide heart
self.heart(self.hide)
elif c == 'x':
# hide heart
self.reset()
elif c == 'd':
# dance
self.dance()
elif c == 'f':
# fly
self.fly()
elif c == 'z':
# buzz
self.buzz()
elif c == 'p':
# pulse
self.pulse()
elif c == ',':
# this is just a pause
pass
elif c == '+':
# this gives no delay, so makes two actions happen at once
continue
elif c == '/':
# halve the delay
delay = delay / 2.0
continue
elif c == '*':
# double the delay
delay = delay * 2.0
continue
self.delay(delay)
return self
class usbapi(Constants):
"""Class for providing access to USB notifier type objects"""
# def __init__(self):
# self.data = []
def getDevices(self):
# find all the i-buddy devices
dev_filter = hid.HidDeviceFilter(vendor_id = 0x1130)
hiddevices = dev_filter.get_devices()
# Select just the devices we can interact with
devices = UsbDevices()
for device in hiddevices:
            # Each physical device shows up twice; we just want the '01' labelled one
if '&mi_01#' in device.device_path:
usbdev = usbdevice(device)
usbdev.index = len(devices)
devices.append(usbdev)
self.devices = devices
return self.devices
class usbdevice(Constants):
def __init__(self, device):
self.device = device
self.lock = threading.Lock()
# assume we start from all off
self.value = 0xff
# we pick a preferred move
self.move = 1
self.queue = Queue.PriorityQueue()
self.queue.device = self
self.when = 0
t = threading.Thread(target=self.worker, args=(self.queue,))
t.daemon = True
t.start()
def __str__(self):
color = (self.value & ~self.colorMask) >> self.colorShift
heart = (self.value & ~self.heartMask) >> self.heartShift
flap = (self.value & ~self.flapMask) >> self.flapShift
turn = (self.value & ~self.turnMask) >> self.turnShift
#print color, heart, flap, turn
return str(self.index) + ') ' + self.colors[color] + ' ' + self.hearts[heart] + ' ' + self.flaps[flap] + ' ' + self.turns[turn] + ' ' + str(self.queue.qsize()) + ' queued'
def __repr__(self):
return str(self)
colors = ['white', 'cyan', 'magenta', 'blue', 'yellow', 'green', 'red', 'nothing']
flaps = ['flaps both', 'pull', 'push', '']
turns = ['turn both', 'left', 'right', '']
hearts = ['heart', '']
colorMask = 0x8f
colorShift = 4
heartMask = 0x7f
heartShift = 7
turnMask = 0xfc
turnShift = 0
flapMask = 0xf3
flapShift = 2
def output(self, value=None):
        if value is None:
value = self.value
try:
self.lock.acquire()
self.device.open()
reports = self.device.find_output_reports()
out_report = reports[0]
data = (0x00, 0x55, 0x53, 0x42, 0x43, 0x00, 0x40, 0x02, value)
out_report.send(data)
self.value = value
finally:
self.device.close()
self.lock.release()
def assemble(self, color, heart, turn, flap):
return (color << self.colorShift) + (heart << self.heartShift) + (turn) + (flap << self.flapShift)
# Actions
def color(self, color):
"""Set the color of the main indicator"""
if isinstance(color, basestring):
# If color is passed as a string, we take its initial letter as
# defining the color we want
color = self.colorInitials.find(color[0])
color = sorted((0, color, 7))[1]
self.add(self.colorMask, self.colorShift, color)
return self
def heart(self, heart):
"""Set the secondary (heart) indicator"""
heart = 0 if heart == 0 else 1
self.add(self.heartMask, self.heartShift, heart)
return self
def turn(self, turn):
"""Turn the body"""
turn = sorted((self.twain, turn, self.neutral))[1]
if turn == self.left or turn == self.right:
            self.move = turn  # remember the last deliberate turn so buzz() can restore it
self.add(self.turnMask, self.turnShift, turn)
return self
def flap(self, flap):
"""Move the wings"""
flap = sorted((self.twain, flap, self.neutral))[1]
self.add(self.flapMask, self.flapShift, flap)
return self
def dance(self, number = 10, delay = 0.2):
"""Dance turning left and right"""
for each in range(number):
self.add(self.turnMask, self.turnShift, self.left, delay)
self.add(self.turnMask, self.turnShift, self.right, delay)
self.add(self.turnMask, self.turnShift, self.neutral)
return self
def fly(self, number = 10, delay = 0.1):
"""Flap the wings multiple times"""
for each in range(number):
self.add(self.flapMask, self.flapShift, self.pull, delay)
self.add(self.flapMask, self.flapShift, self.push, delay)
self.add(self.flapMask, self.flapShift, self.neutral)
return self
def buzz(self, number = 30, delay = 0.02, rise = 0):
"""Make a buzzing sound"""
# We try and not disturb the turn by turning back to the last move
turn = self.move
for each in range(number):
self.add(self.turnMask, self.turnShift, turn)
self.add(self.turnMask, self.turnShift, turn ^ 3, delay)
self.add(self.turnMask, self.turnShift, self.neutral, delay)
delay += rise
return self
def pulse(self, number = 30, delay = 0.1):
"""Pulse the secondary indicator (heart)"""
return self.sequence(number, delay, mask = self.heartMask, shift = self.heartShift)
def flash(self, number = 30, delay = 0.01, color = None):
"""Flash the main indicator"""
        color = color if color is not None else self.red
return self.sequence(number, delay, mask = self.colorMask, shift = self.colorShift, on = color, off = self.black)
def wait(self):
"""wait for all outstanding actions to complete"""
self.queue.join()
return self
def reset(self):
"""Instantly clears any queued items, and resets back to all off"""
self.discard()
self.output(0xff)
self.when = 0
return self
def discard(self):
"""Clears any queued items"""
try:
while True:
item = self.queue.get_nowait()
self.queue.task_done()
        except Queue.Empty:
pass
return self
def play(self, string, delay = 0.1):
"""Play a sequence of actions described in a string"""
devices = UsbDevices()
devices.append(self)
devices.play(string, delay)
return self
def delay(self, delay):
"""The next action will be delayed by this amount from the previous one"""
self.when += delay
return self
def mix(self, color1 = 6, color2 = 4, number = 1000):
"""Mix two colors together, to try and create an intermediate one - doesn't work very well"""
for each in range(number):
self.output(value = color1 << self.colorShift)
self.output(value = color2 << self.colorShift)
return self
# Internal helper actions
def sequence(self, number, delay, mask, shift, on = 0, off = 1):
"""Helper function"""
for each in range(number):
self.add(mask, shift, on, delay)
self.add(mask, shift, off, delay)
return self
def schedule(self):
now = time.time()
if self.when < now:
self.when = now
return self
def add(self, mask, shift, value, delay = 0):
self.schedule()
self.queue.put((self.when, mask, value << shift))
self.delay(delay)
return self
# Private worker thread, not part of the api
def worker(self, queue):
while True:
item = queue.get()
when, mask, value = item
delay = when - time.time()
if delay > 0:
time.sleep(delay)
self.value = (self.value & mask) + value
self.output()
queue.task_done()
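# --- Editor's illustrative usage sketch (assumes at least one iBuddy is attached) ---
# Ties together the USAGE notes at the top of this file: fetch the devices,
# identify them, then drive them with the single-character play() language.
if __name__ == "__main__":
    devices = usbapi().getDevices()
    if devices:
        devices.identify()                  # flash each device in sequence
        devices.play("r!d.g", 0.5)          # red, show heart, dance, hide heart, green
        devices.play("/r*g", 0.4)           # '/' halves the delay, '*' doubles it again
        devices.wait()                      # block until all queued actions finish
        devices.reset()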
|
DataGen_Transformer_split_IDMap.py
|
import csv
import os
import sys
import shutil
import time
import numpy as np
import scipy.io as sio
import yaml
import argparse
from easydict import EasyDict
from os.path import dirname, realpath, pardir
from hashids import Hashids
import hashlib
sys.path.append(os.path.join(dirname(realpath(__file__)), pardir))
import utils.graphUtils.graphTools as graph
# from utils.graphUtils.graphTools import isConnected
# from dataloader.statetransformer import AgentState
# from dataloader.statetransformer_localGuidance import AgentState
# from dataloader.statetransformer_localGuidance_SDObs import AgentState
# from dataloader.statetransformer_localGuidance_SemiLocal import AgentState
from dataloader.statetransformer_Guidance import AgentState
# from dataloader.statetransformer_globalGuidance import AgentState
from scipy.spatial.distance import squareform, pdist
from multiprocessing import Queue, Process
parser = argparse.ArgumentParser("Input width and #Agent")
parser.add_argument('--num_agents', type=int, default=4)
parser.add_argument('--map_w', type=int, default=10)
parser.add_argument('--map_density', type=float, default=0.1)
parser.add_argument('--dir_SaveData', type=str, default='../MultiAgentDataset/DataSource_DMap_LG')
parser.add_argument('--loadmap_TYPE', type=str, default='map')
parser.add_argument('--solCases_dir', type=str, default='../MultiAgentDataset/Solution_DMap')
parser.add_argument('--chosen_solver', type=str, default='ECBS')
parser.add_argument('--id_start', type=int, default=0)
parser.add_argument('--div_train', type=int, default=21000)
parser.add_argument('--div_valid', type=int, default=200)
parser.add_argument('--div_test', type=int, default=4500)
parser.add_argument('--div_train_IDMap', type=int, default=0)
parser.add_argument('--div_test_IDMap', type=int, default=427)
parser.add_argument('--div_valid_IDMap', type=int, default=800)
parser.add_argument('--maxNum_Map', type=int, default=1000)
parser.add_argument('--div_extend_valid', type=int, default=0)
parser.add_argument('--FOV', type=int, default=9)
parser.add_argument('--guidance', type=str, default='')
parser.add_argument('--dynamic_commR', action='store_true', default=False)
parser.add_argument('--symmetric_norm', action='store_true', default=False)
parser.add_argument('--commR', type=int, default=7)
args = parser.parse_args()
class DataTransformer:
def __init__(self, config):
self.config = config
self.PROCESS_NUMBER = 4
self.num_agents = self.config.num_agents
self.size_map = [self.config.map_w, self.config.map_w]
self.label_density = str(self.config.map_density).split('.')[-1]
self.AgentState = AgentState(self.config)
self.communicationRadius = self.config.commR # communicationRadius
self.zeroTolerance = 1e-9
self.delta = [[-1, 0], # go up
[0, -1], # go left
[1, 0], # go down
[0, 1], # go right
[0, 0]] # stop
self.num_actions = 5
self.list_seqtrain_file = []
self.list_train_file = []
self.list_seqvalid_file = []
self.list_validStep_file = []
self.list_valid_file = []
self.list_test_file = []
self.hashids = Hashids(alphabet='01234567789abcdef', min_length=5)
self.pathtransformer = self.pathtransformer_RelativeCoordinate
self.label_setup = '{}{:02d}x{:02d}_density_p{}/{}_Agent'.format(self.config.loadmap_TYPE, self.size_map[0],self.size_map[1],
self.label_density,
self.num_agents)
self.dirName_parent = os.path.join(self.config.solCases_dir, self.label_setup)
self.dirName_Store = os.path.join(self.config.dir_SaveData, self.label_setup)
self.dirName_input = os.path.join(self.dirName_parent, 'input')
self.dirName_output = os.path.join(self.dirName_parent, 'output_{}'.format(config.chosen_solver))
self.set_up()
if self.config.dynamic_commR:
# comm radius that ensure initial graph connected
print("run on multirobotsim (radius dynamic) with collision shielding")
self.getAdjacencyMatrix = self.computeAdjacencyMatrix
else:
# comm radius fixed
print("run on multirobotsim (radius fixed) with collision shielding")
self.getAdjacencyMatrix = self.computeAdjacencyMatrix_fixedCommRadius
def set_up(self):
self.list_failureCases_solution = self.search_failureCases(self.dirName_output)
self.list_failureCases_input = self.search_failureCases(self.dirName_input)
self.nameprefix_input = self.list_failureCases_input[0].split('input/')[-1].split('ID')[0]
self.list_failureCases_solution = sorted(self.list_failureCases_solution)
self.list_sol_training = []
self.list_sol_valid = []
self.list_sol_test = []
for i in range(self.config.div_train_IDMap, self.config.div_test_IDMap):
for case in self.list_failureCases_solution:
if "_IDMap{:05d}".format(i) in case:
# print(case)
self.list_sol_training.append(case)
for i in range(self.config.div_test_IDMap, self.config.div_valid_IDMap):
for case in self.list_failureCases_solution:
if "_IDMap{:05d}".format(i) in case:
# print(case)
self.list_sol_test.append(case)
for i in range(self.config.div_valid_IDMap, self.config.maxNum_Map):
for case in self.list_failureCases_solution:
if "_IDMap{:05d}".format(i) in case:
# print(case)
self.list_sol_valid.append(case)
        self.list_sol_training = sorted(self.list_sol_training)
        self.list_sol_test = sorted(self.list_sol_test)
        self.list_sol_valid = sorted(self.list_sol_valid)
# print(self.list_sol_test)
self.len_failureCases_solution = len(self.list_failureCases_solution)
def reset(self):
self.task_queue = Queue()
dirpath = self.dirName_Store
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
self.path_save_solDATA = self.dirName_Store
try:
# Create target Directory
os.makedirs(self.path_save_solDATA)
os.makedirs(os.path.join(self.path_save_solDATA, 'train'))
os.makedirs(os.path.join(self.path_save_solDATA, 'valid'))
os.makedirs(os.path.join(self.path_save_solDATA, 'test'))
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
def solutionTransformer(self):
# div_train = 21000
# div_valid = 61
# div_test = 4500
# div_train = 0
# div_valid = 0
# div_test = 1500
div_train = self.config.div_train
div_valid = self.config.div_valid
div_test = self.config.div_test
if self.config.div_extend_valid !=0:
num_used_data = div_train + div_valid + div_test + self.config.div_extend_valid
else:
num_used_data = div_train + div_valid + div_test
num_data_loop = min(num_used_data, self.len_failureCases_solution)
for id_sol in range(0, div_train):
mode = "train"
case_config = (mode, id_sol)
self.task_queue.put(case_config)
for id_sol in range(0, div_valid):
mode = "valid"
case_config = (mode, id_sol)
self.task_queue.put(case_config)
for id_sol in range(0, div_test):
mode = "test"
case_config = (mode, id_sol)
self.task_queue.put(case_config)
# for id_sol in range(num_data_loop):
# for id_sol in range(self.config.id_start, num_data_loop):
# if id_sol < div_train:
# mode = "train"
# case_config = (mode, id_sol)
# self.task_queue.put(case_config)
# elif id_sol < (div_train+div_valid):
# mode = "valid"
# case_config = (mode, id_sol)
# self.task_queue.put(case_config)
# elif id_sol < (div_train + div_valid + div_test):
# mode = "test"
# case_config = (mode, id_sol)
# self.task_queue.put(case_config)
#
# elif id_sol <= num_used_data:
# mode = "valid"
# case_config = (mode, id_sol)
# self.task_queue.put(case_config)
time.sleep(0.3)
processes = []
for i in range(self.PROCESS_NUMBER):
# Run Multiprocesses
            p = Process(target=self.compute_thread, args=(str(i),))
processes.append(p)
[x.start() for x in processes]
def compute_thread(self,thread_id):
while True:
try:
case_config = self.task_queue.get(block=False)
(mode, id_sol) = case_config
print('thread {} get task:{} - {}'.format(thread_id, mode, id_sol))
self.pipeline(case_config)
except:
# print('thread {} no task, exit'.format(thread_id))
return
def pipeline(self, case_config):
(mode, id_sol) = case_config
agents_schedule, agents_goal, makespan, map_data, id_case = self.load_ExpertSolution(mode, id_sol)
# agents_schedule, agents_goal, makespan, map_data, id_case = self.load_ExpertSolution_(id_sol)
log_str = 'Transform_failureCases_ID_#{} in MAP_ID{}'.format(id_case[1],id_case[0])
print('############## {} ###############'.format(log_str))
# print(agents_schedule)
if mode == "train" or mode == "valid":
self.pathtransformer(map_data, agents_schedule, agents_goal, makespan + 1, id_case, mode)
else:
self.pathtransformer_test(map_data, agents_schedule, agents_goal, makespan + 1, id_case, mode)
def load_ExpertSolution(self, mode, ID_case):
if mode == 'train':
name_solution_file = self.list_sol_training[ID_case]
elif mode == 'valid':
name_solution_file = self.list_sol_valid[ID_case]
elif mode == 'test':
name_solution_file = self.list_sol_test[ID_case]
# id_solved_case = name_solution_file.split('_ID')[-1].split('.yaml')[0]
map_setup = name_solution_file.split('output_')[-1].split('_IDMap')[0]
id_sol_map = name_solution_file.split('_IDMap')[-1].split('_IDCase')[0]
id_sol_case = name_solution_file.split('_IDCase')[-1].split('_')[0]
name_inputfile = os.path.join(self.dirName_input,
'input_{}_IDMap{}_IDCase{}.yaml'.format(map_setup, id_sol_map, id_sol_case))
print(name_inputfile)
print(name_solution_file)
with open(name_inputfile, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
with open(name_solution_file, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_output = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
agentsConfig = data_config['agents']
num_agent = len(agentsConfig)
list_posObstacle = data_config['map']['obstacles']
        if list_posObstacle is None:
map_data = np.zeros(self.size_map, dtype=np.int64)
else:
map_data = self.setup_map(list_posObstacle)
schedule = data_output['schedule']
makespan = data_output['statistics']['makespan']
goal_allagents = np.zeros([num_agent, 2])
schedule_agentsState = np.zeros([makespan + 1, num_agent, 2])
schedule_agentsActions = np.zeros([makespan + 1, num_agent, self.num_actions])
schedule_agents = [schedule_agentsState, schedule_agentsActions]
hash_ids = np.zeros(self.num_agents)
for id_agent in range(num_agent):
goalX = agentsConfig[id_agent]['goal'][0]
goalY = agentsConfig[id_agent]['goal'][1]
goal_allagents[id_agent][:] = [goalX, goalY]
schedule_agents = self.obtainSchedule(id_agent, schedule, schedule_agents, goal_allagents, makespan + 1)
str_id = '{}_{}_{}'.format(id_sol_map,id_sol_case,id_agent)
int_id = int(hashlib.sha256(str_id.encode('utf-8')).hexdigest(), 16) % (10 ** 5)
# hash_ids[id_agent]=np.divide(int_id,10**5)
hash_ids[id_agent] = int_id
# print(id_sol_map, id_sol_case, hash_ids)
return schedule_agents, goal_allagents, makespan, map_data, (id_sol_map, id_sol_case, hash_ids)
def load_ExpertSolution_(self, ID_case):
name_solution_file = self.list_failureCases_solution[ID_case]
id_sol_case = name_solution_file.split('_ID')[-1].split('.yaml')[0]
map_setup = 'demo'
id_sol_map = '0'
name_inputfile = os.path.join(self.dirName_input,
'failureCases_ID{}.yaml'.format(id_sol_case))
# print(name_inputfile)
# print(name_solution_file)
with open(name_inputfile, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
with open(name_solution_file, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_output = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
agentsConfig = data_config['agents']
num_agent = len(agentsConfig)
list_posObstacle = data_config['map']['obstacles']
        if list_posObstacle is None:
map_data = np.zeros(self.size_map, dtype=np.int64)
else:
map_data = self.setup_map(list_posObstacle)
schedule = data_output['schedule']
makespan = data_output['statistics']['makespan']
# print(schedule)
goal_allagents = np.zeros([num_agent, 2])
schedule_agentsState = np.zeros([makespan + 1, num_agent, 2])
schedule_agentsActions = np.zeros([makespan + 1, num_agent, self.num_actions])
schedule_agents = [schedule_agentsState, schedule_agentsActions]
hash_ids = np.zeros(self.num_agents)
for id_agent in range(num_agent):
goalX = agentsConfig[id_agent]['goal'][0]
goalY = agentsConfig[id_agent]['goal'][1]
goal_allagents[id_agent][:] = [goalX, goalY]
schedule_agents = self.obtainSchedule(id_agent, schedule, schedule_agents, goal_allagents, makespan + 1)
str_id = '{}_{}_{}'.format(id_sol_map, id_sol_case, id_agent)
int_id = int(hashlib.sha256(str_id.encode('utf-8')).hexdigest(), 16) % (10 ** 5)
# hash_ids[id_agent]=np.divide(int_id,10**5)
hash_ids[id_agent] = int_id
print(schedule_agents)
# print(id_sol_map, id_sol_case, hash_ids)
return schedule_agents, goal_allagents, makespan, map_data, (id_sol_map, id_sol_case, hash_ids)
def obtainSchedule(self, id_agent, agentplan, schedule_agents, goal_allagents, teamMakeSpan):
name_agent = "agent{}".format(id_agent)
[schedule_agentsState, schedule_agentsActions] = schedule_agents
planCurrentAgent = agentplan[name_agent]
pathLengthCurrentAgent = len(planCurrentAgent)
actionKeyListAgent = []
for step in range(teamMakeSpan):
if step < pathLengthCurrentAgent:
currentX = planCurrentAgent[step]['x']
currentY = planCurrentAgent[step]['y']
else:
currentX = goal_allagents[id_agent][0]
currentY = goal_allagents[id_agent][1]
schedule_agentsState[step][id_agent][:] = [currentX, currentY]
# up left down right stop
actionVectorTarget = [0, 0, 0, 0, 0]
# map action with respect to the change of position of agent
if step < (pathLengthCurrentAgent - 1):
nextX = planCurrentAgent[step + 1]['x']
nextY = planCurrentAgent[step + 1]['y']
# actionCurrent = [nextX - currentX, nextY - currentY]
elif step >= (pathLengthCurrentAgent - 1):
nextX = goal_allagents[id_agent][0]
nextY = goal_allagents[id_agent][1]
actionCurrent = [nextX - currentX, nextY - currentY]
actionKeyIndex = self.delta.index(actionCurrent)
actionKeyListAgent.append(actionKeyIndex)
actionVectorTarget[actionKeyIndex] = 1
schedule_agentsActions[step][id_agent][:] = actionVectorTarget
return [schedule_agentsState,schedule_agentsActions]
def setup_map(self, list_posObstacle):
num_obstacle = len(list_posObstacle)
map_data = np.zeros(self.size_map)
for ID_obs in range(num_obstacle):
obstacleIndexX = list_posObstacle[ID_obs][0]
obstacleIndexY = list_posObstacle[ID_obs][1]
map_data[obstacleIndexX][obstacleIndexY] = 1
return map_data
def pathtransformer_RelativeCoordinate(self, map_data, agents_schedule, agents_goal, makespan, ID_case, mode):
# input: start and goal position,
# output: a set of file,
        # each file consists of state (map, goal, state) and target (action for the current state)
[schedule_agentsState, schedule_agentsActions] = agents_schedule
save_PairredData = {}
# print(ID_case)
# compute AdjacencyMatrix
GSO, communicationRadius = self.getAdjacencyMatrix(schedule_agentsState, self.communicationRadius)
(id_sol_map, id_sol_case, _) = ID_case
# transform into relative Coordinate, loop "makespan" times
self.AgentState.setmap(map_data)
input_seq_tensor = self.AgentState.toSeqInputTensor(agents_goal, schedule_agentsState, makespan)
list_input = input_seq_tensor.cpu().detach().numpy()
save_PairredData.update({'map': map_data, 'goal': agents_goal, 'inputState': schedule_agentsState,
'inputTensor': list_input, 'target': schedule_agentsActions,
'GSO': GSO,'makespan':makespan, 'HashIDs':ID_case[2], 'ID_Map':int(id_sol_map), 'ID_case':int(id_sol_case)})
# print(save_PairredData)
self.save(mode, save_PairredData, ID_case, makespan)
print("Save as {}set_#{} from MAP ID_{}.".format(mode, ID_case[1], ID_case[0]))
def pathtransformer_test(self, map_data, agents_schedule, agents_goal, makespan, ID_case, mode):
# input: start and goal position,
# output: a set of file,
        # each file consists of state (map, goal, state) and target (action for the current state)
(id_sol_map, id_sol_case, _) = ID_case
[schedule_agentsState, schedule_agentsActions] = agents_schedule
save_PairredData = {}
save_PairredData.update({'map': map_data, 'goal': agents_goal,
'inputState': schedule_agentsState[0],
'target': schedule_agentsActions,
'makespan': makespan, 'HashIDs':ID_case[2], 'ID_Map':int(id_sol_map), 'ID_case':int(id_sol_case)})
# print(save_PairredData)
self.save(mode, save_PairredData, ID_case, makespan)
print("Save as {}set_#{} from MAP ID_{}.".format(mode, ID_case[1], ID_case[0]))
def save(self, mode, save_PairredData, ID_case, makespan):
(id_sol_map, id_sol_case,_) = ID_case
file_name = os.path.join(self.path_save_solDATA, mode,'{}_IDMap{}_IDCase{}_MP{}.mat'.format(mode, id_sol_map, id_sol_case, makespan))
# print(file_name)
sio.savemat(file_name, save_PairredData)
def record_pathdata(self, mode, ID_case, makespan):
(id_sol_map, id_sol_case) = ID_case
data_name_mat = '{}_IDMap{}_IDCase{}_MP{}.mat'.format(mode, id_sol_map, id_sol_case, makespan)
if mode == "train":
self.list_seqtrain_file.append([data_name_mat, makespan, 0])
# print("\n train --", self.list_seqtrain_file)
for step in range(makespan):
self.list_train_file.append([data_name_mat, step, 0])
elif mode =='validStep':
self.list_seqvalid_file.append([data_name_mat, makespan, 0])
for step in range(makespan):
self.list_validStep_file.append([data_name_mat, step, 0])
elif mode == "valid":
self.list_valid_file.append([data_name_mat, makespan, 0]) # 0
elif mode == "test":
self.list_test_file.append([data_name_mat, makespan, 0]) # 0
def save_filepath(self):
dirName = self.path_save_solDATA
file_seqtrain_name = os.path.join(dirName,'{}seq_filename.csv'.format('train'))
with open(file_seqtrain_name, "w", newline="") as f:
writer = csv.writer(f)
print("\n train hello --", self.list_seqtrain_file)
writer.writerows(self.list_seqtrain_file)
file_train_name = os.path.join(dirName,'{}_filename.csv'.format('train'))
with open(file_train_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_train_file)
file_seqvalid_name = os.path.join(dirName,'{}seq_filename.csv'.format('valid'))
with open(file_seqvalid_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_seqvalid_file)
file_validStep_name = os.path.join(dirName,'{}_filename.csv'.format('validStep'))
with open(file_validStep_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_validStep_file)
file_valid_name = os.path.join(dirName,'{}_filename.csv'.format('valid'))
with open(file_valid_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_valid_file)
file_test_name = os.path.join(dirName,'{}_filename.csv'.format('test'))
with open(file_test_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_test_file)
def search_failureCases(self, dir):
# make a list of file name of input yaml
list_path = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if self.is_target_file(fname):
path = os.path.join(root, fname)
list_path.append(path)
return list_path
def is_target_file(self, filename):
DATA_EXTENSIONS = ['.yaml']
return any(filename.endswith(extension) for extension in DATA_EXTENSIONS)
def computeAdjacencyMatrix(self, pos, CommunicationRadius, connected=True):
# First, transpose the axis of pos so that the rest of the code follows
# through as legible as possible (i.e. convert the last two dimensions
# from 2 x nNodes to nNodes x 2)
# pos: TimeSteps x nAgents x 2 (X, Y)
# Get the appropriate dimensions
nSamples = pos.shape[0]
len_TimeSteps = pos.shape[0] # length of timesteps
nNodes = pos.shape[1] # Number of nodes
# Create the space to hold the adjacency matrices
W = np.zeros([len_TimeSteps, nNodes, nNodes])
threshold = CommunicationRadius # We compute a different
# threshold for each sample, because otherwise one bad trajectory
# will ruin all the adjacency matrices
for t in range(len_TimeSteps):
# Compute the distances
distances = squareform(pdist(pos[t])) # nNodes x nNodes
# Threshold them
W[t] = (distances < threshold).astype(pos.dtype)
# And get rid of the self-loops
W[t] = W[t] - np.diag(np.diag(W[t]))
# Now, check if it is connected, if not, let's make the
# threshold bigger
while (not graph.isConnected(W[t])) and (connected):
# while (not graph.isConnected(W[t])) and (connected):
# Increase threshold
threshold = threshold * 1.1 # Increase 10%
# Compute adjacency matrix
W[t] = (distances < threshold).astype(pos.dtype)
W[t] = W[t] - np.diag(np.diag(W[t]))
# And since the threshold has probably changed, and we want the same
# threshold for all nodes, we repeat:
W_norm = np.zeros([len_TimeSteps, nNodes, nNodes])
for t in range(len_TimeSteps):
# Initial matrix
allagentPos = pos[t]
distances = squareform(pdist(allagentPos, 'euclidean')) # nNodes x nNodes
W_t = (distances < threshold).astype(allagentPos.dtype)
W_t = W_t - np.diag(np.diag(W_t))
            if np.any(W_t):
                # if W_t has any non-zero entries, do normalization
if self.config.symmetric_norm:
deg = np.sum(W_t, axis=0) # nNodes (degree vector)
zeroDeg = np.nonzero(np.abs(deg) < self.zeroTolerance)
deg[zeroDeg] = 1.
invSqrtDeg = np.sqrt(1. / deg)
invSqrtDeg[zeroDeg] = 0.
Deg = np.diag(invSqrtDeg)
W_t = Deg @ W_t @ Deg
maxEigenValue = self.get_maxEigenValue(W_t)
W_norm[t] = W_t/maxEigenValue
            else:
                # if W_t is the all-zero matrix, don't do any normalization
                W_norm[t] = W_t
return W_norm, threshold
def get_maxEigenValue(self, matrix):
isSymmetric = np.allclose(matrix, np.transpose(matrix, axes=[1, 0]))
if isSymmetric:
W = np.linalg.eigvalsh(matrix)
else:
W = np.linalg.eigvals(matrix)
maxEigenvalue = np.max(np.real(W), axis=0)
return maxEigenvalue
# return np.max(np.abs(np.linalg.eig(matrix)[0]))
def computeAdjacencyMatrix_fixedCommRadius(self, pos, CommunicationRadius, connected=True):
len_TimeSteps = pos.shape[0] # length of timesteps
nNodes = pos.shape[1] # Number of nodes
# Create the space to hold the adjacency matrices
W_norm = np.zeros([len_TimeSteps, nNodes, nNodes])
for t in range(len_TimeSteps):
# Initial matrix
allagentPos = pos[t]
distances = squareform(pdist(allagentPos, 'euclidean')) # nNodes x nNodes
W = (distances < CommunicationRadius).astype(allagentPos.dtype)
W = W - np.diag(np.diag(W))
if np.any(W):
# if W is all non-zero matrix, do normalization
if self.config.symmetric_norm:
deg = np.sum(W, axis=0) # nNodes (degree vector)
zeroDeg = np.nonzero(np.abs(deg) < self.zeroTolerance)
deg[zeroDeg] = 1.
invSqrtDeg = np.sqrt(1. / deg)
invSqrtDeg[zeroDeg] = 0.
Deg = np.diag(invSqrtDeg)
W = Deg @ W @ Deg
maxEigenValue = self.get_maxEigenValue(W)
W_norm[t] = W/maxEigenValue
else:
# if W is all zero matrix, don't do any normalization
                print('No robots are connected at this moment, all-zero matrix.')
W_norm[t] = W
return W_norm, CommunicationRadius
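# --- Editor's illustrative sketch (not part of the original pipeline) ---
# Both computeAdjacencyMatrix variants above normalise each per-timestep
# adjacency matrix the same way: optional symmetric degree normalisation
# D^{-1/2} W D^{-1/2}, followed by scaling with the largest eigenvalue so the
# graph shift operator has unit spectral radius. A standalone version:
def normalize_gso(W_t, symmetric_norm=False, zero_tolerance=1e-9):
    if not np.any(W_t):
        return W_t  # no robots connected at this timestep: keep the all-zero matrix
    W_t = W_t.astype(float)
    if symmetric_norm:
        deg = np.sum(W_t, axis=0)
        zero_deg = np.nonzero(np.abs(deg) < zero_tolerance)
        deg[zero_deg] = 1.
        inv_sqrt_deg = np.sqrt(1. / deg)
        inv_sqrt_deg[zero_deg] = 0.
        D = np.diag(inv_sqrt_deg)
        W_t = D @ W_t @ D
    eigvals = np.linalg.eigvalsh(W_t) if np.allclose(W_t, W_t.T) else np.linalg.eigvals(W_t)
    return W_t / np.max(np.real(eigvals))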
if __name__ == '__main__':
DataTransformer = DataTransformer(args)
DataTransformer.reset()
DataTransformer.solutionTransformer()
|
websocket_client.py
|
import json
import logging
import socket
import ssl
import sys
import traceback
from datetime import datetime
from threading import Lock, Thread
from time import sleep
from typing import Optional
import websocket
from howtrader.trader.utility import get_file_logger
class WebsocketClient:
"""
Websocket API
After creating the client object, use start() to run worker and ping threads.
The worker thread connects websocket automatically.
Use stop to stop threads and disconnect websocket before destroying the client
object (especially when exiting the programme).
Default serialization format is json.
Callbacks to overrides:
* unpack_data
* on_connected
* on_disconnected
* on_packet
* on_error
    After start() is called, the ping thread will ping the server every 60 seconds.
If you want to send anything other than JSON, override send_packet.
"""
def __init__(self):
"""Constructor"""
self.host = None
self._ws_lock = Lock()
self._ws = None
self._worker_thread = None
self._ping_thread = None
self._active = False
self.proxy_host = None
self.proxy_port = None
self.ping_interval = 60 # seconds
self.header = {}
self.logger: Optional[logging.Logger] = None
# For debugging
self._last_sent_text = None
self._last_received_text = None
def init(self,
host: str,
proxy_host: str = "",
proxy_port: int = 0,
ping_interval: int = 60,
header: dict = None,
log_path: Optional[str] = None,
):
"""
:param host:
:param proxy_host:
:param proxy_port:
:param header:
:param ping_interval: unit: seconds, type: int
:param log_path: optional. file to save log.
"""
self.host = host
self.ping_interval = ping_interval # seconds
if log_path is not None:
self.logger = get_file_logger(log_path)
self.logger.setLevel(logging.DEBUG)
if header:
self.header = header
if proxy_host and proxy_port:
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def start(self):
"""
        Start the client. The on_connected callback is called after the websocket
        is connected successfully.
        Please don't send packets until the on_connected function is called.
"""
self._active = True
self._worker_thread = Thread(target=self._run)
self._worker_thread.start()
self._ping_thread = Thread(target=self._run_ping)
self._ping_thread.start()
def stop(self):
"""
Stop the client.
"""
self._active = False
self._disconnect()
def join(self):
"""
Wait till all threads finish.
This function cannot be called from worker thread or callback function.
"""
self._ping_thread.join()
self._worker_thread.join()
def send_packet(self, packet: dict):
"""
Send a packet (dict data) to server
override this if you want to send non-json packet
"""
text = json.dumps(packet)
self._record_last_sent_text(text)
return self._send_text(text)
def _log(self, msg, *args):
logger = self.logger
if logger:
logger.debug(msg, *args)
def _send_text(self, text: str):
"""
Send a text string to server.
"""
ws = self._ws
if ws:
ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
self._log('sent text: %s', text)
def _send_binary(self, data: bytes):
"""
Send bytes data to server.
"""
ws = self._ws
if ws:
ws._send_binary(data)
self._log('sent binary: %s', data)
def _create_connection(self, *args, **kwargs):
""""""
return websocket.create_connection(*args, **kwargs)
def _ensure_connection(self):
""""""
triggered = False
with self._ws_lock:
if self._ws is None:
self._ws = self._create_connection(
self.host,
sslopt={"cert_reqs": ssl.CERT_NONE},
# http_proxy_host=self.proxy_host,
# http_proxy_port=self.proxy_port,
header=self.header
)
triggered = True
if triggered:
self.on_connected()
def _disconnect(self):
"""
"""
triggered = False
with self._ws_lock:
if self._ws:
ws: websocket.WebSocket = self._ws
self._ws = None
triggered = True
if triggered:
ws.close()
self.on_disconnected()
def _run(self):
"""
Keep running till stop is called.
"""
try:
while self._active:
try:
self._ensure_connection()
ws = self._ws
if ws:
text = ws.recv()
# ws object is closed when recv function is blocking
if not text:
self._disconnect()
continue
self._record_last_received_text(text)
try:
data = self.unpack_data(text)
except ValueError as e:
print("websocket unable to parse data: " + text)
raise e
self._log('recv data: %s', data)
self.on_packet(data)
# ws is closed before recv function is called
# For socket.error, see Issue #1608
except (
websocket.WebSocketConnectionClosedException,
websocket.WebSocketBadStatusException,
socket.error
):
self._disconnect()
# other internal exception raised in on_packet
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
@staticmethod
def unpack_data(data: str):
"""
Default serialization format is json.
override this method if you want to use other serialization format.
"""
return json.loads(data)
def _run_ping(self):
""""""
while self._active:
try:
self._ping()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
# self._run() will reconnect websocket
sleep(1)
for i in range(self.ping_interval):
if not self._active:
break
sleep(1)
def _ping(self):
""""""
ws = self._ws
if ws:
ws.send("ping", websocket.ABNF.OPCODE_PING)
@staticmethod
def on_connected():
"""
Callback when websocket is connected successfully.
"""
pass
@staticmethod
def on_disconnected():
"""
Callback when websocket connection is lost.
"""
pass
@staticmethod
def on_packet(packet: dict):
"""
Callback when receiving data from server.
"""
pass
def on_error(self, exception_type: type, exception_value: Exception, tb):
"""
Callback when exception raised.
"""
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb)
)
return sys.excepthook(exception_type, exception_value, tb)
def exception_detail(
self, exception_type: type, exception_value: Exception, tb
):
"""
Print detailed exception information.
"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
def _record_last_sent_text(self, text: str):
"""
Record last sent text for debug purpose.
"""
self._last_sent_text = text[:1000]
def _record_last_received_text(self, text: str):
"""
Record last received text for debug purpose.
"""
self._last_received_text = text[:1000]
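# --- Editor's minimal usage sketch (the endpoint URL below is a made-up example) ---
# Shows the lifecycle described in the class docstring: subclass, override the
# callbacks, init(), start(), then stop() and join() before shutting down.
class _PrintingClient(WebsocketClient):

    def on_connected(self):
        print("connected")

    def on_packet(self, packet: dict):
        print("packet:", packet)


if __name__ == "__main__":
    client = _PrintingClient()
    client.init(host="wss://example.com/ws", ping_interval=60)
    client.start()   # worker thread connects; on_connected fires after the handshake
    sleep(10)        # let it receive a few packets
    client.stop()
    client.join()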
|
test_ssl.py
|
import pytest
import threading
import socket as stdlib_socket
import ssl as stdlib_ssl
from contextlib import contextmanager
from functools import partial
from OpenSSL import SSL
import trustme
from async_generator import async_generator, yield_, asynccontextmanager
import trio
from .. import _core
from .._highlevel_socket import SocketStream, SocketListener
from .._highlevel_generic import aclose_forcefully
from .._core import ClosedResourceError, BrokenResourceError
from .._highlevel_open_tcp_stream import open_tcp_stream
from .. import ssl as tssl
from .. import socket as tsocket
from .._util import ConflictDetector
from .._core.tests.tutil import slow
from ..testing import (
assert_checkpoints,
Sequencer,
memory_stream_pair,
lockstep_stream_pair,
check_two_way_stream,
)
# We have two different kinds of echo server fixtures we use for testing. The
# first is a real server written using the stdlib ssl module and blocking
# sockets. It runs in a thread and we talk to it over a real socketpair(), to
# validate interoperability in a semi-realistic setting.
#
# The second is a very weird virtual echo server that lives inside a custom
# Stream class. It lives entirely inside the Python object space; there are no
# operating system calls in it at all. No threads, no I/O, nothing. Its
# 'send_all' call takes encrypted data from a client and feeds it directly into
# the server-side TLS state engine to decrypt, then takes that data, feeds it
# back through to get the encrypted response, and returns it from 'receive_some'. This
# gives us full control and reproducibility. This server is written using
# PyOpenSSL, so that we can trigger renegotiations on demand. It also allows
# us to insert random (virtual) delays, to really exercise all the weird paths
# in SSLStream's state engine.
#
# Both present a certificate for "trio-test-1.example.org".
TRIO_TEST_CA = trustme.CA()
TRIO_TEST_1_CERT = TRIO_TEST_CA.issue_server_cert("trio-test-1.example.org")
SERVER_CTX = stdlib_ssl.create_default_context(stdlib_ssl.Purpose.CLIENT_AUTH)
TRIO_TEST_1_CERT.configure_cert(SERVER_CTX)
CLIENT_CTX = stdlib_ssl.create_default_context()
TRIO_TEST_CA.configure_trust(CLIENT_CTX)
# The blocking socket server.
def ssl_echo_serve_sync(sock, *, expect_fail=False):
try:
wrapped = SERVER_CTX.wrap_socket(
sock, server_side=True, suppress_ragged_eofs=False
)
wrapped.do_handshake()
while True:
data = wrapped.recv(4096)
if not data:
# other side has initiated a graceful shutdown; we try to
# respond in kind but it's legal for them to have already gone
# away.
exceptions = (BrokenPipeError,)
# Under unclear conditions, CPython sometimes raises
# SSLWantWriteError here. This is a bug (bpo-32219), but it's
# not our bug, so ignore it.
exceptions += (stdlib_ssl.SSLWantWriteError,)
try:
wrapped.unwrap()
except exceptions:
pass
return
wrapped.sendall(data)
except Exception as exc:
if expect_fail:
print("ssl_echo_serve_sync got error as expected:", exc)
else: # pragma: no cover
raise
else:
if expect_fail: # pragma: no cover
raise RuntimeError("failed to fail?")
# Fixture that gives a raw socket connected to a trio-test-1 echo server
# (running in a thread). Useful for testing making connections with different
# SSLContexts.
@asynccontextmanager
@async_generator
async def ssl_echo_server_raw(**kwargs):
a, b = stdlib_socket.socketpair()
async with trio.open_nursery() as nursery:
# Exiting the 'with a, b' context manager closes the sockets, which
# causes the thread to exit (possibly with an error), which allows the
# nursery context manager to exit too.
with a, b:
nursery.start_soon(
trio.run_sync_in_worker_thread,
partial(ssl_echo_serve_sync, b, **kwargs)
)
await yield_(SocketStream(tsocket.from_stdlib_socket(a)))
# Fixture that gives a properly set up SSLStream connected to a trio-test-1
# echo server (running in a thread)
@asynccontextmanager
@async_generator
async def ssl_echo_server(**kwargs):
async with ssl_echo_server_raw(**kwargs) as sock:
await yield_(
tssl.SSLStream(
sock, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
)
# The weird in-memory server ... thing.
# Doesn't inherit from Stream because I left out the methods that we don't
# actually need.
class PyOpenSSLEchoStream:
def __init__(self, sleeper=None):
ctx = SSL.Context(SSL.SSLv23_METHOD)
# TLS 1.3 removes renegotiation support. Which is great for them, but
# we still have to support versions before that, and that means we
        # need to test renegotiation support, which means we need to force this
# to use a lower version where this test server can trigger
# renegotiations. Of course TLS 1.3 support isn't released yet, but
# I'm told that this will work once it is. (And once it is we can
# remove the pragma: no cover too.) Alternatively, once we drop
# support for CPython 3.5 on macOS, then we could switch to using
# TLSv1_2_METHOD.
#
# Discussion: https://github.com/pyca/pyopenssl/issues/624
if hasattr(SSL, "OP_NO_TLSv1_3"): # pragma: no cover
ctx.set_options(SSL.OP_NO_TLSv1_3)
# Unfortunately there's currently no way to say "use 1.3 or worse", we
# can only disable specific versions. And if the two sides start
# negotiating 1.4 at some point in the future, it *might* mean that
# our tests silently stop working properly. So the next line is a
# tripwire to remind us we need to revisit this stuff in 5 years or
# whatever when the next TLS version is released:
assert not hasattr(SSL, "OP_NO_TLSv1_4")
TRIO_TEST_1_CERT.configure_cert(ctx)
self._conn = SSL.Connection(ctx, None)
self._conn.set_accept_state()
self._lot = _core.ParkingLot()
self._pending_cleartext = bytearray()
self._send_all_conflict_detector = ConflictDetector(
"simultaneous calls to PyOpenSSLEchoStream.send_all"
)
self._receive_some_conflict_detector = ConflictDetector(
"simultaneous calls to PyOpenSSLEchoStream.receive_some"
)
if sleeper is None:
async def no_op_sleeper(_):
return
self.sleeper = no_op_sleeper
else:
self.sleeper = sleeper
async def aclose(self):
self._conn.bio_shutdown()
def renegotiate_pending(self):
return self._conn.renegotiate_pending()
def renegotiate(self):
        # Returns false if a renegotiation is already in progress, meaning
# nothing happens.
assert self._conn.renegotiate()
async def wait_send_all_might_not_block(self):
async with self._send_all_conflict_detector:
await _core.checkpoint()
await self.sleeper("wait_send_all_might_not_block")
async def send_all(self, data):
print(" --> transport_stream.send_all")
async with self._send_all_conflict_detector:
await _core.checkpoint()
await self.sleeper("send_all")
self._conn.bio_write(data)
while True:
await self.sleeper("send_all")
try:
data = self._conn.recv(1)
except SSL.ZeroReturnError:
self._conn.shutdown()
print("renegotiations:", self._conn.total_renegotiations())
break
except SSL.WantReadError:
break
else:
self._pending_cleartext += data
self._lot.unpark_all()
await self.sleeper("send_all")
print(" <-- transport_stream.send_all finished")
async def receive_some(self, nbytes):
print(" --> transport_stream.receive_some")
async with self._receive_some_conflict_detector:
try:
await _core.checkpoint()
while True:
await self.sleeper("receive_some")
try:
return self._conn.bio_read(nbytes)
except SSL.WantReadError:
# No data in our ciphertext buffer; try to generate
# some.
if self._pending_cleartext:
# We have some cleartext; maybe we can encrypt it
# and then return it.
print(" trying", self._pending_cleartext)
try:
# PyOpenSSL bug: doesn't accept bytearray
# https://github.com/pyca/pyopenssl/issues/621
next_byte = self._pending_cleartext[0:1]
self._conn.send(bytes(next_byte))
# Apparently this next bit never gets hit in the
# test suite, but it's not an interesting omission
# so let's pragma it.
except SSL.WantReadError: # pragma: no cover
# We didn't manage to send the cleartext (and
# in particular we better leave it there to
# try again, due to openssl's retry
# semantics), but it's possible we pushed a
# renegotiation forward and *now* we have data
# to send.
try:
return self._conn.bio_read(nbytes)
except SSL.WantReadError:
# Nope. We're just going to have to wait
# for someone to call send_all() to give
# use more data.
print("parking (a)")
await self._lot.park()
else:
# We successfully sent that byte, so we don't
# have to again.
del self._pending_cleartext[0:1]
else:
# no pending cleartext; nothing to do but wait for
# someone to call send_all
print("parking (b)")
await self._lot.park()
finally:
await self.sleeper("receive_some")
print(" <-- transport_stream.receive_some finished")
async def test_PyOpenSSLEchoStream_gives_resource_busy_errors():
# Make sure that PyOpenSSLEchoStream complains if two tasks call send_all
# at the same time, or ditto for receive_some. The tricky cases where SSLStream
    # might accidentally do this are during renegotiation, which we test using
# PyOpenSSLEchoStream, so this makes sure that if we do have a bug then
# PyOpenSSLEchoStream will notice and complain.
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.send_all, b"x")
nursery.start_soon(s.send_all, b"x")
assert "simultaneous" in str(excinfo.value)
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.send_all, b"x")
nursery.start_soon(s.wait_send_all_might_not_block)
assert "simultaneous" in str(excinfo.value)
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.wait_send_all_might_not_block)
nursery.start_soon(s.wait_send_all_might_not_block)
assert "simultaneous" in str(excinfo.value)
s = PyOpenSSLEchoStream()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(s.receive_some, 1)
nursery.start_soon(s.receive_some, 1)
assert "simultaneous" in str(excinfo.value)
@contextmanager
def virtual_ssl_echo_server(**kwargs):
fakesock = PyOpenSSLEchoStream(**kwargs)
yield tssl.SSLStream(
fakesock, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
def ssl_wrap_pair(
client_transport, server_transport, *, client_kwargs={}, server_kwargs={}
):
client_ssl = tssl.SSLStream(
client_transport,
CLIENT_CTX,
server_hostname="trio-test-1.example.org",
**client_kwargs
)
server_ssl = tssl.SSLStream(
server_transport, SERVER_CTX, server_side=True, **server_kwargs
)
return client_ssl, server_ssl
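# The two pair constructors below differ only in their transport: the memory
# stream pair buffers data between the two ends, while the lockstep pair has
# no buffer at all (every send must be matched by a receive), which is handy
# for provoking "clogged" conditions in the tests.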
def ssl_memory_stream_pair(**kwargs):
client_transport, server_transport = memory_stream_pair()
return ssl_wrap_pair(client_transport, server_transport, **kwargs)
def ssl_lockstep_stream_pair(**kwargs):
client_transport, server_transport = lockstep_stream_pair()
return ssl_wrap_pair(client_transport, server_transport, **kwargs)
def test_exports():
# Just a quick check to make sure _reexport isn't totally broken
assert hasattr(tssl, "SSLError")
assert "SSLError" in tssl.__dict__.keys()
assert hasattr(tssl, "Purpose")
assert "Purpose" in tssl.__dict__.keys()
# Intentionally omitted
assert not hasattr(tssl, "SSLContext")
# Simple smoke test for handshake/send/receive/shutdown talking to a
# synchronous server, plus make sure that we do the bare minimum of
# certificate checking (even though this is really Python's responsibility)
async def test_ssl_client_basics():
# Everything OK
async with ssl_echo_server() as s:
assert not s.server_side
await s.send_all(b"x")
assert await s.receive_some(1) == b"x"
await s.aclose()
# Didn't configure the CA file, should fail
async with ssl_echo_server_raw(expect_fail=True) as sock:
client_ctx = stdlib_ssl.create_default_context()
s = tssl.SSLStream(
sock, client_ctx, server_hostname="trio-test-1.example.org"
)
assert not s.server_side
with pytest.raises(BrokenResourceError) as excinfo:
await s.send_all(b"x")
assert isinstance(excinfo.value.__cause__, tssl.SSLError)
# Trusted CA, but wrong host name
async with ssl_echo_server_raw(expect_fail=True) as sock:
s = tssl.SSLStream(
sock, CLIENT_CTX, server_hostname="trio-test-2.example.org"
)
assert not s.server_side
with pytest.raises(BrokenResourceError) as excinfo:
await s.send_all(b"x")
assert isinstance(excinfo.value.__cause__, tssl.CertificateError)
async def test_ssl_server_basics():
a, b = stdlib_socket.socketpair()
with a, b:
server_sock = tsocket.from_stdlib_socket(b)
server_transport = tssl.SSLStream(
SocketStream(server_sock), SERVER_CTX, server_side=True
)
assert server_transport.server_side
def client():
client_sock = CLIENT_CTX.wrap_socket(
a, server_hostname="trio-test-1.example.org"
)
client_sock.sendall(b"x")
assert client_sock.recv(1) == b"y"
client_sock.sendall(b"z")
client_sock.unwrap()
t = threading.Thread(target=client)
t.start()
assert await server_transport.receive_some(1) == b"x"
await server_transport.send_all(b"y")
assert await server_transport.receive_some(1) == b"z"
assert await server_transport.receive_some(1) == b""
await server_transport.aclose()
t.join()
async def test_attributes():
async with ssl_echo_server_raw(expect_fail=True) as sock:
good_ctx = CLIENT_CTX
bad_ctx = stdlib_ssl.create_default_context()
s = tssl.SSLStream(
sock, good_ctx, server_hostname="trio-test-1.example.org"
)
assert s.transport_stream is sock
# Forwarded attribute getting
assert s.context is good_ctx
assert s.server_side == False # noqa
assert s.server_hostname == "trio-test-1.example.org"
with pytest.raises(AttributeError):
s.asfdasdfsa
# __dir__
assert "transport_stream" in dir(s)
assert "context" in dir(s)
# Setting the attribute goes through to the underlying object
# most attributes on SSLObject are read-only
with pytest.raises(AttributeError):
s.server_side = True
with pytest.raises(AttributeError):
s.server_hostname = "asdf"
# but .context is *not*. Check that we forward attribute setting by
# making sure that after we set the bad context our handshake indeed
# fails:
s.context = bad_ctx
assert s.context is bad_ctx
with pytest.raises(BrokenResourceError) as excinfo:
await s.do_handshake()
assert isinstance(excinfo.value.__cause__, tssl.SSLError)
# Note: this test fails horribly if we force TLS 1.2 and trigger a
# renegotiation at the beginning (e.g. by switching to the pyopenssl
# server). Usually the client crashes in SSLObject.write with "UNEXPECTED
# RECORD"; sometimes we get something more exotic like a SyscallError. This is
# odd because openssl isn't doing any syscalls, but so it goes. After lots of
# websearching I'm pretty sure this is due to a bug in OpenSSL, where it just
# can't reliably handle full-duplex communication combined with
# renegotiation. Nice, eh?
#
# https://rt.openssl.org/Ticket/Display.html?id=3712
# https://rt.openssl.org/Ticket/Display.html?id=2481
# http://openssl.6102.n7.nabble.com/TLS-renegotiation-failure-on-receiving-application-data-during-handshake-td48127.html
# https://stackoverflow.com/questions/18728355/ssl-renegotiation-with-full-duplex-socket-communication
#
# In some variants of this test (maybe only against the java server?) I've
# also seen cases where our send_all blocks waiting to write, and then our receive_some
# also blocks waiting to write, and they never wake up again. It looks like
# some kind of deadlock. I suspect there may be an issue where we've filled up
# the send buffers, and the remote side is trying to handle the renegotiation
# from inside a write() call, so it has a problem: there's all this application
# data clogging up the pipe, but it can't process and return it to the
# application because it's in write(), and it doesn't want to buffer infinite
# amounts of data, and... actually I guess those are the only two choices.
#
# NSS even documents that you shouldn't try to do a renegotiation except when
# the connection is idle:
#
# https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/SSL_functions/sslfnc.html#1061582
#
# I begin to see why HTTP/2 forbids renegotiation and TLS 1.3 removes it...
async def test_full_duplex_basics():
CHUNKS = 30
CHUNK_SIZE = 32768
EXPECTED = CHUNKS * CHUNK_SIZE
sent = bytearray()
received = bytearray()
async def sender(s):
nonlocal sent
for i in range(CHUNKS):
print(i)
chunk = bytes([i] * CHUNK_SIZE)
sent += chunk
await s.send_all(chunk)
async def receiver(s):
nonlocal received
while len(received) < EXPECTED:
chunk = await s.receive_some(CHUNK_SIZE // 2)
received += chunk
async with ssl_echo_server() as s:
async with _core.open_nursery() as nursery:
nursery.start_soon(sender, s)
nursery.start_soon(receiver, s)
# And let's have some doing handshakes too, everyone
# simultaneously
nursery.start_soon(s.do_handshake)
nursery.start_soon(s.do_handshake)
await s.aclose()
assert len(sent) == len(received) == EXPECTED
assert sent == received
async def test_renegotiation_simple():
with virtual_ssl_echo_server() as s:
await s.do_handshake()
s.transport_stream.renegotiate()
await s.send_all(b"a")
assert await s.receive_some(1) == b"a"
# Have to send some more data back and forth to make sure the
# renegotiation is finished before shutting down the
# connection... otherwise openssl raises an error. I think this is a
# bug in openssl but what can ya do.
await s.send_all(b"b")
assert await s.receive_some(1) == b"b"
await s.aclose()
@slow
async def test_renegotiation_randomized(mock_clock):
# The only blocking things in this function are our random sleeps, so 0 is
# a good threshold.
mock_clock.autojump_threshold = 0
import random
r = random.Random(0)
async def sleeper(_):
await trio.sleep(r.uniform(0, 10))
async def clear():
while s.transport_stream.renegotiate_pending():
with assert_checkpoints():
await send(b"-")
with assert_checkpoints():
await expect(b"-")
print("-- clear --")
async def send(byte):
await s.transport_stream.sleeper("outer send")
print("calling SSLStream.send_all", byte)
with assert_checkpoints():
await s.send_all(byte)
async def expect(expected):
await s.transport_stream.sleeper("expect")
print("calling SSLStream.receive_some, expecting", expected)
assert len(expected) == 1
with assert_checkpoints():
assert await s.receive_some(1) == expected
with virtual_ssl_echo_server(sleeper=sleeper) as s:
await s.do_handshake()
await send(b"a")
s.transport_stream.renegotiate()
await expect(b"a")
await clear()
for i in range(100):
b1 = bytes([i % 0xff])
b2 = bytes([(2 * i) % 0xff])
s.transport_stream.renegotiate()
async with _core.open_nursery() as nursery:
nursery.start_soon(send, b1)
nursery.start_soon(expect, b1)
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b2)
nursery.start_soon(send, b2)
await clear()
for i in range(100):
b1 = bytes([i % 0xff])
b2 = bytes([(2 * i) % 0xff])
await send(b1)
s.transport_stream.renegotiate()
await expect(b1)
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b2)
nursery.start_soon(send, b2)
await clear()
# Checking that wait_send_all_might_not_block and receive_some don't
# conflict:
# 1) Set up a situation where expect (receive_some) is blocked sending,
# and wait_send_all_might_not_block comes in.
# Our receive_some() call will get stuck when it hits send_all
async def sleeper_with_slow_send_all(method):
if method == "send_all":
await trio.sleep(100000)
# And our wait_send_all_might_not_block call will give it time to get
# stuck, and then start
async def sleep_then_wait_writable():
await trio.sleep(1000)
await s.wait_send_all_might_not_block()
with virtual_ssl_echo_server(sleeper=sleeper_with_slow_send_all) as s:
await send(b"x")
s.transport_stream.renegotiate()
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b"x")
nursery.start_soon(sleep_then_wait_writable)
await clear()
await s.aclose()
# 2) Same, but now wait_send_all_might_not_block is stuck when
# receive_some tries to send.
async def sleeper_with_slow_wait_writable_and_expect(method):
if method == "wait_send_all_might_not_block":
await trio.sleep(100000)
elif method == "expect":
await trio.sleep(1000)
with virtual_ssl_echo_server(
sleeper=sleeper_with_slow_wait_writable_and_expect
) as s:
await send(b"x")
s.transport_stream.renegotiate()
async with _core.open_nursery() as nursery:
nursery.start_soon(expect, b"x")
nursery.start_soon(s.wait_send_all_might_not_block)
await clear()
await s.aclose()
async def test_resource_busy_errors():
async def do_send_all():
with assert_checkpoints():
await s.send_all(b"x")
async def do_receive_some():
with assert_checkpoints():
await s.receive_some(1)
async def do_wait_send_all_might_not_block():
with assert_checkpoints():
await s.wait_send_all_might_not_block()
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all)
nursery.start_soon(do_send_all)
assert "another task" in str(excinfo.value)
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_receive_some)
nursery.start_soon(do_receive_some)
assert "another task" in str(excinfo.value)
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all)
nursery.start_soon(do_wait_send_all_might_not_block)
assert "another task" in str(excinfo.value)
s, _ = ssl_lockstep_stream_pair()
with pytest.raises(_core.BusyResourceError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(do_wait_send_all_might_not_block)
nursery.start_soon(do_wait_send_all_might_not_block)
assert "another task" in str(excinfo.value)
async def test_wait_writable_calls_underlying_wait_writable():
record = []
class NotAStream:
async def wait_send_all_might_not_block(self):
record.append("ok")
ctx = stdlib_ssl.create_default_context()
s = tssl.SSLStream(NotAStream(), ctx, server_hostname="x")
await s.wait_send_all_might_not_block()
assert record == ["ok"]
async def test_checkpoints():
async with ssl_echo_server() as s:
with assert_checkpoints():
await s.do_handshake()
with assert_checkpoints():
await s.do_handshake()
with assert_checkpoints():
await s.wait_send_all_might_not_block()
with assert_checkpoints():
await s.send_all(b"xxx")
with assert_checkpoints():
await s.receive_some(1)
# These receive_some's in theory could return immediately, because the
# "xxx" was sent in a single record and after the first
# receive_some(1) the rest are sitting inside the SSLObject's internal
# buffers.
with assert_checkpoints():
await s.receive_some(1)
with assert_checkpoints():
await s.receive_some(1)
with assert_checkpoints():
await s.unwrap()
async with ssl_echo_server() as s:
await s.do_handshake()
with assert_checkpoints():
await s.aclose()
async def test_send_all_empty_string():
async with ssl_echo_server() as s:
await s.do_handshake()
# underlying SSLObject interprets writing b"" as indicating an EOF,
# for some reason. Make sure we don't inherit this.
with assert_checkpoints():
await s.send_all(b"")
with assert_checkpoints():
await s.send_all(b"")
await s.send_all(b"x")
assert await s.receive_some(1) == b"x"
await s.aclose()
@pytest.mark.parametrize("https_compatible", [False, True])
async def test_SSLStream_generic(https_compatible):
async def stream_maker():
return ssl_memory_stream_pair(
client_kwargs={"https_compatible": https_compatible},
server_kwargs={"https_compatible": https_compatible},
)
async def clogged_stream_maker():
client, server = ssl_lockstep_stream_pair()
# If we don't do handshakes up front, then we run into a problem in
# the following situation:
# - server does wait_send_all_might_not_block
# - client does receive_some to unclog it
# Then the client's receive_some will actually send some data to start
# the handshake, and itself get stuck.
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
return client, server
await check_two_way_stream(stream_maker, clogged_stream_maker)
async def test_unwrap():
client_ssl, server_ssl = ssl_memory_stream_pair()
client_transport = client_ssl.transport_stream
server_transport = server_ssl.transport_stream
seq = Sequencer()
async def client():
await client_ssl.do_handshake()
await client_ssl.send_all(b"x")
assert await client_ssl.receive_some(1) == b"y"
await client_ssl.send_all(b"z")
# After sending that, disable outgoing data from our end, to make
# sure the server doesn't see our EOF until after we've sent some
# trailing data
async with seq(0):
send_all_hook = client_transport.send_stream.send_all_hook
client_transport.send_stream.send_all_hook = None
assert await client_ssl.receive_some(1) == b""
assert client_ssl.transport_stream is client_transport
# We just received EOF. Unwrap the connection and send some more.
raw, trailing = await client_ssl.unwrap()
assert raw is client_transport
assert trailing == b""
assert client_ssl.transport_stream is None
await raw.send_all(b"trailing")
# Reconnect the streams. Now the server will receive both our shutdown
# acknowledgement + the trailing data in a single lump.
client_transport.send_stream.send_all_hook = send_all_hook
await client_transport.send_stream.send_all_hook()
async def server():
await server_ssl.do_handshake()
assert await server_ssl.receive_some(1) == b"x"
await server_ssl.send_all(b"y")
assert await server_ssl.receive_some(1) == b"z"
# Now client is blocked waiting for us to send something, but
# instead we close the TLS connection (with sequencer to make sure
# that the client won't see and automatically respond before we've had
# a chance to disable the client->server transport)
async with seq(1):
raw, trailing = await server_ssl.unwrap()
assert raw is server_transport
assert trailing == b"trailing"
assert server_ssl.transport_stream is None
async with _core.open_nursery() as nursery:
nursery.start_soon(client)
nursery.start_soon(server)
async def test_closing_nice_case():
# the nice case: graceful closes all around
client_ssl, server_ssl = ssl_memory_stream_pair()
client_transport = client_ssl.transport_stream
# Both the handshake and the close require back-and-forth discussion, so
# we need to run them concurrently
async def client_closer():
with assert_checkpoints():
await client_ssl.aclose()
async def server_closer():
assert await server_ssl.receive_some(10) == b""
assert await server_ssl.receive_some(10) == b""
with assert_checkpoints():
await server_ssl.aclose()
async with _core.open_nursery() as nursery:
nursery.start_soon(client_closer)
nursery.start_soon(server_closer)
# closing the SSLStream also closes its transport
with pytest.raises(ClosedResourceError):
await client_transport.send_all(b"123")
# once closed, it's OK to close again
with assert_checkpoints():
await client_ssl.aclose()
with assert_checkpoints():
await client_ssl.aclose()
# Trying to send more data does not work
with assert_checkpoints():
with pytest.raises(ClosedResourceError):
await server_ssl.send_all(b"123")
    # And once the connection has been closed *locally*, then instead of
# getting empty bytestrings we get a proper error
with assert_checkpoints():
with pytest.raises(ClosedResourceError):
            await client_ssl.receive_some(10)
with assert_checkpoints():
with pytest.raises(ClosedResourceError):
await client_ssl.unwrap()
with assert_checkpoints():
with pytest.raises(ClosedResourceError):
await client_ssl.do_handshake()
# Check that a graceful close *before* handshaking gives a clean EOF on
# the other side
client_ssl, server_ssl = ssl_memory_stream_pair()
async def expect_eof_server():
with assert_checkpoints():
assert await server_ssl.receive_some(10) == b""
with assert_checkpoints():
await server_ssl.aclose()
async with _core.open_nursery() as nursery:
nursery.start_soon(client_ssl.aclose)
nursery.start_soon(expect_eof_server)
async def test_send_all_fails_in_the_middle():
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
async def bad_hook():
raise KeyError
client.transport_stream.send_stream.send_all_hook = bad_hook
with pytest.raises(KeyError):
await client.send_all(b"x")
with pytest.raises(BrokenResourceError):
await client.wait_send_all_might_not_block()
closed = 0
def close_hook():
nonlocal closed
closed += 1
client.transport_stream.send_stream.close_hook = close_hook
client.transport_stream.receive_stream.close_hook = close_hook
await client.aclose()
assert closed == 2
async def test_ssl_over_ssl():
client_0, server_0 = memory_stream_pair()
client_1 = tssl.SSLStream(
client_0, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
server_1 = tssl.SSLStream(server_0, SERVER_CTX, server_side=True)
client_2 = tssl.SSLStream(
client_1, CLIENT_CTX, server_hostname="trio-test-1.example.org"
)
server_2 = tssl.SSLStream(server_1, SERVER_CTX, server_side=True)
async def client():
await client_2.send_all(b"hi")
assert await client_2.receive_some(10) == b"bye"
async def server():
assert await server_2.receive_some(10) == b"hi"
await server_2.send_all(b"bye")
async with _core.open_nursery() as nursery:
nursery.start_soon(client)
nursery.start_soon(server)
async def test_ssl_bad_shutdown():
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
await trio.aclose_forcefully(client)
# now the server sees a broken stream
with pytest.raises(BrokenResourceError):
await server.receive_some(10)
with pytest.raises(BrokenResourceError):
await server.send_all(b"x" * 10)
await server.aclose()
async def test_ssl_bad_shutdown_but_its_ok():
client, server = ssl_memory_stream_pair(
server_kwargs={"https_compatible": True},
client_kwargs={"https_compatible": True}
)
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
await trio.aclose_forcefully(client)
# the server sees that as a clean shutdown
assert await server.receive_some(10) == b""
with pytest.raises(BrokenResourceError):
await server.send_all(b"x" * 10)
await server.aclose()
async def test_ssl_handshake_failure_during_aclose():
# Weird scenario: aclose() triggers an automatic handshake, and this
# fails. This also exercises a bit of code in aclose() that was otherwise
# uncovered, for re-raising exceptions after calling aclose_forcefully on
# the underlying transport.
async with ssl_echo_server_raw(expect_fail=True) as sock:
# Don't configure trust correctly
client_ctx = stdlib_ssl.create_default_context()
s = tssl.SSLStream(
sock, client_ctx, server_hostname="trio-test-1.example.org"
)
# It's a little unclear here whether aclose should swallow the error
# or let it escape. We *do* swallow the error if it arrives when we're
# sending close_notify, because both sides closing the connection
# simultaneously is allowed. But I guess when https_compatible=False
# then it's bad if we can get through a whole connection with a peer
# that has no valid certificate, and never raise an error.
with pytest.raises(BrokenResourceError):
await s.aclose()
async def test_ssl_only_closes_stream_once():
# We used to have a bug where if transport_stream.aclose() raised an
# error, we would call it again. This checks that that's fixed.
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
client_orig_close_hook = client.transport_stream.send_stream.close_hook
transport_close_count = 0
def close_hook():
nonlocal transport_close_count
client_orig_close_hook()
transport_close_count += 1
raise KeyError
client.transport_stream.send_stream.close_hook = close_hook
with pytest.raises(KeyError):
await client.aclose()
assert transport_close_count == 1
async def test_ssl_https_compatibility_disagreement():
client, server = ssl_memory_stream_pair(
server_kwargs={"https_compatible": False},
client_kwargs={"https_compatible": True}
)
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
# client is in HTTPS-mode, server is not
# so client doing graceful_shutdown causes an error on server
async def receive_and_expect_error():
with pytest.raises(BrokenResourceError) as excinfo:
await server.receive_some(10)
assert isinstance(excinfo.value.__cause__, tssl.SSLEOFError)
async with _core.open_nursery() as nursery:
nursery.start_soon(client.aclose)
nursery.start_soon(receive_and_expect_error)
async def test_https_mode_eof_before_handshake():
client, server = ssl_memory_stream_pair(
server_kwargs={"https_compatible": True},
client_kwargs={"https_compatible": True}
)
async def server_expect_clean_eof():
assert await server.receive_some(10) == b""
async with _core.open_nursery() as nursery:
nursery.start_soon(client.aclose)
nursery.start_soon(server_expect_clean_eof)
async def test_send_error_during_handshake():
client, server = ssl_memory_stream_pair()
async def bad_hook():
raise KeyError
client.transport_stream.send_stream.send_all_hook = bad_hook
with pytest.raises(KeyError):
with assert_checkpoints():
await client.do_handshake()
with pytest.raises(BrokenResourceError):
with assert_checkpoints():
await client.do_handshake()
async def test_receive_error_during_handshake():
client, server = ssl_memory_stream_pair()
async def bad_hook():
raise KeyError
client.transport_stream.receive_stream.receive_some_hook = bad_hook
async def client_side(cancel_scope):
with pytest.raises(KeyError):
with assert_checkpoints():
await client.do_handshake()
cancel_scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(client_side, nursery.cancel_scope)
nursery.start_soon(server.do_handshake)
with pytest.raises(BrokenResourceError):
with assert_checkpoints():
await client.do_handshake()
async def test_selected_alpn_protocol_before_handshake():
client, server = ssl_memory_stream_pair()
with pytest.raises(tssl.NeedHandshakeError):
client.selected_alpn_protocol()
with pytest.raises(tssl.NeedHandshakeError):
server.selected_alpn_protocol()
async def test_selected_alpn_protocol_when_not_set():
    # ALPN protocol still returns None when it's not set,
# instead of raising an exception
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert client.selected_alpn_protocol() is None
assert server.selected_alpn_protocol() is None
assert client.selected_alpn_protocol() == \
server.selected_alpn_protocol()
async def test_selected_npn_protocol_before_handshake():
client, server = ssl_memory_stream_pair()
with pytest.raises(tssl.NeedHandshakeError):
client.selected_npn_protocol()
with pytest.raises(tssl.NeedHandshakeError):
server.selected_npn_protocol()
async def test_selected_npn_protocol_when_not_set():
    # NPN protocol still returns None when it's not set,
# instead of raising an exception
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert client.selected_npn_protocol() is None
assert server.selected_npn_protocol() is None
assert client.selected_npn_protocol() == \
server.selected_npn_protocol()
async def test_get_channel_binding_before_handshake():
client, server = ssl_memory_stream_pair()
with pytest.raises(tssl.NeedHandshakeError):
client.get_channel_binding()
with pytest.raises(tssl.NeedHandshakeError):
server.get_channel_binding()
async def test_get_channel_binding_after_handshake():
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert client.get_channel_binding() is not None
assert server.get_channel_binding() is not None
assert client.get_channel_binding() == \
server.get_channel_binding()
async def test_getpeercert():
# Make sure we're not affected by https://bugs.python.org/issue29334
client, server = ssl_memory_stream_pair()
async with _core.open_nursery() as nursery:
nursery.start_soon(client.do_handshake)
nursery.start_soon(server.do_handshake)
assert server.getpeercert() is None
print(client.getpeercert())
assert (
("DNS",
"trio-test-1.example.org") in client.getpeercert()["subjectAltName"]
)
async def test_SSLListener():
async def setup(**kwargs):
listen_sock = tsocket.socket()
await listen_sock.bind(("127.0.0.1", 0))
listen_sock.listen(1)
socket_listener = SocketListener(listen_sock)
ssl_listener = tssl.SSLListener(socket_listener, SERVER_CTX, **kwargs)
transport_client = await open_tcp_stream(*listen_sock.getsockname())
ssl_client = tssl.SSLStream(
transport_client,
CLIENT_CTX,
server_hostname="trio-test-1.example.org"
)
return listen_sock, ssl_listener, ssl_client
listen_sock, ssl_listener, ssl_client = await setup()
async with ssl_client:
ssl_server = await ssl_listener.accept()
async with ssl_server:
assert not ssl_server._https_compatible
# Make sure the connection works
async with _core.open_nursery() as nursery:
nursery.start_soon(ssl_client.do_handshake)
nursery.start_soon(ssl_server.do_handshake)
# Test SSLListener.aclose
await ssl_listener.aclose()
assert listen_sock.fileno() == -1
################
# Test https_compatible and max_refill_bytes
_, ssl_listener, ssl_client = await setup(
https_compatible=True,
max_refill_bytes=100,
)
ssl_server = await ssl_listener.accept()
assert ssl_server._https_compatible
assert ssl_server._max_refill_bytes == 100
await aclose_forcefully(ssl_listener)
await aclose_forcefully(ssl_client)
await aclose_forcefully(ssl_server)
|
event.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2012-2013 by the SaltStack Team, see AUTHORS for more details
:license: Apache 2.0, see LICENSE for more details.
tests.integration.modules.event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
import time
import threading
from Queue import Queue, Empty
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
from salt.utils import event
class EventModuleTest(integration.ModuleCase):
def __test_event_fire_master(self):
events = Queue()
def get_event(events):
me = event.MasterEvent(self.master_opts['sock_dir'])
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1) # Allow the event-listener thread to start
ret = self.run_function(
'event.fire_master',
['event.fire_master: just test it!!!!', 'salttest']
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIsNotNone(eventfired)
self.assertIn(
'event.fire_master: just test it!!!!', eventfired['data']
)
ret = self.run_function(
'event.fire_master',
['event.fire_master: just test it!!!!', 'salttest-miss']
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
def __test_event_fire(self):
events = Queue()
def get_event(events):
me = event.MinionEvent(self.minion_opts)
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1) # Allow the event-listener thread to start
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest']
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIsNotNone(eventfired)
self.assertIn('event.fire: just test it!!!!', eventfired)
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest-miss']
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
def __test_event_fire_ipc_mode_tcp(self):
events = Queue()
def get_event(events):
me = event.MinionEvent(self.sub_minion_opts)
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1) # Allow the event-listener thread to start
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest'],
minion_tgt='sub_minion'
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIsNotNone(eventfired)
self.assertIn('event.fire: just test it!!!!', eventfired)
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest-miss'],
minion_tgt='sub_minion'
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
if __name__ == '__main__':
from integration import run_tests
run_tests(EventModuleTest)
|
zeromq.py
|
# -*- coding: utf-8 -*-
"""
Zeromq transport classes
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import errno
import hashlib
import logging
import os
import signal
import socket
import sys
import threading
import weakref
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.defaults.exitcodes
# Import Tornado Libs
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.log.setup
import salt.payload
import salt.transport.client
import salt.transport.frame
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.versions
import salt.utils.zeromq
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
from salt._compat import ipaddress
from salt.exceptions import SaltException, SaltReqTimeoutError
from salt.ext import six
from salt.utils.zeromq import (
LIBZMQ_VERSION_INFO,
ZMQ_VERSION_INFO,
ZMQDefaultLoop,
install_zmq,
zmq,
)
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
def _get_master_uri(master_ip, master_port, source_ip=None, source_port=None):
"""
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
"""
from salt.utils.zeromq import ip_bracket
master_uri = "tcp://{master_ip}:{master_port}".format(
master_ip=ip_bracket(master_ip), master_port=master_port
)
if source_ip or source_port:
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = "tcp://{source_ip}:{source_port};{master_ip}:{master_port}".format(
source_ip=ip_bracket(source_ip),
source_port=source_port,
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
elif source_ip and not source_port:
master_uri = "tcp://{source_ip}:0;{master_ip}:{master_port}".format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
elif source_port and not source_ip:
ip_any = (
"0.0.0.0"
if ipaddress.ip_address(master_ip).version == 4
else ip_bracket("::")
)
master_uri = "tcp://{ip_any}:{source_port};{master_ip}:{master_port}".format(
ip_any=ip_any,
source_port=source_port,
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
else:
log.warning(
"Unable to connect to the Master using a specific source IP / port"
)
log.warning("Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6")
log.warning(
"Specific source IP / port for connecting to master returner port: configuraion ignored"
)
return master_uri
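# Illustrative example (addresses and ports are hypothetical): with a new
# enough libzmq/pyzmq, pinning the minion's source address produces the
# combined "source;destination" form described above, e.g.
#
#   _get_master_uri("203.0.113.5", 4506, source_ip="192.0.2.10", source_port=5555)
#   # -> "tcp://192.0.2.10:5555;203.0.113.5:4506"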
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
"""
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
"""
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
async_methods = [
"crypted_transfer_decode_dictentry",
"_crypted_transfer",
"_do_transfer",
"_uncrypted_transfer",
"send",
]
close_methods = [
"close",
]
def __new__(cls, opts, **kwargs):
"""
Only create one instance of channel per __key()
"""
# do we have any mapping for this io_loop
io_loop = kwargs.get("io_loop")
if io_loop is None:
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug("Initializing new AsyncZeroMQReqChannel for %s", key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
log.trace(
"Inserted key into loop_instance_map id %s for key %s and process %s",
id(loop_instance_map),
key,
os.getpid(),
)
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug("Re-using AsyncZeroMQReqChannel for %s", key)
return obj
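    # Illustrative usage (hypothetical variable names): because of the
    # per-key singleton logic above, constructing the channel twice with the
    # same opts/kwargs on the same io_loop returns the very same object and
    # only bumps its reference count:
    #
    #   a = AsyncZeroMQReqChannel(opts, io_loop=loop)
    #   b = AsyncZeroMQReqChannel(opts, io_loop=loop)
    #   assert a is b and a._refcount == 2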
def __deepcopy__(self, memo):
cls = self.__class__
# pylint: disable=too-many-function-args
result = cls.__new__(cls, copy.deepcopy(self.opts, memo))
# pylint: enable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ("_io_loop", "_refcount", "_refcount_lock"):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == "message_client":
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(
result,
key,
AsyncReqMessageClientPool(
result.opts,
args=(result.opts, self.master_uri,),
kwargs={"io_loop": self._io_loop},
),
)
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (
opts["pki_dir"], # where the keys are stored
opts["id"], # minion ID
kwargs.get("master_uri", opts.get("master_uri")), # master ID
kwargs.get("crypt", "aes"), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = "zeromq"
# crypt defaults to 'aes'
self.crypt = kwargs.get("crypt", "aes")
if "master_uri" in kwargs:
self.opts["master_uri"] = kwargs["master_uri"]
self._io_loop = kwargs.get("io_loop")
if self._io_loop is None:
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != "clear":
            # we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug(
"Connecting the Minion to the Master URI (for the return server): %s",
self.master_uri,
)
self.message_client = AsyncReqMessageClientPool(
self.opts,
args=(self.opts, self.master_uri,),
kwargs={"io_loop": self._io_loop},
)
self._closing = False
def close(self):
"""
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
"""
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
"This is not the last %s instance. Not closing yet.",
self.__class__.__name__,
)
return
log.debug("Closing %s instance", self.__class__.__name__)
self._closing = True
if hasattr(self, "message_client"):
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self._io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self._io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self._io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
@property
def master_uri(self):
if "master_uri" in self.opts:
return self.opts["master_uri"]
# if by chance master_uri is not there..
if "master_ip" in self.opts:
return _get_master_uri(
self.opts["master_ip"],
self.opts["master_port"],
source_ip=self.opts.get("source_ip"),
source_port=self.opts.get("source_ret_port"),
)
# if we've reached here something is very abnormal
raise SaltException("ReqChannel: missing master_uri/master_ip in self.opts")
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
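    # Every request sent through this channel travels in a small envelope of
    # the shape {"enc": "aes" | "clear", "load": <payload>}; the callers below
    # encrypt the payload with the session crypticle whenever crypt != "clear".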
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(
self, load, dictkey=None, tries=3, timeout=60
):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if "key" not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
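        # The master returns the AES session key encrypted with this minion's
        # RSA public key; decrypt it (via the M2Crypto or Cryptodome/PyCrypto
        # path below) and use it to decode the requested dictionary entry.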
if HAS_M2:
aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret["key"])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
"""
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
"""
@salt.ext.tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
            # we may not always have data back: for example, for a salt-call
            # return submission this is a blind communication -- we do not
            # subscribe to return events, we just upload the results to the
            # master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # First attempt; if it fails with an authentication error (e.g.
            # after a master restart), re-authenticate below and retry once.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
"""
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
"""
ret = yield self.message_client.send(
self._package_load(load), timeout=timeout, tries=tries,
)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request, return a future which will complete when we send the message
"""
if self.crypt == "clear":
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(
load, tries=tries, timeout=timeout, raw=raw
)
raise salt.ext.tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(
salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
"""
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
"""
async_methods = [
"connect",
"_decode_messages",
]
close_methods = [
"close",
]
def __init__(self, opts, **kwargs):
self.opts = opts
self.ttype = "zeromq"
self.io_loop = kwargs.get("io_loop")
if self.io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(
salt.utils.stringutils.to_bytes(self.opts["id"])
).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts["zmq_filtering"]:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b"broadcast")
if self.opts.get("__role") == "syndic":
self._socket.setsockopt(zmq.SUBSCRIBE, b"syndic")
else:
self._socket.setsockopt(
zmq.SUBSCRIBE, salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b"")
self._socket.setsockopt(
zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts["id"])
)
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, "TCP_KEEPALIVE"):
self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"])
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"]
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"]
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"]
)
recon_delay = self.opts["recon_default"]
if self.opts["recon_randomize"]:
recon_delay = randint(
self.opts["recon_default"],
self.opts["recon_default"] + self.opts["recon_max"],
)
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts["recon_default"],
self.opts["recon_default"] + self.opts["recon_max"],
recon_delay,
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, "RECONNECT_IVL_MAX"):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts["recon_default"] + self.opts["recon_max"],
)
self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, self.opts["recon_max"])
if (self.opts["ipv6"] is True or ":" in self.opts["master_ip"]) and hasattr(
zmq, "IPV4ONLY"
):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def close(self):
if hasattr(self, "_monitor") and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, "_stream"):
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
else:
self._stream.close(0)
elif hasattr(self, "_socket"):
self._socket.close(0)
if hasattr(self, "context") and self.context.closed is False:
self.context.term()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@salt.ext.tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
# if this is changed from the default, we assume it was intentional
if int(self.opts.get("publish_port", 4506)) != 4506:
self.publish_port = self.opts.get("publish_port")
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds["publish_port"]
log.debug(
"Connecting the Minion to the Master publish port, using the URI: %s",
self.master_pub,
)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
"""
Return the master publish port
"""
return _get_master_uri(
self.opts["master_ip"],
self.publish_port,
source_ip=self.opts.get("source_ip"),
source_port=self.opts.get("source_publish_port"),
)
@salt.ext.tornado.gen.coroutine
def _decode_messages(self, messages):
"""
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
"""
messages_len = len(messages)
        # if it was one message, then it's the old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
        # two-part messages include a header which says who should process it
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (
self.opts.get("__role") != "syndic"
and message_target not in ("broadcast", self.hexid)
) or (
self.opts.get("__role") == "syndic"
and message_target not in ("broadcast", "syndic")
):
log.debug("Publish received for not this minion: %s", message_target)
raise salt.ext.tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
raise Exception(
(
"Invalid number of messages ({0}) in zeromq pub"
"message from master"
).format(len(messages_len))
)
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise salt.ext.tornado.gen.Return(ret)
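    # Framing sketch (illustrative): an unfiltered publish arrives as a single
    # serialized frame, while zmq_filtering publishes arrive as two frames,
    #   [b"broadcast" / b"<hexid>" / b"syndic", <serialized payload>]
    # which is why the length-2 branch above checks the first frame against
    # this minion's own hexid before deserializing the second one.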
@property
def stream(self):
"""
Return the current zmqstream, creating one if necessary
"""
if not hasattr(self, "_stream"):
self._stream = zmq.eventloop.zmqstream.ZMQStream(
self._socket, io_loop=self.io_loop
)
return self._stream
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
"""
if callback is None:
return self.stream.on_recv(None)
@salt.ext.tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
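    # Typical wiring (illustrative): once connected, a consumer registers a
    # single handler, e.g. channel.on_recv(handle_payload); passing None
    # instead unregisters the callback on the underlying ZMQStream.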
class ZeroMQReqServerChannel(
salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
"""
Multiprocessing target for the zmq queue device
"""
self.__setup_signals()
salt.utils.process.appendproctitle("MWorkerQueue")
self.context = zmq.Context(self.opts["worker_threads"])
# Prepare the zeromq sockets
self.uri = "tcp://{interface}:{ret_port}".format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts["mworker_queue_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting mworker_queue niceness to %d",
self.opts["mworker_queue_niceness"],
)
os.nice(self.opts["mworker_queue_niceness"])
if self.opts.get("ipc_mode", "") == "tcp":
self.w_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_workers", 4515)
)
else:
self.w_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "workers.ipc")
)
log.info("Setting up the master communication server")
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
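        # Proxy loop: shuttle requests between the minion-facing ROUTER socket
        # and the worker-facing DEALER socket until either socket is closed;
        # EINTR is retried so a stray signal doesn't tear the device down.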
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except (KeyboardInterrupt, SystemExit):
break
def close(self):
"""
Cleanly shutdown the router socket
"""
if self._closing:
return
log.info("MWorkerQueue under PID %s is closing", os.getpid())
self._closing = True
# pylint: disable=E0203
if getattr(self, "_monitor", None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, "_w_monitor", None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, "clients") and self.clients.closed is False:
self.clients.close()
if hasattr(self, "workers") and self.workers.closed is False:
self.workers.close()
if hasattr(self, "stream"):
self.stream.close()
if hasattr(self, "_socket") and self._socket.closed is False:
self._socket.close()
if hasattr(self, "context") and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
"""
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
"""
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
"""
Starts ZMQ monitor for debugging purposes.
:return:
"""
        # The socket monitor should only be used for debugging purposes, so
        # using threading here is acceptable
if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]:
log.debug("Starting ZMQ monitor")
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
log.debug("ZMQ monitor has been started started")
def post_fork(self, payload_handler, io_loop):
"""
After forking we need to create all of the local sockets to listen to the
router
        :param func payload_handler: A function to be called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
"""
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get("ipc_mode", "") == "tcp":
self.w_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_workers", 4515)
)
else:
self.w_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "workers.ipc")
)
log.info("Worker binding to socket %s", self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(
self, payload_handler, io_loop
)
self.stream = zmq.eventloop.zmqstream.ZMQStream(
self._socket, io_loop=self.io_loop
)
self.stream.on_recv_stream(self.handle_message)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, payload):
"""
Handle incoming messages from underlying TCP streams
:stream ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
"""
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc: # pylint: disable=broad-except
exc_type = type(exc).__name__
if exc_type == "AuthenticationError":
log.debug(
"Minion failed to auth to master. Since the payload is "
"encrypted, it is not known which minion failed to "
"authenticate. It is likely that this is a transient "
"failure due to the master rotating its public key."
)
else:
log.error("Bad load from minion: %s: %s", exc_type, exc)
stream.send(self.serial.dumps("bad load"))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get("load"), dict):
log.error(
"payload and load must be a dict. Payload was: %s and load was %s",
payload,
payload.get("load"),
)
stream.send(self.serial.dumps("payload and load must be a dict"))
raise salt.ext.tornado.gen.Return()
try:
id_ = payload["load"].get("id", "")
if str("\0") in id_:
log.error("Payload contains an id with a null byte: %s", payload)
stream.send(self.serial.dumps("bad load: id contains a null byte"))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error("Payload contains non-string id: %s", payload)
stream.send(
self.serial.dumps("bad load: id {0} is not a string".format(id_))
)
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload["enc"] == "clear" and payload.get("load", {}).get("cmd") == "_auth":
stream.send(self.serial.dumps(self._auth(payload["load"])))
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.send("Some exception handling minion payload")
log.error("Some exception handling a payload from minion", exc_info=True)
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get("fun", "send")
if req_fun == "send_clear":
stream.send(self.serial.dumps(ret))
elif req_fun == "send":
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == "send_private":
stream.send(
self.serial.dumps(
self._encrypt_private(ret, req_opts["key"], req_opts["tgt"],)
)
)
else:
log.error("Unknown req_fun %s", req_fun)
# always attempt to return an error to the minion
stream.send("Server-side exception handling payload")
raise salt.ext.tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = "{0} received a ".format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += "SIGINT"
elif signum == signal.SIGTERM:
msg += "SIGTERM"
msg += ". Exiting"
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
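# --- Illustrative sketch (not part of the upstream module) -------------------
# handle_message() above expects post_fork() to have been given a payload
# handler coroutine that returns a (ret, req_opts) pair, where req_opts["fun"]
# selects how the reply is sent ("send_clear", "send" or "send_private").
# The function below is a hypothetical, minimal handler showing that contract;
# it is not Salt's real master-side handler and is never called here.
@salt.ext.tornado.gen.coroutine
def _example_payload_handler(payload):
    # Echo the decoded load back to the minion, unencrypted.
    ret = {"echo": payload.get("load")}
    req_opts = {"fun": "send_clear"}
    raise salt.ext.tornado.gen.Return((ret, req_opts))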
def _set_tcp_keepalive(zmq_socket, opts):
"""
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
its host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
"""
if hasattr(zmq, "TCP_KEEPALIVE") and opts:
if "tcp_keepalive" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE, opts["tcp_keepalive"])
if "tcp_keepalive_idle" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, opts["tcp_keepalive_idle"])
if "tcp_keepalive_cnt" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, opts["tcp_keepalive_cnt"])
if "tcp_keepalive_intvl" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, opts["tcp_keepalive_intvl"])
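# --- Illustrative sketch (not part of the upstream module) -------------------
# _set_tcp_keepalive() only reads the four keys shown below from "opts"; the
# numeric values here are illustrative assumptions, not Salt defaults.  The
# helper is never called by the module itself.
def _example_keepalive_socket():
    example_opts = {
        "tcp_keepalive": 1,          # enable TCP keepalives
        "tcp_keepalive_idle": 300,   # idle seconds before the first probe
        "tcp_keepalive_cnt": 3,      # failed probes before the peer is dropped
        "tcp_keepalive_intvl": 60,   # seconds between probes
    }
    sock = zmq.Context.instance().socket(zmq.REQ)
    _set_tcp_keepalive(sock, example_opts)
    return sock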
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
"""
Encapsulate synchronous operations for a publisher channel
"""
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return salt.ext.tornado.gen.sleep(5)
def _publish_daemon(self, log_queue=None):
"""
Bind to the interface specified in the configuration file
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Publish daemon niceness to %i",
self.opts["pub_server_niceness"],
)
os.nice(self.opts["pub_server_niceness"])
if log_queue:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
        # if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get("pub_hwm", 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get("pub_hwm", 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get("pub_hwm", 1000))
if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = "tcp://{interface}:{publish_port}".format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_publish_pull", 4514)
)
else:
pull_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info("Starting the Salt Publisher on %s", pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.debug("Publish daemon getting data from puller %s", pull_uri)
package = pull_sock.recv()
log.debug("Publish daemon received payload. size=%d", len(package))
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(
unpacked_package
)
payload = unpacked_package["payload"]
log.trace("Accepted unpacked package from puller")
if self.opts["zmq_filtering"]:
# if you have a specific topic list, use that
if "topic_lst" in unpacked_package:
for topic in unpacked_package["topic_lst"]:
log.trace(
"Sending filtered data over publisher %s", pub_uri
)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(
hashlib.sha1(
salt.utils.stringutils.to_bytes(topic)
).hexdigest()
)
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Filtered data has been sent")
# Syndic broadcast
if self.opts.get("order_masters"):
log.trace("Sending filtered data to syndic")
pub_sock.send(b"syndic", flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Filtered data has been sent to syndic")
                        # otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
log.trace(
"Sending broadcasted data over publisher %s", pub_uri
)
pub_sock.send(b"broadcast", flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Broadcasted data has been sent")
else:
log.trace(
"Sending ZMQ-unfiltered data over publisher %s", pub_uri
)
pub_sock.send(payload)
log.trace("Unfiltered data has been sent")
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except KeyboardInterrupt:
log.trace("Publish daemon caught Keyboard interupt, tearing down")
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
"""
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
@property
def pub_sock(self):
"""
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
"""
try:
return self._sock_data.sock
except AttributeError:
pass
def pub_connect(self):
"""
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
"""
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_publish_pull", 4514)
)
else:
pull_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
"""
Disconnect an existing publisher socket and remove it from the local
thread's cache.
"""
if hasattr(self._sock_data, "sock"):
self._sock_data.sock.close()
delattr(self._sock_data, "sock")
def publish(self, load):
"""
Publish "load" to minions. This send the load to the publisher daemon
process with does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
"""
payload = {"enc": "aes"}
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
payload["load"] = crypticle.dumps(load)
if self.opts["sign_pub_messages"]:
master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
log.debug("Signing data packet")
payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
int_payload = {"payload": self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load["tgt_type"] == "list":
int_payload["topic_lst"] = load["tgt"]
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts["zmq_filtering"] and load["tgt_type"] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load["tgt"], tgt_type=load["tgt_type"])
match_ids = _res["minions"]
log.debug("Publish Side Match: %s", match_ids)
            # Send list of minions through so zmq can target them
int_payload["topic_lst"] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
"Sending payload to publish daemon. jid=%s size=%d",
load.get("jid", None),
len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug("Sent payload to publish daemon.")
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
"""
    Wrapper class around AsyncReqMessageClient to avoid blocking while waiting to write data to the socket.
"""
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(
AsyncReqMessageClient, opts, args=args, kwargs=kwargs
)
self._closing = False
def close(self):
if self._closing:
return
self._closing = True
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: unit tests!
class AsyncReqMessageClient(object):
"""
This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
"""
def __init__(self, opts, addr, linger=0, io_loop=None):
"""
Create an asynchronous message client
:param dict opts: The salt opts dictionary
        :param str addr: The address to connect the REQ socket to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
"""
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
self._closing = False
# TODO: timeout all in-flight sessions, or error
def close(self):
try:
if self._closing:
return
except AttributeError:
# We must have been called from __del__
# The python interpreter has nuked most attributes already
return
else:
self._closing = True
if hasattr(self, "stream") and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _init_socket(self):
if hasattr(self, "stream"):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, "RECONNECT_IVL_MAX"):
self.socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith("tcp://["):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, "IPV6"):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, "IPV4ONLY"):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug("Trying to connect to: %s", self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(
self.socket, io_loop=self.io_loop
)
@salt.ext.tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
                # Timed out
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=broad-except
log.debug("Re-init ZMQ socket: %s", err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
            # Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
"""
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
"""
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug(
"SaltReqTimeoutError, retrying. (%s/%s)",
future.attempts,
future.tries,
)
self.send(
message, timeout=future.timeout, tries=future.tries, future=future,
)
else:
future.set_exception(SaltReqTimeoutError("Message timed out"))
def send(
self, message, timeout=None, tries=3, future=None, callback=None, raw=False
):
"""
Return a future which will be completed when the message has a response
"""
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message
)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
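# --- Illustrative sketch (not part of the upstream module) -------------------
# AsyncReqMessageClient.send() queues the message and returns a Future, so a
# caller normally yields it from a coroutine running on the same IOLoop.  The
# address, message shape and timeout below are illustrative assumptions; the
# helper is never called by the module itself.
@salt.ext.tornado.gen.coroutine
def _example_req_roundtrip(opts):
    client = AsyncReqMessageClient(opts, "tcp://127.0.0.1:4506")
    try:
        reply = yield client.send({"enc": "clear", "load": {"cmd": "ping"}}, timeout=10)
        raise salt.ext.tornado.gen.Return(reply)
    finally:
        client.close()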
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
"""
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
"""
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(
self._monitor_socket, io_loop=io_loop
)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith("EVENT_"):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt["description"] = self.event_map[evt["event"]]
log.debug("ZeroMQ event: %s", evt)
if evt["event"] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BGLd shutdown."""
from test_framework.test_framework import BGLTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BGLTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
toster.py
|
# This file contains a simple Python module
# for communication with the server
from threading import Thread, RLock
import sys
import json
__infoCallbacks = []
__requestCallbacks = []
__communicationThread = None
__localLock = RLock()
def registerInfoCallback(cb):
__localLock.acquire()
__infoCallbacks.append(cb)
__localLock.release()
def registerRequestCallback(cb):
__localLock.acquire()
__requestCallbacks.append(cb)
__localLock.release()
def __innerLoop():
while True:
isLocked = False
try:
data = input()
parsed = json.loads(data, strict=False)
__localLock.acquire()
isLocked = True
if parsed["type"] == "info":
for cb in __infoCallbacks:
cb(parsed)
elif parsed["type"] == "request":
for cb in __requestCallbacks:
cb(parsed)
except json.JSONDecodeError:
continue
finally:
if isLocked:
__localLock.release()
isLocked = False
def start(block=False):
    global __communicationThread  # store the thread at module level, not in a local
    __communicationThread = Thread(target=__innerLoop, daemon=True)
    __communicationThread.start()
if block:
__communicationThread.join()
def sendResponse(req: dict, msg: dict):
reqCopy = req.copy()
reqCopy["type"] = "response"
reqCopy["msg"] = msg
print(json.dumps(reqCopy))
sys.stdout.flush()
def sendInfo(msg: dict):
infoData = {
"type": "info",
"msg": msg
}
print(json.dumps(infoData))
sys.stdout.flush()
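# --- Illustrative usage sketch (not part of the original module) -------------
# The reader loop above consumes one JSON object per stdin line and dispatches
# on its "type" field ("info" or "request"); replies go back as JSON lines on
# stdout.  The helper below shows a typical consumer wiring; it is never called
# by the module itself.
def _example_wiring():
    def on_request(req):
        # answer every incoming request with a fixed payload
        sendResponse(req, {"ok": True})
    registerRequestCallback(on_request)
    registerInfoCallback(lambda info: None)
    sendInfo({"status": "ready"})
    start(block=True)  # block=True joins the reader thread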
|
itachi.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import io,os,re,ast,six,sys,glob,json,time,timeit,codecs,random,shutil,tempfile,urllib,urllib2,urllib3,goslate,html5lib,requests,threading,wikipedia,subprocess,googletrans,pytz
from gtts import gTTS
from random import randint
from time import sleep
from urllib import urlopen, urlretrieve, urlencode
from io import StringIO
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
if (six.PY2):
import urllib2
import urllib
else:
import urllib.request
import urllib.parse
acil = LINETCR.LINE()
acil.login(token="TOKEN LU")
acil.loginResult()
ki = LINETCR.LINE()
ki.login(token="Token asist")
ki.loginResult()
ki2 = LINETCR.LINE()
ki2.login(token="token asist")
ki2.loginResult()
ki3 = LINETCR.LINE()
ki3.login(token="Token asist")
ki3.loginResult()
print "GyeVha Sucsses Login\nJangan Sombong Bos\nSelama Menggunakan Sc Orang"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage="""(F༘༘̈́̈́B༘̈́̈́L༘̈́̈́ ̈́V༘̈́̈́IC༘̈́̈́K༘̈́̈́Ÿ༘́ •в̰̰̈́ä̰̰́т̰̰̈́❍ṵ̰̰̈́s̰̰ä̰̰̰́ḭ̰̰̈́•)
╔══════════════╗
F༘༘̈́̈́B༘̈́̈́L༘̈́̈́•в̰̰̈́ä̰̰́т̰̰̈́❍ṵ̰̰̈́s̰̰ä̰̰̰́ḭ̰̰̈́•
╚══════════════╝
╔══════════════╗
║ 🇲🇨 Me
║ 🇲🇨 Add
║ 🇲🇨 Gift
║ 🇲🇨 Spam gift️
║ 🇲🇨 Cn "text"
║ 🇲🇨 Clockname "text"
║ 🇲🇨 TL:"text"
║ 🇲🇨 Ban:"mid"
║ 🇲🇨 Unban:"mid"
║ 🇲🇨 Bl:on
║ 🇲🇨 Unbl:on
║ 🇲🇨 Mcheck
║ 🇲🇨 Mybio:
║ 🇲🇨 Mybots
║ 🇲🇨 Mymid
║ 🇲🇨 Mygroups
║ 🇲🇨 Group id
║ 🇲🇨 Message set:"text"
║ 🇲🇨 Message confirm
║ 🇲🇨 Msg add-"text"
║ 🇲🇨 Com set:"text"
║ 🇲🇨 Comment
║ 🇲🇨 Comban/del/cek
║ 🇲🇨 Help set:"text"
║ 🇲🇨 Change
║ 🇲🇨 Gn "text"
║ 🇲🇨 Clink/Curl
║ 🇲🇨 Kick:"mid"
║ 🇲🇨 Invite:"mid"
║ 🇲🇨 Creator
║ 🇲🇨 Gcancel:"jumlah"
║ 🇲🇨 Gcancelall
║ 🇲🇨 Ginfo
║ 🇲🇨 Check
║ 🇲🇨 Cctv
║ 🇲🇨 Glink
║ 🇲🇨 Spam on/off
║ 🇲🇨 Gurl
║ 🇲🇨 Clink
║ 🇲🇨 Blocklist
║ 🇲🇨 Banlist
║ 🇲🇨 Update
║ 🇲🇨 Creator
║ 🇲🇨 Sc:"mid"
║ 🇲🇨 Ban "@"
║ 🇲🇨 Unban "@"
║ 🇲🇨 Sc @
║ 🇲🇨 Nuke
║ 🇲🇨 Backup
║ 🇲🇨 Tagall
║ 🇲🇨 Kick@mbl
║ 🇲🇨 Reinvite
║ 🇲🇨 Conban
║ 🇲🇨 Clearban
║ 🇲🇨 Gid
║ 🇲🇨 Grupname
║ 🇲🇨 Lurk:on/off
║ 🇲🇨 Lurkers
║ 🇲🇨 Wc️
║ 🇲🇨 Sp
║ 🇲🇨 stafflist
║ 🇲🇨 Reboot
║ 🇲🇨 Leaveallgroup
║ 🇲🇨 Pmfavorite
║ 🇲🇨 Broken
║╔═════════════╗
║║ 🛡 { Media }
║║ 🛡 {Translate}
║️║ 🛡 { Set }
║║ 🛡 {Helpbots}
║║ 🛡 {Settings}
║║ 🛡 {Setauto}
║╚═════════════╝
╚══════════════╝
"""
helpMedia= """(🇲🇨GYEVHABOTH🇲🇨)
╔══════════════╗
🇲🇨GYEVHABOTH🇲🇨
╚══════════════╝
╔══════════════╗
║ 🇲🇨 Youtube *text*
║ 🇲🇨 Youtubesearch *user*
║ 🇲🇨 Audio "text"
║ 🇲🇨 Lirik "text"
║ 🇲🇨 Ig "name"
║ 🇲🇨 Tts "judul/nama band"
║ 🇲🇨 Gimage
║ 🇲🇨 Image *text*
║ 🇲🇨 Google *text*
║ 🇲🇨 Micadd @
║ 🇲🇨 Micdel @
║ 🇲🇨 Miclist
║ 🇲🇨 Picturl @
║ 🇲🇨 Coverurl @
║ 🇲🇨 Copy @
║ 🇲🇨 Getname @
║ 🇲🇨 Getinfo @
║ 🇲🇨 pict @️
║ 🇲🇨 Getcontact @
║ 🇲🇨 Getvid @
║ 🇲🇨 Getmid @
║ 🇲🇨 Copy @
║ 🇲🇨 Recopy
║ 🇲🇨 Getcover @
║ 🇲🇨 Getbio @
║ 🇲🇨 Getinfo @
║ 🇲🇨 youinfo @
║ 🇲🇨 info "mid"
║ 🇲🇨 Contact "mid"
║ 🇲🇨 Id "idline"
║ 🇲🇨 Memlist
║ 🇲🇨 Setimage:
║ 🇲🇨 Papimage
║ 🇲🇨 Setvideo:
║ 🇲🇨 Papvideo
║ 🇲🇨 Checkdate
║ 🇲🇨 Myname
║ 🇲🇨 Mybio
║ 🇲🇨 Mypict
║ 🇲🇨 Myvid
║ 🇲🇨 Urlpict
║ 🇲🇨 Mycover
║ 🇲🇨 Urlcover
║ 🇲🇨 Hay "text"
║ 🇲🇨 Record "text"
║ 🇲🇨 Xvideo "text"
║ 🇲🇨 Smule "id smule"
║ 🇲🇨 Time
║ 🇲🇨 Imagetxt "text"
║ 🇲🇨 Cuaca*txt
║ 🇲🇨 Lokasi*txt
║ 🇲🇨 Shalat*txt
║ 🇲🇨 Anime"text"
║ 🇲🇨 Cekmovie"text"
║ 🇲🇨 Video"text"
║ 🇲🇨 Playstore"txt"
║ 🇲🇨 Twitter*txt
║ 🇲🇨 Klip"text"
║ 🇲🇨 Github*txt
║ 🇲🇨 Facebook*txt
║ 🇲🇨 Wikipedia*txt
║ 🇲🇨 Checkdate*ttl
║ 🇲🇨 Virus
║╔═════════════╗
║║ 🛡 sendpm "text"
║║ 🛡 sendgrup "text"
║║ 🛡 { Media }
║║ 🛡 {Translate}
║️║ 🛡 { Set }
║║ 🛡 {Helpbots}
║║ 🛡 {Settings}
║║ 🛡 {Setauto}
║╚═════════════╝
╚══════════════╝
"""
helpFun= """(🇲🇨GYEVHABOTH🇲🇨)
╔══════════════╗
🇲🇨GYEVHABOTH🇲🇨
╚══════════════╝
╔══════════════╗
║ 🇲🇨 sider:*txt*
║ 🇲🇨 tagme:*txt
║ 🇲🇨 welcome:*txt
║ 🇲🇨 left:*txt
║ 🇲🇨 message set:*txt*
║ 🇲🇨 STKID:*sticker id
║ 🇲🇨 STKPKGID:*stkr gid
║ 🇲🇨 STKVER:*version
║ 🇲🇨 cekresponse
║╔═════════════╗
║║ 🛡 sendpm text
║║ 🛡 sendgrup text
║║ 🛡 { Media }
║║ 🛡 {Translate}
║️║ 🛡 { Set }
║║ 🛡 {Helpbots}
║║ 🛡 {Settings}
║║ 🛡 {Setauto}
║╚═════════════╝
╚══════════════╝
"""
helpself= """(🇲🇨GYEVHABOTH🇲🇨)
╔══════════════╗
🇲🇨GYEVHABOTH🇲🇨
╚══════════════╝
╔══════════════╗
║ 🇲🇨 Fuck1/10 "@"
║ 🇲🇨 Kick1/10 "@"
║ 🇲🇨 All mid
║ 🇲🇨 Reinvite
║ 🇲🇨 B1-10 mid
║ 🇲🇨 B1-10name "text"
║ 🇲🇨 B1-10
║ 🇲🇨 B1-10 gift
║ 🇲🇨 B1-10 in
║ 🇲🇨 B1-10 bye
║ 🇲🇨 Bc "text"
║ 🇲🇨 Say "text"
║ 🇲🇨 Bom "text"
║ 🇲🇨 Allgift
║ 🇲🇨 Spam gift️
║ 🇲🇨 Botcopy
║ 🇲🇨 Botbackup
║ 🇲🇨 Botpict
║ 🇲🇨 Botcover
║ 🇲🇨 Botak
║ 🇲🇨 Allname "nama"
║ 🇲🇨 Allbio "status"
║ 🇲🇨 Sendcontact "text"
║ 🇲🇨 Botbyeall
║╔═════════════╗
║║ 🛡 sendpm text
║║ 🛡 sendgrup text
║║ 🛡 { Media }
║║ 🛡 {Translate}
║️║ 🛡 { Set }
║║ 🛡 {Helpbots}
║║ 🛡 {Settings}
║║ 🛡 {Setauto}
║╚═════════════╝
╚══════════════╝
"""
helfset= """ (🇲🇨GYEVHABOTH🇲🇨)
╔══════════════╗
🇲🇨GYEVHABOTH🇲🇨
╚══════════════╝
╔══════════════╗
║ 🇲🇨 Ban:on/Unbl:on
║ 🇲🇨 Contact:on/off
║ 🇲🇨 Add:on/off
║ 🇲🇨 Join:on/off
║ 🇲🇨 Leave:on/off
║ 🇲🇨 Share:on/off
║ 🇲🇨 Com:on/off
║ 🇲🇨 Clock:on/off
║ 🇲🇨 Respon:on/off
║ 🇲🇨 Stickertag:on/off
║ 🇲🇨 Welcome:on/off
║ 🇲🇨 Left:on/off
║ 🇲🇨 Sider:on/off
║ 🇲🇨 Notag:on/off
║ 🇲🇨 Mimic on/off
║ 🇲🇨 Simsimi:on/off
║ 🇲🇨 Read:0n/off
║ 🇲🇨 Like:on/off
║ 🇲🇨 Runtime
║╔══════════════╗
║║🛡 Pro:on/off
║║🛡 Prolink:on/off
║║🛡 Proinvite:on/off
║║🛡 Procancel:on/off
║║🛡 Namelock:on/off
║║🛡 Projoin:on/off
║║🛡 Allprotect:on/off
║╚══════════════╝
╚═══════════════╝
"""
KAC=[acil,ki,ki2,ki3]
mid = acil.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
Bots=[mid,kimid,ki2mid,ki3mid]
admsa = "u91b4d82e07fbb74ec6d87f1d620f3b8c"
wait = {
    'contact':False,
    'invite':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':True,
'tagme':"🇲🇨Woi\nNgapa Ngetag Aim 🙄",
'sider1':"message sider belum di set",
    'joingc':"🐺 WELCOME TO 🐺",
'leftgc':"🌪 Hoanjai Tuh Orang Baper Kayanya\nBesok Besok Langsung Hapus Akun Aja Yak 🌫🌫🌫🌫",
"stickerMention":False,
'message':"""Haiii Thanks Sudah Add Salam Kenalll F༘༘̈́̈́B༘̈́̈́L༘̈́̈́ ̈́V༘̈́̈́IC༘̈́̈́K༘̈́̈́Ÿ༘́ •в̰̰̈́ä̰̰́т̰̰̈́❍ṵ̰̰̈́s̰̰ä̰̰̰́ḭ̰̰̈́•""",
"lang":"JP",
"comment":"I Like It F༘༘̈́̈́B༘̈́̈́L༘̈́̈́ ̈́V༘̈́̈́IC༘̈́̈́K༘̈́̈́Ÿ༘́ •в̰̰̈́ä̰̰́т̰̰̈́❍ṵ̰̰̈́s̰̰ä̰̰̰́ḭ̰̰̈́•",
"comment1":"I Like It F༘༘̈́̈́B༘̈́̈́L༘̈́̈́ ̈́V༘̈́̈́IC༘̈́̈́K༘̈́̈́Ÿ༘́ •в̰̰̈́ä̰̰́т̰̰̈́❍ṵ̰̰̈́s̰̰ä̰̰̰́ḭ̰̰̈́•",
"commentOn":False,
"likeOn":True,
"wcOn":True,
"leftOn":True,
"alwayRead":False,
"Removechat":False,
"detectMention":False,
"kickMention":False,
"cpp":True,
"steal":False,
'pap':{},
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"",
"cNames":"",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"atjointicket":True,
"potoMention":{},
"prankName":True,
"Sider":{},
"cyduk":{},
"pname":{},
"pro_name":{},
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
def cms(string, commands): # match plain or prefixed commands: /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    tex = ["+","@","/",">",";","^","%","$","サテラ:"]
    for texX in tex:
        for command in commands:
            if string == command or string == texX + command:
                return True
    return False
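# Illustrative example (not in the original script): with the prefixes above,
#   cms("/help", ["help"]) -> True      cms("say", ["help"]) -> False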
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version: #If the Current Version of Python is 3.0 or above
        import urllib.request #urllib library for Extracting web pages
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers = headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: #If the Current Version of Python is 2.x
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
def updateProfilePicture(self, path):
file=open(path, 'rb')
files = {
'file': file
}
params = {
'name': 'media',
'type': 'image',
'oid': self.profile.mid,
'ver': '1.0',
}
data={
'params': json.dumps(params)
}
r = self.server.postContent(self.server.LINE_OBS_DOMAIN + '/talk/p/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Update profile picture failure.')
return True
def sendVideo(self, to_, path):
M = Message(to=to_, text=None, contentType = 2)
M.contentMetadata = None
M.contentPreview = None
M2 = self.Talk.client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'video',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
#print r
if r.status_code != 201:
raise Exception('Upload video failure.')
return True
def sendVideoWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download video failure.')
try:
self.sendVideo(to_, path)
except Exception as e:
raise (e)
def sendAudioWithUrl(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'w') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise (e)
def sendAudio(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M.contentMetadata = None
M.contentPreview = None
M2 = self.Talk.client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload audio failure.')
return True
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def fullwidth(text):
'''converts a regular string to Unicode Fullwidth
Preconditions: text, a string'''
translator = ''
translator = translator.maketrans('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&()*+,-./:;<=>?@[]^_`{|}~' , '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!゛#$%&()*+、ー。/:;〈=〉?@[]^_‘{|}~')
return text.translate(translator)
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except:
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = acil.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
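# Illustrative example (not in the original script): waktu() formats an elapsed
# number of seconds, e.g.
#   waktu(3750)  ->  '01 Jam 02 Menit 30 Detik'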
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = acil.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
acil.rejectGroupInvitation(op.param1)
else:
acil.acceptGroupInvitation(op.param1)
G.preventJoinByTicket = False
acil.updateGroup(G)
Ticket = acil.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
acil.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
acil.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace(" ",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
acil.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
acil.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
acil.acceptGroupInvitationByTicket(list_[1],list_[2])
G = acil.getGroup(list_[1])
G.preventJoinByTicket = True
acil.updateGroup(G)
except:
acil.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
acil.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
acil.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
ki2.like(url[25:58], url[66:], likeType=1001)
ki3.like(url[25:58], url[66:], likeType=1001)
ki4.like(url[25:58], url[66:], likeType=1001)
ki5.like(url[25:58], url[66:], likeType=1001)
ki6.like(url[25:58], url[66:], likeType=1001)
ki7.like(url[25:58], url[66:], likeType=1001)
ki8.like(url[25:58], url[66:], likeType=1001)
ki9.like(url[25:58], url[66:], likeType=1001)
ki10.like(url[25:58], url[66:], likeType=1001)
acil.comment(url[25:58], url[66:], wait["comment1"])
ki.comment(url[25:58], url[66:], wait["comment1"])
ki2.comment(url[25:58], url[66:], wait["comment1"])
ki3.comment(url[25:58], url[66:], wait["comment1"])
ki4.comment(url[25:58], url[66:], wait["comment1"])
ki5.comment(url[25:58], url[66:], wait["comment1"])
ki6.comment(url[25:58], url[66:], wait["comment1"])
ki7.comment(url[25:58], url[66:], wait["comment1"])
ki8.comment(url[25:58], url[66:], wait["comment1"])
ki9.comment(url[25:58], url[66:], wait["comment1"])
ki10.comment(url[25:58], url[66:], wait["comment1"])
#-----------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = acil.getGroup(op.param1)
except:
try:
G = ki1.getGroup(op.param1)
except:
try:
G = ki2.getGroup(op.param1)
except:
try:
G = ki3.getGroup(op.param1)
except:
try:
G = ki4.getGroup(op.param1)
except:
try:
G = ki5.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
acil.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
ki2.updateGroup(G)
except:
try:
ki3.updateGroup(G)
except:
try:
ki4.updateGroup(G)
except:
try:
ki5.updateGroup(G)
except:
pass
if op.param2 in Bots:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki5.kickoutFromGroup(op.param1,[op.param2])
except:
pass
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
acil.sendText(msg.to, "🤠" + data['result']['response'].encode('utf-8'))
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['detectMention'] == True:
contact = acil.getContact(msg.from_)
cName = contact.pictureStatus
balas = ["http://dl.profile.line-cdn.net/" + cName]
ret_ = random.choice(balas)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention["MENTIONEES"]
for mention in mentionees:
if mention["M"] in mid:
acil.sendImageWithURL(msg.to,ret_)
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
acil.sendMessage(msg)
acil.sendText(msg.to, wait["tagme"])
break
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['stickerMention'] == True:
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention["MENTIONEES"]
for mention in mentionees:
if mention["M"] in mid:
msg.contentType = 7
msg.text = ''
msg.contentMetadata = {
'STKPKGID': 1,
'STKTXT': '[]',
'STKVER': 100,
'STKID':110
}
acil.sendText(msg.to, wait["tagme"])
acil.sendMessage(msg)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = acil.getContact(msg.from_)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in mid:
acil.sendText(msg.to,"don't tag me")
acil.kickoutFromGroup(msg.to,[msg.from_])
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = acil.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
acil.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
acil.findAndAddContactsByMid(target)
acil.inviteIntoGroup(msg.to,[target])
acil.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
acil.sendText(msg.to,"Error")
wait['invite'] = False
break
if wait["alwayRead"] == True:
if msg.toType == 0:
acil.sendChatChecked(msg.from_,msg.id)
else:
acil.sendChatChecked(msg.to,msg.id)
if wait["Removechat"] == True:
if msg.toType == 0:
acil.removeAllMessages(op.param2)
else:
acil.removeAllMessages(op.param2)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
acil.sendText(msg.to,"sudah masuk daftar hitam👈")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
acil.sendText(msg.to,"Itu tidak berkomentar👈")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
acil.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
acil.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
acil.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
acil.sendText(msg.to,"Done👈")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
acil.sendText(msg.to,"Done👈")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
acil.sendText(msg.to,"Done👈")
elif wait["contact"] == True:
msg.contentType = 0
acil.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = acil.getContact(msg.contentMetadata["mid"])
try:
cu = acil.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
acil.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = acil.getContact(msg.contentMetadata["mid"])
try:
cu = acil.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
try:
acil.sendImageWithURL(msg.to, "http://dl.profile.line-cdn.net/" + contact.pictureStatus)
except:
cu = ""
acil.sendText(msg.to,"🎀═displayName═🎀\n✤[" + contact.displayName + "]✤\n🎀═MIDs═🎀\n✤[" + msg.contentMetadata["mid"] + "]✤\n🎀═StatusContact═🎀\n✤" + contact.statusMessage + "✤")
acil.sendText(msg.to,"LINKPROFILE\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\nLINKBERANDA\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
acil.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Botallbye"]:
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
gid = ki3.getGroupIdsJoined()
gid = ki4.getGroupIdsJoined()
gid = ki5.getGroupIdsJoined()
gid = ki6.getGroupIdsJoined()
gid = ki7.getGroupIdsJoined()
gid = ki8.getGroupIdsJoined()
gid = ki9.getGroupIdsJoined()
gid = ki10.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
ki3.leaveGroup(i)
ki4.leaveGroup(i)
ki5.leaveGroup(i)
ki6.leaveGroup(i)
ki7.leaveGroup(i)
ki8.leaveGroup(i)
ki9.leaveGroup(i)
ki10.leaveGroup(i)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh selfbot...!!!\nMakasih...!!!")
else:
acil.sendText(msg.to,"He declined all invitations")
#--------------------------
elif msg.text in ["Leaveallgroup"]:
gid = acil.getGroupIdsJoined()
for i in gid:
acil.leaveGroup(i)
if wait["lang"] == "JP":
acil.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh selfbot...!!!\nMakasih...!!!")
else:
acil.sendText(msg.to,"He declined all invitations")
#----------------------------------------------
elif "Sendgrup " in msg.text:
bctxt = msg.text.replace("Sendgrup ", "")
n = acil.getGroupIdsJoined()
for manusia in n:
acil.sendText(manusia, (bctxt))
elif "Sendcontact " in msg.text:
bctxt = msg.text.replace("Sendcontact ", "")
t = ki.getAllContactIds()
t = ki2.getAllContactIds()
t = ki3.getAllContactIds()
t = ki4.getAllContactIds()
t = ki5.getAllContactIds()
t = ki6.getAllContactIds()
t = ki7.getAllContactIds()
t = ki8.getAllContactIds()
t = ki9.getAllContactIds()
t = ki10.getAllContactIds()
for manusia in t:
ki.sendText(manusia,(bctxt))
ki2.sendText(manusia,(bctxt))
ki3.sendText(manusia,(bctxt))
ki4.sendText(manusia,(bctxt))
ki5.sendText(manusia,(bctxt))
ki6.sendText(manusia,(bctxt))
ki7.sendText(manusia,(bctxt))
ki8.sendText(manusia,(bctxt))
ki9.sendText(manusia,(bctxt))
ki10.sendText(manusia,(bctxt))
elif "Sendpm " in msg.text:
bctxt = msg.text.replace("Sendpm ", "")
t = acil.getAllContactIds()
for manusia in t:
acil.sendText(manusia, (bctxt))
elif "Virus" in msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': "BEBAS,'"}
acil.sendMessage(msg)
elif msg.text in ["Stafflist"]:
if Bots == []:
acil.sendText(msg.to,"The Friends is empty")
else:
acil.sendText(msg.to,"Tunggu...")
mc = "||===FRIENDLIST===||\n=====================\n"
for mi_d in Bots:
mc += "★" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
print "[Command]Friendlist executed"
elif "Youinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
acil.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
acil.sendText(msg.to,"Profile Picture " + contact.displayName)
acil.sendImageWithURL(msg.to,image)
acil.sendText(msg.to,"Cover " + contact.displayName)
acil.sendImageWithURL(msg.to,path)
except:
pass
elif "Botak" in msg.text:
group = acil.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
ki.sendMessage(msg)
elif "Github " in msg.text:
a = msg.text.replace("Github ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type: GitHub Search\nStatus: Processing...")
acil.sendText(msg.to, "Title: " + a + "\nLink: https://github.com/search?utf8=✓&q="+b)
elif 'playstore ' in msg.text.lower():
tob = msg.text.lower().replace('playstore ',"")
acil.sendText(msg.to,"Please wait...")
acil.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
acil.sendText(msg.to,"This is link aplication")
elif "Wikipedia " in msg.text:
try:
                    wiki = msg.text.lower().replace("wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except Exception as e:
acil.sendText(msg.to, str(e))
elif "Twitter " in msg.text:
a = msg.text.replace("Twitter ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Processing")
acil.sendText(msg.to, "https://www.twitter.com" + b)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Success")
elif "Smule " in msg.text:
a = msg.text.replace("Smule ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching to id smule..")
acil.sendText(msg.to, "Nama: "+b+"\nId smule: http://smule.com/" +b)
elif "Google " in msg.text:
a = msg.text.replace("Google ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching google..")
acil.sendText(msg.to, "Search: "+b+"\nsuccess: http://google.com/" +b)
elif "Xvideos " in msg.text:
a = msg.text.replace("Xvideos ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching ....\n" "Type:Search Xvideos\nStatus: Processing")
acil.sendText(msg.to, "{ Xvideos search page }\n\nTitle: "+b+"\nSource : Xvideos\nhttp://xvideos.com/?k=" +b)
elif "Picturl @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Coverurl @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Coverurl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif msg.text in ["Pmfavorite"]:
dj = acil.getFavoriteMids()
kontak = acil.getContacts(dj)
num = 1
family = str(len(dj))
msgs = "[List Favorite Friends Guys]"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\nTotal Friend : %i" % len(kontak)
acil.sendText(msg.to, msgs)
elif msg.text.lower() == 'setauto':
acil.sendText(msg.to,helpFun)
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
acil.sendText(msg.to,helpMessage)
else:
acil.sendText(msg.to,helpMessage)
elif msg.text.lower() == 'media':
if wait["lang"] == "JP":
acil.sendText(msg.to,helpMedia)
else:
acil.sendText(msg.to,helpMedia)
elif msg.text.lower() == 'helpbots':
if wait["lang"] == "JP":
ki.sendText(msg.to,helpself)
else:
acil.sendText(msg.to,helpself)
elif msg.text.lower() == 'settings':
if wait["lang"] == "JP":
acil.sendText(msg.to,helpset)
else:
acil.sendText(msg.to,helpset)
elif ("Gn:" in msg.text):
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.name = msg.text.replace("Gn:","")
ki.updateGroup(group)
else:
acil.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok👈")
elif ("Gn " in msg.text):
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.name = msg.text.replace("Gn ","")
acil.updateGroup(group)
else:
acil.sendText(msg.to,"Can not be used for groups other than")
elif "Kick:" in msg.text:
midd = msg.text.replace("Kick:","")
acil.kickoutFromGroup(msg.to,[midd])
elif "Invite:" in msg.text:
midd = msg.text.replace("Invite:","")
acil.findAndAddContactsByMid(midd)
acil.inviteIntoGroup(msg.to,[midd])
elif "Me" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
acil.sendMessage(msg)
eltime = time.time() - mulai
van = "Bot has been active "+waktu(eltime)
acil.sendText(msg.to,van)
elif "Mybots" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki7mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki8mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki9mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki10mid}
acil.sendMessage(msg)
elif "Respon" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
ki6.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki7mid}
ki7.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki8mid}
ki8.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki9mid}
ki9.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki10mid}
ki10.sendMessage(msg)
ki.sendText(msg.to,"AMAN TERKENDALI KOMANDAN 👮")
elif "B1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif "B2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif "B3" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
elif "B4" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
elif "B5" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif "B6" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
ki6.sendMessage(msg)
elif "B7" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki7mid}
ki7.sendMessage(msg)
elif "B8" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki8mid}
ki8.sendMessage(msg)
elif "B9" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki9mid}
ki9.sendMessage(msg)
elif "B10" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki10mid}
ki10.sendMessage(msg)
elif "Creator" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u0869b63018180639efeb395436479e02'}
acil.sendMessage(msg)
elif msg.text in ["Allgift","B1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["Gift","i gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
acil.sendMessage(msg)
elif msg.text in ["Allgift","B2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["Allgift","B3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["Allgift","B4 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif msg.text in ["Allgift","B5 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki5.sendMessage(msg)
elif msg.text in ["Allgift","B6 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
ki6.sendMessage(msg)
elif msg.text in ["Allgift","B7 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
ki7.sendMessage(msg)
elif msg.text in ["Allgift","B8 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '13'}
msg.text = None
ki8.sendMessage(msg)
elif msg.text in ["Allgift","B9 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '11'}
msg.text = None
ki9.sendMessage(msg)
elif msg.text in ["Allgift","B10 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki10.sendMessage(msg)
elif msg.text in ["Spam gift"]:
#if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
ki6.sendMessage(msg)
ki7.sendMessage(msg)
ki8.sendMessage(msg)
ki9.sendMessage(msg)
ki10.sendMessage(msg)
elif msg.text in ["Clink"]:
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.preventJoinByTicket = False
acil.updateGroup(group)
if wait["lang"] == "JP":
acil.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
acil.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"It can not be used outside the group👈")
else:
acil.sendText(msg.to,"Can not be used for groups other than")
elif msg.text in ["Curl"]:
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.preventJoinByTicket = True
acil.updateGroup(group)
if wait["lang"] == "JP":
acil.sendText(msg.to,"URL close 👈")
else:
acil.sendText(msg.to,"URL close 👈")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"It can not be used outside the group 👈")
else:
acil.sendText(msg.to,"Can not be used for groups other than ")
elif msg.text.lower() == 'ginfo':
group = acil.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md ="✥GROUP NAME✥\n" + group.name + "\n\n✥GROUP ID✥\n✿" + group.id +"✿" "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\n✥TOTAL MEMBER✥\n" + str(len(group.members)) + " Orang" + "\n✥PENDINGAN✥\n" + str(len(group.invitee)) + " Orang"
acil.sendText(msg.to,md)
elif "Mymid" == msg.text:
acil.sendText(msg.to,mid)
elif "B1 mid" == msg.text:
ki.sendText(msg.to,kimid)
elif "B2 mid" == msg.text:
ki2.sendText(msg.to,ki2mid)
elif "B3 mid" == msg.text:
ki3.sendText(msg.to,ki3mid)
elif "B4 mid" == msg.text:
ki4.sendText(msg.to,ki4mid)
elif "B5 mid" == msg.text:
ki5.sendText(msg.to,ki5mid)
elif "B6 mid" == msg.text:
ki6.sendText(msg.to,ki6mid)
elif "B7 mid" == msg.text:
ki7.sendText(msg.to,ki7mid)
elif "B8 mid" == msg.text:
ki8.sendText(msg.to,ki8mid)
elif "B9 mid" == msg.text:
ki9.sendText(msg.to,ki9mid)
elif "B10 mid" == msg.text:
ki10.sendText(msg.to,ki10mid)
elif "All mid" == msg.text:
acil.sendText(msg.to,mid)
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
ki3.sendText(msg.to,ki3mid)
ki4.sendText(msg.to,ki4mid)
ki5.sendText(msg.to,ki5mid)
ki6.sendText(msg.to,ki6mid)
ki7.sendText(msg.to,ki7mid)
ki8.sendText(msg.to,ki8mid)
ki9.sendText(msg.to,ki9mid)
ki10.sendText(msg.to,ki10mid)
elif "TL:" in msg.text:
tl_text = msg.text.replace("TL:","")
acil.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+acil.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Allname " in msg.text:
string = msg.text.replace("Allname ","")
if len(string.decode('utf-8')) <= 20:
    # apply the new display name to every bot account in one pass
    for bot in [ki, ki2, ki3, ki4, ki5, ki6, ki7, ki8, ki9, ki10]:
        profile = bot.getProfile()
        profile.displayName = string
        bot.updateProfile(profile)
acil.sendText(msg.to,"NAMA BOT BERHASIL DI TERAPKAN MENJADI\n👉" + string + "👈")
elif "Allbio " in msg.text:
string = msg.text.replace("Allbio ","")
if len(string.decode('utf-8')) <= 500:
    # apply the new status message (bio) to every bot account in one pass
    for bot in [ki, ki2, ki3, ki4, ki5, ki6, ki7, ki8, ki9, ki10]:
        profile = bot.getProfile()
        profile.statusMessage = string
        bot.updateProfile(profile)
acil.sendText(msg.to,"SEMUA TELAH DI UPDATE BIO PROFILE\n👉" + string + "👈")
elif "Mybio " in msg.text:
string = msg.text.replace("Mybio ","")
if len(string.decode('utf-8')) <= 500:
profile = acil.getProfile()
profile.statusMessage = string
acil.updateProfile(profile)
acil.sendText(msg.to,"Update Bio\n👉" + string + "👈")
#------------------------------------------------------------------------------------------#
elif "Cn " in msg.text:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = acil.getProfile()
profile.displayName = string
acil.updateProfile(profile)
acil.sendText(msg.to,"Update Names👉 " + string + "👈")
#---------------------------------------------------------
elif "B1name " in msg.text:
string = msg.text.replace("B1name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B2name " in msg.text:
string = msg.text.replace("B2name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B3name " in msg.text:
string = msg.text.replace("B3name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B4name " in msg.text:
string = msg.text.replace("B4name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
#--------------------------------------------------------
elif "B5name " in msg.text:
string = msg.text.replace("B5name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to," Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B6name " in msg.text:
string = msg.text.replace("B6name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki6.getProfile()
profile.displayName = string
ki6.updateProfile(profile)
ki6.sendText(msg.to,"Update Names��" + string + "👈")
#---------------------------------------------------------
elif "B7name " in msg.text:
string = msg.text.replace("B7name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki7.getProfile()
profile.displayName = string
ki7.updateProfile(profile)
ki7.sendText(msg.to,"Update Names👉" + string + "👈")
#---------------------------------------------------------
elif "B8name " in msg.text:
string = msg.text.replace("B8name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki8.getProfile()
profile.displayName = string
ki8.updateProfile(profile)
ki8.sendText(msg.to,"Update Names👉" + string + "👈")
#---------------------------------------------------------
elif "B9name " in msg.text:
string = msg.text.replace("B9name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki9.getProfile()
profile.displayName = string
ki9.updateProfile(profile)
ki9.sendText(msg.to,"Update Names👉" + string + "👈")
#---------------------------------------------------------
elif "B10name " in msg.text:
string = msg.text.replace("B10name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki10.getProfile()
profile.displayName = string
ki10.updateProfile(profile)
ki10.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "Contact " in msg.text:
mmid = msg.text.replace("Contact ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
acil.sendMessage(msg)
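# Allprotect:on — turns on every group guard (generic protect, QR/link, invite, cancel, name-lock) in a single command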
elif msg.text in ["Allprotect:on"]:
if wait["protect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["protect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable✔")
else:
acil.sendText(msg.to,"It is already On ✔")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protect Enable")
else:
acil.sendText(msg.to,"It is already On ô€¨")
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable")
else:
acil.sendText(msg.to,"It is already On ¨")
if msg.to in wait['pname']:
    acil.sendText(msg.to,"ALREADY ON")
else:
    acil.sendText(msg.to,"TURN ON")
    wait['pname'][msg.to] = True
    wait['pro_name'][msg.to] = acil.getGroup(msg.to).name
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON✔")
else:
acil.sendText(msg.to,"It is already On ✔")
#=====================================================================================
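# Allprotect:off — mirror of Allprotect:on; disables every guard and clears the name-lock entry for this group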
elif msg.text in ["Allprotect:off"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Disable ✔")
else:
acil.sendText(msg.to,"sudah dimatikan ✔")
else:
wait["protect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ✔")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
if msg.to in wait['pname']:
acil.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
acil.sendText(msg.to,"ALREADY OFF")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
#=====================================================================================
elif msg.text.lower() == 'contact:on':
if wait["contact"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Sudah On")
else:
acil.sendText(msg.to,"It is already open")
else:
wait["contact"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already open ✔")
else:
acil.sendText(msg.to,"It is already open ")
elif msg.text.lower() == 'contact:off':
if wait["contact"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah off ✖")
else:
acil.sendText(msg.to,"It is already off ✖")
else:
wait["contact"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"off already")
else:
acil.sendText(msg.to,"already Close ✔")
elif msg.text in ["Pro:on"]:
if wait["protect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable ??✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["protect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable✔")
else:
acil.sendText(msg.to,"It is already On ✔")
elif msg.text in ['Prolink:on']:
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protect Enable")
else:
acil.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ['Proinvite:on']:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable")
else:
acil.sendText(msg.to,"It is already On ¨")
elif msg.text in ['Procancel:on']:
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON✔")
else:
acil.sendText(msg.to,"It is already On ✔")
elif msg.text.lower() == 'join:on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Ini sudah on✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON✔")
else:
acil.sendText(msg.to,"It is already On ✔")
elif msg.text.lower() == 'blocklist':
blockedlist = acil.getBlockedContactIds()
acil.sendText(msg.to, "Please wait...")
kontak = acil.getContacts(blockedlist)
num=1
msgs="✖User Blocked List✖\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
acil.sendText(msg.to, msgs)
elif msg.text.lower() == 'join:off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Join Already Off✔")
else:
acil.sendText(msg.to,"Auto Join set off✔")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✔")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Pro:off"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Disable ✔")
else:
acil.sendText(msg.to,"sudah dimatikan ✔")
else:
wait["protect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Prolink:off"]:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Proinvite:off"]:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Procancel:off"]:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
elif "Join:" in msg.text:
try:
strnum = msg.text.replace("Join:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Itu off undangan ditolak✖\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan✖")
else:
acil.sendText(msg.to,"Off undangan ditolak✖Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis✔")
else:
acil.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
acil.sendText(msg.to,"")
else:
acil.sendText(msg.to,"Weird value✖")
elif msg.text in ["Leave:on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"on✔")
else:
acil.sendText(msg.to,"Sudah terbuka ✔")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done✔")
else:
acil.sendText(msg.to,"Is already open✔")
elif msg.text in ["Leave:off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"off✖")
else:
acil.sendText(msg.to,"Sudah off✖")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done✔")
else:
acil.sendText(msg.to,"Is already close✔")
elif msg.text in ["Share:on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✖")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"on ✔")
else:
acil.sendText(msg.to,"on ✔")
elif msg.text in ["Share:off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done✔")
else:
acil.sendText(msg.to,"It is already turned off ✔")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Off ✖")
else:
acil.sendText(msg.to,"Off ✖")
elif msg.text.lower() == 'set':
md = "╔▬▬℘ґ∂ηк в❍тs▬▬╗\n║▬▬▬℘▬▬в▬▬▬║\n"
if wait["likeOn"] == True: md+="║☆║Like:ON➡️📱\n"
else: md+="║☆║Like:OFF➡️📴\n"
if wait["wcOn"] == True: md+="║☆║Welcome:ON➡️📱\n"
else: md+="║☆║Welcome:OFF➡️📴\n"
if wait["leftOn"] == True: md+="║☆║Left:ON➡️📱\n"
else: md+="║☆║Left:OFF➡️📴\n"
if wait["detectMention"] == True: md+="║☆║Respon:ON➡️📱\n"
else: md +="║☆║Respon:OFF➡️📴\n"
if wait["stickerMention"] == True: md+="║☆║Stickertag:ON➡️📱\n"
else: md +="║☆║Stickertag:OFF➡️📴\n"
if settings["simiSimi"] == True: md+="║☆║Simisimi:ON➡️📱\n"
else: md+="║☆║Simisimi:OFF➡️📴\n"
if wait["alwayRead"] == True: md+="║☆║Auto read:ON➡️📱\n"
else: md+="║☆║Auto read:OFF➡️📴\n"
if wait["Sider"] == True: md+="║☆║Sider:ON➡️📱\n"
else: md+="║☆║Sider:OFF➡️📴\n"
if wait["kickMention"] == True: md+="║☆║Notag:ON➡️📱\n"
else:md+="║☆║Notag:OFF➡️📴\n"
if wait["contact"] == True: md+="║☆║Contact:ON➡️📱\n"
else: md+="║☆║Contact:OFF➡️📴\n"
if wait["autoJoin"] == True: md+="║☆║Join:ON➡️📱\n"
else: md +="║☆║Join:OFF➡️📴\n"
if wait["autoCancel"]["on"] == True:md+="║☆║Cancel:" + str(wait["autoCancel"]["members"]) + "➡️📱\n"
else: md+= "║☆║Cancel:OFF➡️📴\n"
if wait["leaveRoom"] == True: md+="║☆║Leave:ON➡️📱\n"
else: md+="║☆║Leave:OFF➡️📴\n"
if wait["timeline"] == True: md+="║☆║Share:ON➡️📱\n"
else:md+="║☆║Share:OFF➡️📴\n"
if wait["autoAdd"] == True: md+="║☆║Add:ON➡️📱\n"
else:md+="║☆║Add:OFF➡️??\n"
if wait["commentOn"] == True: md+="║☆║Com:ON➡️📱\n"
else:md+="║☆║Com:OFF➡️📴\n║▬║❨◄▬▬▬►❩\n║☆║◄═PROTECTION═►\n║▬║▬GYE_VHA▬╣\n"
if wait["protect"] == True: md+="║☆║Pro:ON➡️📱\n"
else:md+="║☆║Pro:OFF➡️📴\n"
if wait["linkprotect"] == True: md+="║☆║ProtectQr:ON➡️📱\n"
else:md+="║☆║ProtectQr:OFF➡️📴\n"
if wait["inviteprotect"] == True: md+="║☆║Proinvite:ON➡️📱\n"
else:md+="║☆║Proinvite:OFF➡️📴\n"
if wait["cancelprotect"] == True: md+"║☆║Procancel:ON➡️📱\n"
else:md+="║☆║Procancel:OFF➡️📴\n"
if wait["pname"] == True: md+="║☆║Namelock:ON➡️📱\n"
else: md+="║☆║Namelock:OFF➡️📴\n"
acil.sendText(msg.to,md + "║▬▬▬℘▬▬в▬▬▬║\n╚▬▬GYE_VHA▬▬╝")
elif "Creatorgrup" == msg.text:
try:
group = acil.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
GS = acil.getContact(msg.to)
acil.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
acil.sendMessage(M)
W = acil.getContact(msg.to)
acil.sendText(msg.to,"old user")
elif cms(msg.text,["Add"]):
msg.contentType = 13
msg.contentMetadata = {'mid': 'u0869b63018180639efeb395436479e02'}
acil.sendText(msg.to,"❂•••••✰Gye✰•••••❂")
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': 'u0869b63018180639efeb395436479e02'}
acil.sendMessage(msg)
acil.sendText(msg.to,"❂•••••✰Gye✰•••••❂")
elif "Tagme: " in msg.text:
c = msg.text.replace("Tagme: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["tagme"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Welcome: " in msg.text:
c = msg.text.replace("Welcome: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["joingc"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Left: " in msg.text:
c = msg.text.replace("Left: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["leftgc"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Sider: " in msg.text:
c = msg.text.replace("Sider: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["sider1"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Setrespon: " in msg.text:
c = msg.text.replace("Setrespon: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["responName"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Cekresponse" in msg.text:
acil.sendText(msg.to,"👇Respon saat di tag👇\n" + wait["tagme"])
acil.sendText(msg.to,"👇Respon saat di add👇\n" + wait["comment"])
acil.sendText(msg.to,"👇Respon saat member join👇\n" + wait["joingc"])
acil.sendText(msg.to,"👇Respon saat member left👇\n" + wait["leftgc"])
acil.sendText(msg.to,"👇Respon saat member readchat👇\n" + wait["sider1"])
acil.sendText(msg.to,"👇Respon saat member memanggil👇\n" + wait["responName"])
acil.sendText(msg.to,"👇Respon di autolike👇\n" + wait["comment1"] + "\n\nHAL INI TIDAK DAPAT DI UBAH SESUAI HAK CIPTA\nCREATOR::PRANKBOTS")
elif msg.text in ["Left:on"]:
if wait["leftOn"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["leftOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif msg.text in ["Left:off"]:
if wait["leftOn"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["leftOn"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif msg.text in ["Welcome:on"]:
if wait['wcOn'] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah ON")
else:
wait["wcOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Welcome:off"]:
if wait['wcOn'] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Sudah off")
else:
wait['wcOn'] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF")
elif msg.text.lower() == 'group id':
gid = acil.getGroupIdsJoined()
h = "❂•••••••••L I S T I D G R O U P••••••••••❂\n "
for i in gid:
h += "[%s]:%s\n" % (acil.getGroup(i).name,i)
acil.sendText(msg.to,h)
elif msg.text in ["Gcancelall"]:
gid = acil.getGroupIdsInvited()
for i in gid:
acil.rejectGroupInvitation(i)
if wait["lang"] == "JP":
acil.sendText(msg.to,"Success menolak semua undangan")
else:
acil.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Add:on","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already On✔")
else:
acil.sendText(msg.to,"Already On✔")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already On✔")
else:
acil.sendText(msg.to,"Already On✔")
elif msg.text in ["Add:off","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini sudah off✖")
else:
acil.sendText(msg.to,"Hal ini sudah dimatikan✖")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already Off✖")
else:
acil.sendText(msg.to,"Untuk mengaktifkan-off✖")
elif "Message set:" in msg.text:
wait["message"] = msg.text.replace("Message set:","")
acil.sendText(msg.to,"✨We changed the message✨")
elif "Help set:" in msg.text:
wait["help"] = msg.text.replace("Help set:","")
acil.sendText(msg.to,"✨We changed the Help✨")
elif "Msg add-" in msg.text:
wait["message"] = msg.text.replace("Pesan add-","")
if wait["lang"] == "JP":
acil.sendText(msg.to,"✨Kami mengubah pesan✨")
else:
acil.sendText(msg.to,"Change information")
elif msg.text in ["Pesan add cek","Message confirm"]:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
acil.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
acil.sendText(msg.to,"I changed the language to engglis✔")
else:
wait["lang"] = "JP"
acil.sendText(msg.to,"I changed the language to indonesia✔")
elif "Message set: " in msg.text:
c = msg.text.replace("Message set: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["comment"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif msg.text in ["Comment:on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Aku berada di✔")
else:
acil.sendText(msg.to,"To open✔")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"✔")
else:
acil.sendText(msg.to,"✔")
elif msg.text in ["Com:off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini sudah off ✖")
else:
acil.sendText(msg.to,"It is already turned off ✖")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Off✖")
else:
acil.sendText(msg.to,"To turn off✖")
elif msg.text in ["Com","Comment"]:
acil.sendText(msg.to,"✨Auto komentar saat ini telah ditetapkan sebagai berikut✨\n\n" + str(wait["comment"]))
elif msg.text in ["Glink","Url"]:
if msg.toType == 2:
g = acil.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
acil.updateGroup(g)
gurl = acil.reissueGroupTicket(msg.to)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
else:
acil.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif "gurl+" in msg.text:
if msg.toType == 2:
gid = msg.text.replace("gurl+","")
gurl = acil.reissueGroupTicket(gid)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
acil.sendText(msg.to,"GyeVha Protect")
elif "gurl" in msg.text:
if msg.toType == 1:
tid = msg.text.replace("gurl","")
turl = ki.getUserTicket(tid)
ki.sendText(msg.to,"line://ti/p" + turl)
else:
ki.sendText(msg.to,"error")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = acil.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
acil.updateGroup(x)
gurl = acil.reissueGroupTicket(msg.to)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Can't be used outside the group")
else:
acil.sendText(msg.to,"Not for use less than group")
# else:
# acil.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif msg.text in ["Comban"]:
wait["wblack"] = True
acil.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist…”✚")
elif msg.text in ["Comban del"]:
wait["dblack"] = True
acil.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist…”✚")
elif msg.text in ["Comban cek"]:
if wait["commentBlack"] == {}:
acil.sendText(msg.to,"Nothing in the blacklist✖")
else:
acil.sendText(msg.to,"The following is a blacklist✔")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
elif msg.text in ["Like:on","Like on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif msg.text in ["Like off","Like:off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif "Namelock:on" in msg.text:
# if msg.from_ in admin or owner:
if msg.to in wait['pname']:
    acil.sendText(msg.to,"ALREADY ON")
else:
    acil.sendText(msg.to,"TURN ON")
    wait['pname'][msg.to] = True
    wait['pro_name'][msg.to] = acil.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
# if msg.from_ in admin or owner:
if msg.to in wait['pname']:
acil.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
acil.sendText(msg.to,"ALREADY OFF")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
acil.sendText(msg.to,"BOT API SIMISIMI TURN ON")
ki.sendText(msg.to,"already turn active")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
acil.sendText(msg.to,"BOT API SIMISIMI TURN OFF")
ki.sendText(msg.to,"already non active")
elif msg.text in ["Read on","Read:on"]:
if wait['alwayRead'] == True:
if wait["alwayRead"] == "JP":
acil.sendText(msg.to,"Auto Sider ON")
else:
wait["alwayRead"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Read off","Read:off"]:
if wait['alwayRead'] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Sider OFF")
else:
wait['alwayRead'] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF auto read")
elif msg.text in ["Deletechat"]:
if wait['Removechat'] == True:
if wait["Removechat"] == "JP":
acil.sendText(msg.to,"Success!!!")
if wait['Removechat'] == False:
if wait["lang"] == "JP":
pass
elif "Sider:on" in msg.text:
# if msg.toType == 2:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
acil.sendText(msg.to,"Siap On Cek Sider")
elif "Sider:off" in msg.text:
# if msg.toType == 2:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
acil.sendText(msg.to, "Cek Sider Off")
else:
acil.sendText(msg.to, "Heh Belom Di Set")
elif msg.text in ["Autorespon on","Autorespon:on","Respon on","Respon:on"]:
if wait["detectMention"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Respon ON")
else:
wait["detectMention"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Autorespon off","Autorespon:off","Respon off","Respon:off"]:
if wait["detectMention"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Respon OFF")
else:
wait["detectMention"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF")
elif msg.text in ["Notag:on"]:
if wait["kickMention"] == True:
if wait["lang"] == "JP":
ki.sendText(msg.to,"☠️DANGER TAG KICK ON☠️")
else:
wait["kickMention"] = True
if wait["lang"] == "JP":
ki.sendText(msg.to,"already ON")
elif msg.text in ["Notag:off"]:
if wait["kickMention"] == False:
if wait["lang"] == "JP":
ki.sendText(msg.to,"SELF PROTECT TAG OFF ✔")
else:
wait["kickMention"] = False
if wait["lang"] == "JP":
ki.sendText(msg.to,"already turn OF")
elif msg.text.lower() == 'clock:on':
if wait["clock"] == True:
acil.sendText(msg.to,"Sudah On")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
acil.sendText(msg.to,"Jam on✔")
elif msg.text in ["Stickertag:on"]:
if wait["stickerMention"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah on")
else:
wait["stickerMention"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Stickertag:off"]:
if wait["stickerMention"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah off")
else:
wait["stickerMention"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF")
elif msg.text.lower() == 'clock:off':
if wait["clock"] == False:
acil.sendText(msg.to,"Hal ini sudah off✖")
else:
wait["clock"] = False
acil.sendText(msg.to," Dimatikan ✔")
elif "Clockname " in msg.text:
n = msg.text.replace("Jam say ","")
if len(n.decode("utf-8")) > 30:
acil.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
acil.sendText(msg.to,"Ini telah diubah✔\n\n" + n)
elif msg.text in ["Translate"]:
if wait["lang"] == "JP":
acil.sendText(msg.to,translateMessage)
else:
acil.sendText(msg.to,helpt)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
acil.sendText(msg.to,"Diperbarui✔")
else:
acil.sendText(msg.to,"✨Silahkan Aktifkan Nama✨")
elif ("Fuck " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
acil.kickoutFromGroup(msg.to,[target])
except:
acil.sendText(msg.to,"Error")
elif ("Kick1 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.kickoutFromGroup(msg.to,[target])
except:
ki.sendText(msg.to,"Error")
elif ("Kick2 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki2.kickoutFromGroup(msg.to,[target])
except:
ki2.sendText(msg.to,"Error")
elif ("Kick3 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki3.kickoutFromGroup(msg.to,[target])
except:
ki3.sendText(msg.to,"Error")
elif ("Kick4 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki4.kickoutFromGroup(msg.to,[target])
except:
ki4.sendText(msg.to,"Error")
elif ("Kick5 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
except:
ki5.sendText(msg.to,"Error")
elif ("Kick6 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki6.kickoutFromGroup(msg.to,[target])
except:
ki6.sendText(msg.to,"Error")
elif ("Kick7 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki7.kickoutFromGroup(msg.to,[target])
except:
ki7.sendText(msg.to,"Error")
elif ("Kick8 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki8.kickoutFromGroup(msg.to,[target])
except:
ki8.sendText(msg.to,"Error")
elif ("Kick9 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki9.kickoutFromGroup(msg.to,[target])
except:
ki9.sendText(msg.to,"Error")
elif ("Kick10 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki10.kickoutFromGroup(msg.to,[target])
except:
ki10.sendText(msg.to,"Error")
elif ("Sc " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
key = acil.getContact(key1)
acil.sendText(msg.to,"" + key1)
elif "Bro " in msg.text:
nk0 = msg.text.replace("Bro ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
acil.sendText(msg.to,"Good Bye")
#-----------------------------------------------------------
elif ("Bye " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
except:
pass
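# Ban/Unban: blacklist entries are kept in wait["blacklist"] and persisted to st2__b.json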
elif ("Ban " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Succes Banned")
except:
pass
elif msg.text in ["Mygroups"]:
gid = acil.getGroupIdsJoined()
h = ""
for i in gid:
h += "[⛓️] %s \n" % (acil.getGroup(i).name + " | 🗜️Members : " + str(len (acil.getGroup(i).members)))
acil.sendText(msg.to, "☆「Group List」☆\n"+ h +"🗜️Total Group : " +str(len(gid)))
#----------------------------------------------------------
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Locked")
except:
acil.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
elif "Id " in msg.text:
msgg = msg.text.replace("Id ",'')
conn = acil.findContactsByUserid(msgg)
if True:
msg.contentType = 13
msg.contentMetadata = {'mid': conn.mid}
acil.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
acil.sendMessage(msg)
#_________________________________________________________________________
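# "ig <username>" scrapes the public Instagram profile page and parses the og:description / og:image meta tags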
elif 'ig ' in msg.text.lower():
#if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("ig ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
acil.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
acil.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
acil.sendText(msg.to, str(njer))
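# "Image <query>" scrapes Google Image search results and sends one random hit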
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
acil.sendImageWithURL(msg.to,path)
except:
pass
elif msg.text in ["Kalender","Time","Waktu"]:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
rst = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
acil.sendText(msg.to, rst)
#==============================================================================#
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot has been active "+waktu(eltime)
acil.sendText(msg.to,van)
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = acil.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
acil.sendMessage(msg)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif msg.text in ["Myname"]:
h = acil.getContact(mid)
acil.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = acil.getContact(mid)
acil.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = acil.getContact(mid)
acil.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = acil.getContact(mid)
acil.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = acil.getContact(mid)
acil.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Mycover"]:
h = acil.getContact(mid)
cu = acil.channel.getCover(mid)
path = str(cu)
acil.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = acil.getContact(mid)
cu = acil.channel.getCover(mid)
path = str(cu)
acil.sendText(msg.to, path)
elif "Getmid @" in msg.text:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
acil.sendText(msg.to, g.mid)
else:
pass
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
acil.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
acil.sendImageWithURL(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
acil.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
acil.sendVideoWithURL(msg.to,wait["pap"])
#=========================
#-----------------------------------------------------------
elif msg.text == "Check":
acil.sendText(msg.to, "Check Yang nyimak")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
print wait2
elif 'copy ' in msg.text.lower():
if msg.toType == 2:
red = re.compile(re.escape('copy '),re.IGNORECASE)
tname = red.sub('',msg.text)
tname = tname.lstrip()
tname = tname.replace(" @","$spliter$")
tname = tname.rstrip()
tname = tname.split("$spliter$")
tname = tname[0]
tname = tname[1:]
clist = {
"Founded":False,
"displayName":"",
"statusMessage":"",
"pictureStatus":""
}
mems = acil.getGroup(msg.to).members
for targ in mems:
if targ.displayName == tname:
clist["displayName"] = targ.displayName
clist["statusMessage"] = targ.statusMessage
clist["pictureStatus"] = targ.pictureStatus
clist["Founded"] = True
if clist["Founded"]:
wait["selfStatus"] = False
me = acil.getProfile()
me.displayName = clist["displayName"]
me.statusMessage = clist["statusMessage"]
me.pictureStatus = clist["pictureStatus"]
acil.updateDisplayPicture(me.pictureStatus)
acil.updateProfile(me)
acil.sendText(msg.to,"Done")
else:
acil.sendText(msg.to,"Done")
elif "Urlpict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Urlpict @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Urlcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Urlcover @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
acil.sendText(msg.to,"Tidak Ada kontak blacklist")
else:
acil.sendText(msg.to,"═══════List Blacklist═══════")
h = ""
for i in wait["blacklist"]:
h = acil.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
acil.sendMessage(M)
#-------------------------------------------------
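# "Spam @" floods the mentioned member's private chat from every bot account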
elif "Spam @" in msg.text:
# if msg.from_ in admin:
_name = msg.text.replace("Spam @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
acil.sendText(msg.to,"Wating in progres...")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki8.sendText(g.mid,"Your Account Has Been Spammed !")
ki9.sendText(g.mid,"Your Account Has Been Spammed !")
ki10.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki8.sendText(g.mid,"Your Account Has Been Spammed !")
ki9.sendText(g.mid,"Your Account Has Been Spammed !")
ki10.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki8.sendText(g.mid,"Your Account Has Been Spammed !")
ki9.sendText(g.mid,"Your Account Has Been Spammed !")
ki10.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki8.sendText(g.mid,"Your Account Has Been Spammed !")
ki9.sendText(g.mid,"Your Account Has Been Spammed !")
ki10.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki8.sendText(g.mid,"Your Account Has Been Spammed !")
ki9.sendText(g.mid,"Your Account Has Been Spammed !")
ki10.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki8.sendText(g.mid,"Your Account Has Been Spammed !")
ki9.sendText(g.mid,"Your Account Has Been Spammed !")
ki10.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki8.sendText(g.mid,"Your Account Has Been Spammed !")
ki9.sendText(g.mid,"Your Account Has Been Spammed !")
ki10.sendText(g.mid,"Your Account Has Been Spammed !")
acil.sendText(msg.to, "Succes")
print " Spammed !"
#--------------------------------------------------------------------------
#-----------------------------------------------------------
elif "Mban:" in msg.text:
midd = msg.text.replace("Mban:","")
wait["blacklist"][midd] = True
acil.sendText(msg.to,"Target Lock")
#-----------------------------------------------------------
elif "#leave" in msg.text:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------------------
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
text = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (text+"\n")
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
ki.sendText(msg.to, text)
else:
acil.sendText(msg.to, "Out Of Range!")
elif txt[1] == "off":
if jmlh <= 10000:
ki.sendText(msg.to, tulisan)
else:
acil.sendText(msg.to, "Out Of Range!")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
acil.sendText(msg.to,"Target ditambahkan!")
break
except:
acil.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
acil.sendText(msg.to,"Target dihapuskan!")
break
except:
acil.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
acil.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "?? "+acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
acil.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
acil.sendText(msg.to,"Mimic change to target")
else:
acil.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
acil.sendText(msg.to,"Reply Message on")
else:
acil.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
acil.sendText(msg.to,"Reply Message off")
else:
acil.sendText(msg.to,"Sudah off")
elif "Grupname" in msg.text:
saya = msg.text.replace('Grupname','')
gid = acil.getGroup(msg.to)
acil.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Gid" in msg.text:
saya = msg.text.replace('Gid','')
gid = acil.getGroup(msg.to)
acil.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif msg.text in ["Friendlist"]:
contactlist = acil.getAllContactIds()
kontak = acil.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
acil.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = acil.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
acil.sendText(msg.to, msgs)
elif msg.text in ["Friendlistmid"]:
gruplist = acil.getAllContactIds()
kontak = acil.getContacts(gruplist)
num=1
msgs="═════════List FriendMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\n═════════List FriendMid═════════\n\nTotal Friend : %i" % len(kontak)
acil.sendText(msg.to, msgs)
#-----------------------------------------------
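# Note on the lurk/read-receipt bookkeeping below (inferred from how the keys are
# used in this script): wait2['readPoint'][gid] stores the message id that marks the
# reading point, wait2['readMember'][gid] accumulates reader display names,
# wait2['setTime'][gid] records when the point was set, and wait2['ROM'][gid] maps a
# reader's mid to a display label. The whole dict is persisted to sider.json.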
elif "lurk:on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
acil.sendText(msg.to,"Lurking already on")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
acil.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "lurk:off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
acil.sendText(msg.to,"Lurking already off")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
acil.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "lurkers" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
acil.sendText(msg.to, "Lurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = acil.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
acil.sendMessage(msg)
except Exception as error:
print error
pass
else:
acil.sendText(msg.to, "Lurking has not been set.")
elif msg.text in ["Bl:on"]:
wait["wblacklist"] = True
acil.sendText(msg.to,"Send Contact")
elif msg.text in ["Unbl:on"]:
wait["dblacklist"] = True
acil.sendText(msg.to,"Send Contact")
elif msg.text.lower() == 'mcheck':
if wait["blacklist"] == {}:
acil.sendText(msg.to," Nothing in the blacklist")
else:
acil.sendText(msg.to," following is a blacklist")
mc = ""
for mi_d in wait["blacklist"]:
mc += "�" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
#---------Fungsi Banlist With Tag--------#
elif msg.text in ["Banlist","ip banlist"]:
if wait["blacklist"] == {}:
acil.sendText(msg.to,"No user is Blacklisted")
else:
ki.sendText(msg.to,"Blacklisted user")
mc = " 🛡️====||B L A C K L I S T||====🛡️\n"
for mi_d in wait["blacklist"]:
mc += "🗜️" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
print "[Command]Banlist executed"
elif msg.text in ["Clearban"]:
if msg.toType == 2:
wait["blacklist"] = {}
acil.sendText(msg.to,"clear all blacklist")
ki.sendText(msg.to,"done ✔")
ki2.sendText(msg.to,"done ✔")
ki3.sendText(msg.to,"done ✔")
ki4.sendText(msg.to,"done ✔")
ki5.sendText(msg.to,"done ✔")
ki6.sendText(msg.to,"done ✔")
ki7.sendText(msg.to,"done ✔")
ki8.sendText(msg.to,"done ✔")
ki9.sendText(msg.to,"done ✔")
ki10.sendText(msg.to,"done ✔")
ki.sendText(msg.to,"blacklist done all removed 👮")
elif msg.text.lower() == 'kick@mbl':
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Daftar hitam pengguna tidak memiliki")
return
for jj in matched_list:
try:
acil.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
ki2.kickoutFromGroup(msg.to,[jj])
ki3.kickoutFromGroup(msg.to,[jj])
ki4.kickoutFromGroup(msg.to,[jj])
ki5.kickoutFromGroup(msg.to,[jj])
ki6.kickoutFromGroup(msg.to,[jj])
ki7.kickoutFromGroup(msg.to,[jj])
ki8.kickoutFromGroup(msg.to,[jj])
ki9.kickoutFromGroup(msg.to,[jj])
ki10.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#-----------------------------------------------
#---------------------------------------------------
elif "Pict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Pict @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#---------------------------------------------------
#---------------------------------------------------
elif msg.text in ["Recopy"]:
try:
acil.updateDisplayPicture(mybackup.pictureStatus)
acil.updateProfile(mybackup)
acil.sendText(msg.to, "Success")
except Exception as e:
acil.sendText(msg.to, str (e))
#-----------------------------------------------------------------------
elif "Youtube " in msg.text:
try:
textToSearch = (msg.text).replace("Youtube ", "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
acil.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
acil.sendText(msg.to,"Could not find it")
elif "Youtubesearch " in msg.text:
query = msg.text.replace("Youtubesearch ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
acil.sendText(msg.to,hasil)
print '[Command] Youtube Search'
#------------------------------------------------
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to,"~Nama\n" + contact.displayName + "\n~Mid\n" + contact.mid + "\n~Bio\n" + contact.statusMessage + "\n~Profile Picture\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n~Header\n" + str(cu))
except:
acil.sendText(msg.to,"~Nama\n" + contact.displayName + "\n~Mid\n" + contact.mid + "\n~Bio\n" + contact.statusMessage + "\n~Profile Picture\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to,contact.statusMessage)
except:
acil.sendText(msg.to,contact.statusMessage)
elif "Gimage" in msg.text:
group = acil.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
acil.sendImageWithURL(msg.to,path)
elif "Getprofile @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getprofile @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------
elif msg.text in ["Invite"]:
wait["invite"] = True
random.choice(KAC).sendText(msg.to,"send contact 😉")
#------------------------------------------------------------
elif "Getcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif msg.text.lower() == 'reboot':
print "[Command]Like executed"
try:
acil.sendText(msg.to,"Restarting...")
restart_program()
except:
acil.sendText(msg.to,"Please wait")
restart_program()
pass
elif "Hay " in msg.text:
say = msg.text.replace("Hay ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
acil.sendAudio(msg.to,"hasil.mp3")
elif "Nuke" in msg.text:
if msg.toType == 2:
print "Nuke ok"
_name = msg.text.replace("Nuke","")
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Limit gw njir..")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
elif msg.text in ["Tag","Tagall","Mencret"]:
group = acil.getGroup(msg.to)
k = len(group.members)//500
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*500 : (j+1)*500]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
acil.sendMessage(msg)
elif msg.text.lower() == '.':
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
for bot in [ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]:
    bot.acceptGroupInvitationByTicket(msg.to,Ticket)
    time.sleep(0.01)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif msg.text.lower() == 'reinvite':
if msg.toType == 2:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
acil.sendText(msg.to,"waitting...")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B1 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B2 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B3 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B4 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B5 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B6 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki6.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B7 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki7.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B8 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki8.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B9 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki9.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B10 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki10.updateGroup(G)
print "kicker ok"
#------------------------------------------------------------------
elif msg.text.lower() == ',':
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
# acil.sendText(msg.to,"Bye Bye😘 " + str(ginfo.name) + "")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
acil.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B1 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B2 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B3 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki3.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B4 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki4.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B5 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B6 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki6.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B7 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki7.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B8 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki8.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B9 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki9.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B10 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki10.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Welcome","wc","welcome","Wc"]:
ginfo = acil.getGroup(msg.to)
acil.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
acil.sendText(msg.to,"" + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
elif "Say " in msg.text:
bctxt = msg.text.replace("Say ","")
for bot in [ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]:
    bot.sendText(msg.to,(bctxt))
elif "Bom " in msg.text:
bctxt = msg.text.replace("Bom ","")
for _ in range(10):
    for bot in [ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]:
        bot.sendText(msg.to,(bctxt))
# acil.sendText(msg.to, "Bom chat selesai mbut.😂")
elif msg.text.lower() == 'ping':
ki.sendText(msg.to,"Ping ")
ki2.sendText(msg.to,"Ping ")
ki3.sendText(msg.to,"Ping ")
ki4.sendText(msg.to,"Ping ")
ki5.sendText(msg.to,"Ping ")
ki6.sendText(msg.to,"Ping ")
ki7.sendText(msg.to,"Ping ")
ki8.sendText(msg.to,"Ping ")
ki9.sendText(msg.to,"Ping ")
ki10.sendText(msg.to,"Ping ")
#-----------------------------------------------
#-----------------------------------------------
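# Operation-type codes handled below, as this script appears to use them (informal names):
# 19 = member kicked from group, 17 = member joined, 15 = member left, 13 = group invitation,
# 11 = group QR/join-link setting changed, 5 = added as friend, 55 = message read receipt.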
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
ki2.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = acil.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = acil.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki6mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki8mid:
if op.param2 in ki7mid:
G = ki7.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(G)
else:
G = ki7.getGroup(op.param1)
ki7.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki7.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki7mid:
G = ki8.getGroup(op.param1)
G.preventJoinByTicket = False
ki8.updateGroup(G)
Ticket = ki8.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(G)
else:
G = ki8.getGroup(op.param1)
ki8.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki8.updateGroup(G)
Ticket = ki8.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki9mid:
G = ki10.getGroup(op.param1)
G.preventJoinByTicket = False
ki10.updateGroup(G)
Ticket = ki10.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki10.updateGroup(G)
else:
G = ki10.getGroup(op.param1)
ki10.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki10.updateGroup(G)
Ticket = ki10.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki10mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
except:
pass
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
ki5.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
random.choice(KAK).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in Bots:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
acil.cancelGroupInvitation(op.param1,[contact.mid for contact in acil.getGroup(op.param1).invitee])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
acil.cancelGroupInvitation(op.param1,[contact.mid for contact in acil.getGroup(op.param1).invitee])
else:
acil.sendText(op.param1,"⚡uJANGAN INVITE TANPA SEIJIN ADMIN.!")
else:
acil.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':'u0869b63018180639efeb395436479e02'}
acil.sendImageWithURL(op.param1,"http://line.me/ti/p/fbTgHwjwGB")
if (wait["message"] in [""," ","\n",None]):
pass
else:
acil.sendText(op.param1,str(wait["message"]))
acil.sendMessage(c)
ki.sendText(op.param1,str(wait["message"]))
ki.sendMessage(c)
ki2.sendText(op.param1,str(wait["message"]))
ki2.sendMessage(c)
ki3.sendText(op.param1,str(wait["message"]))
ki3.sendMessage(c)
ki4.sendText(op.param1,str(wait["message"]))
ki4.sendMessage(c)
ki5.sendText(op.param1,str(wait["message"]))
ki5.sendMessage(c)
ki6.sendText(op.param1,str(wait["message"]))
ki6.sendMessage(c)
ki7.sendText(op.param1,str(wait["message"]))
ki7.sendMessage(c)
ki8.sendText(op.param1,str(wait["message"]))
ki8.sendMessage(c)
ki9.sendText(op.param1,str(wait["message"]))
ki9.sendMessage(c)
ki10.sendText(op.param1,str(wait["message"]))
ki10.sendMessage(c)
#------Open QR Kick start------#
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
random.choice(KAC).updateGroup(G)
#------Open Kick finish-----#
#------invite Kick start------#
if op.type == 13:
if wait["inviteprotect"] == True:
if op.param2 not in Bots:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
random.choice(KAC).updateGroup(G)
#------invite Kick finish-----#
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = acil.getContact(op.param2).displayName
Np = acil.getContact(op.param2).pictureStatus
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n� " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
acil.sendText(op.param1,"═════════SIDER═════════\n" + nick[0] + "\n" + wait["sider1"])
acil.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
acil.sendText(op.param1,"═════════SIDER═════════\n" + nick[0] + "\n" + wait["sider1"])
acil.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
acil.sendText(op.param1,"═════════SIDER═════════\n" + nick[0] + "\n" + wait["sider1"])
acil.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = acil.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
if op.type == 17:
if wait["wcOn"] == True:
ginfo = acil.getGroup(op.param1)
contact = acil.getContact(op.param2)
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
acil.sendText(op.param1,wait["joingc"] + "\n" + acil.getContact(op.param2).displayName + "\n🚬 Welcome to Group ➡ " + str(ginfo.name) + "\nPembuat grup " + ginfo.creator.displayName + "\n🐺 Salam Kenal Semoga Betah Kk\n Jangan Lupa Baca Not \n▶ No Baper\n▶ No Desah \n▶ No Tikung\n▶ No Modus\n▶ No Nipu\n\n══════zGyevha═══════")
acil.sendMessage(c)
acil.sendImageWithURL(op.param1,"http://dl.profile.line-cdn.net/" + contact.pictureStatus)
print ("MEMBER JOIN TO GROUP")
if op.type == 15:
if wait["leftOn"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
acil.sendMessage(c)
acil.sendText(op.param1,acil.getContact(op.param2).displayName + "\n" + wait["leftgc"])
print ("MEMBER HAS LEFT THE GROUP")
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    now2 = datetime.now()
    nowT = datetime.strftime(now2,"%M")
    if nowT in ["10","20","30","40","50","00"]:
        return False
    else:
        return True
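# a2() returns False only when the current minute is a multiple of ten (:00, :10, :20, ...),
# True otherwise; nameUpdate() below keeps it around as an optional throttle, but the call
# is commented out there.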
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
time.sleep(0.30)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = acil.fetchOps(acil.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(acil.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
acil.Poll.rev = max(acil.Poll.rev, Op.revision)
bot(Op)
cls_oaicitest.py
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
#
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
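# pexpect is not part of the standard library; on a typical worker it can be installed
# with, e.g., `pip3 install pexpect` (exact provisioning depends on the CI setup).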
#-----------------------------------------------------------
# Import Libs
#-----------------------------------------------------------
import sys # arg
import re # reg
import pexpect # pexpect
import time # sleep
import os
import subprocess
import xml.etree.ElementTree as ET
import logging
import datetime
import signal
import statistics as stat
from multiprocessing import Process, Lock, SimpleQueue
logging.basicConfig(
level=logging.DEBUG,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
#import our libs
import helpreadme as HELP
import constants as CONST
import sshconnection
import cls_module_ue
import cls_ci_ueinfra #class defining the multi UE infrastructure
#-----------------------------------------------------------
# Utility functions
#-----------------------------------------------------------
def GetPingTimeAnalysis(ping_log_file):
#ping time values read from file
t_ping=[]
#ping stats (dictionary) to be returned by the function
ping_stat={}
if (os.path.isfile(ping_log_file)):
with open(ping_log_file,"r") as f:
for line in f:
#looking for time=xxx ms field
result=re.match('^.+time=(?P<ping_time>[0-9\.]+)',line)
if result != None:
t_ping.append(float(result.group('ping_time')))
#guard against an empty sample set (no 'time=' fields found in the log)
if not t_ping:
    logging.error("No ping time samples found in " + ping_log_file)
    return -1
#initial stats
ping_stat['min_0']=min(t_ping)
ping_stat['mean_0']=stat.mean(t_ping)
ping_stat['median_0']=stat.median(t_ping)
ping_stat['max_0']=max(t_ping)
#get index of max value
max_loc=t_ping.index(max(t_ping))
ping_stat['max_loc']=max_loc
#remove it
t_ping.pop(max_loc)
#new stats after removing max value
ping_stat['min_1']=min(t_ping)
ping_stat['mean_1']=stat.mean(t_ping)
ping_stat['median_1']=stat.median(t_ping)
ping_stat['max_1']=max(t_ping)
return ping_stat
else:
logging.error("Ping log file does not exist")
return -1
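# Illustrative use of GetPingTimeAnalysis() (the file name and log lines below are
# hypothetical, not taken from the actual test flow; keys follow the function above):
#
#   stats = GetPingTimeAnalysis('ping_test.log')
#   if stats != -1:
#       logging.debug('ping min/mean/median/max = %.2f/%.2f/%.2f/%.2f ms',
#                     stats['min_0'], stats['mean_0'], stats['median_0'], stats['max_0'])
#       logging.debug('excluding the single worst sample: max = %.2f ms', stats['max_1'])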
#-----------------------------------------------------------
# OaiCiTest Class Definition
#-----------------------------------------------------------
class OaiCiTest():
def __init__(self):
self.ranRepository = ''
self.ranBranch = ''
self.ranCommitID = ''
self.ranAllowMerge = False
self.ranTargetBranch = ''
self.FailReportCnt = 0
self.ADBIPAddress = ''
self.ADBUserName = ''
self.ADBPassword = ''
self.ADBCentralized = True
self.testCase_id = ''
self.testXMLfiles = []
self.testUnstable = False
self.testMinStableId = '999999'
self.testStabilityPointReached = False
self.desc = ''
self.ping_args = ''
self.ping_packetloss_threshold = ''
self.iperf_args = ''
self.iperf_packetloss_threshold = ''
self.iperf_profile = ''
self.iperf_options = ''
self.iperf_direction = ''
self.nbMaxUEtoAttach = -1
self.UEDevices = []
self.UEDevicesStatus = []
self.UEDevicesRemoteServer = []
self.UEDevicesRemoteUser = []
self.UEDevicesOffCmd = []
self.UEDevicesOnCmd = []
self.UEDevicesRebootCmd = []
self.CatMDevices = []
self.UEIPAddresses = []
self.idle_sleep_time = 0
self.x2_ho_options = 'network'
self.x2NbENBs = 0
self.x2ENBBsIds = []
self.x2ENBConnectedUEs = []
self.repeatCounts = []
self.finalStatus = False
self.UEIPAddress = ''
self.UEUserName = ''
self.UEPassword = ''
self.UE_instance = 0
self.UESourceCodePath = ''
self.UELogFile = ''
self.Build_OAI_UE_args = ''
self.Initialize_OAI_UE_args = ''
self.clean_repository = True
self.air_interface=''
self.expectedNbOfConnectedUEs = 0
self.ue_id = '' #used for module identification
self.ue_trace ='' #used to enable QLog trace for Module UE, passed to Module UE object at InitializeUE()
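	# The attributes above are typically populated from the XML test-case files parsed by
	# the surrounding CI framework (hence the xml.etree import and testXMLfiles list); the
	# defaults here are placeholders until a test case overrides them.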
def BuildOAIUE(self,HTML):
if self.UEIPAddress == '' or self.ranRepository == '' or self.ranBranch == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
result = re.search('--nrUE', self.Build_OAI_UE_args)
if result is not None:
self.air_interface='nr-uesoftmodem'
ue_prefix = 'NR '
else:
self.air_interface='lte-uesoftmodem'
ue_prefix = ''
result = re.search('([a-zA-Z0-9\:\-\.\/])+\.git', self.ranRepository)
if result is not None:
full_ran_repo_name = self.ranRepository.replace('git/', 'git')
else:
full_ran_repo_name = self.ranRepository + '.git'
SSH.command('mkdir -p ' + self.UESourceCodePath, '\$', 5)
SSH.command('cd ' + self.UESourceCodePath, '\$', 5)
SSH.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + full_ran_repo_name + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
# here add a check if git clone or git fetch went smoothly
SSH.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
SSH.command('git config user.name "OAI Jenkins"', '\$', 5)
if self.clean_repository:
SSH.command('ls *.txt', '\$', 5)
result = re.search('LAST_BUILD_INFO', SSH.getBefore())
if result is not None:
mismatch = False
SSH.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
result = re.search(self.ranCommitID, SSH.getBefore())
if result is None:
mismatch = True
SSH.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranAllowMerge:
result = re.search('YES', SSH.getBefore())
if result is None:
mismatch = True
SSH.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
result = re.search('develop', SSH.getBefore())
else:
result = re.search(self.ranTargetBranch, SSH.getBefore())
if result is None:
mismatch = True
else:
result = re.search('NO', SSH.getBefore())
if result is None:
mismatch = True
if not mismatch:
SSH.close()
HTML.CreateHtmlTestRow(self.Build_OAI_UE_args, 'OK', CONST.ALL_PROCESSES_OK)
return
SSH.command('echo ' + self.UEPassword + ' | sudo -S git clean -x -d -ff', '\$', 30)
# if the commit ID is provided use it to point to it
if self.ranCommitID != '':
SSH.command('git checkout -f ' + self.ranCommitID, '\$', 30)
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already have been checked earlier
if self.ranAllowMerge:
if self.ranTargetBranch == '':
if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
SSH.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
SSH.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
SSH.command('source oaienv', '\$', 5)
SSH.command('cd cmake_targets', '\$', 5)
SSH.command('mkdir -p log', '\$', 5)
SSH.command('chmod 777 log', '\$', 5)
# no need to remove in log (git clean did the trick)
SSH.command('stdbuf -o0 ./build_oai ' + self.Build_OAI_UE_args + ' 2>&1 | stdbuf -o0 tee compile_oai_ue.log', 'Bypassing the Tests|build have failed', 1200)
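# NOTE: the 'ls ran_build/build' command below is issued twice on purpose; presumably the
# first call only primes the SSH output buffer so that getBefore() sees the complete listing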
SSH.command('ls ran_build/build', '\$', 3)
SSH.command('ls ran_build/build', '\$', 3)
buildStatus = True
result = re.search(self.air_interface, SSH.getBefore())
if result is None:
buildStatus = False
SSH.command('mkdir -p build_log_' + self.testCase_id, '\$', 5)
SSH.command('mv log/* ' + 'build_log_' + self.testCase_id, '\$', 5)
SSH.command('mv compile_oai_ue.log ' + 'build_log_' + self.testCase_id, '\$', 5)
if buildStatus:
# Generating a BUILD INFO file
SSH.command('echo "SRC_BRANCH: ' + self.ranBranch + '" > ../LAST_BUILD_INFO.txt', '\$', 2)
SSH.command('echo "SRC_COMMIT: ' + self.ranCommitID + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if self.ranAllowMerge:
SSH.command('echo "MERGED_W_TGT_BRANCH: YES" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
SSH.command('echo "TGT_BRANCH: develop" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
SSH.command('echo "TGT_BRANCH: ' + self.ranTargetBranch + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
SSH.command('echo "MERGED_W_TGT_BRANCH: NO" >> ../LAST_BUILD_INFO.txt', '\$', 2)
SSH.close()
HTML.CreateHtmlTestRow(self.Build_OAI_UE_args, 'OK', CONST.ALL_PROCESSES_OK, 'OAI UE')
else:
SSH.close()
logging.error('\u001B[1m Building OAI UE Failed\u001B[0m')
HTML.CreateHtmlTestRow(self.Build_OAI_UE_args, 'KO', CONST.ALL_PROCESSES_OK, 'OAI UE')
HTML.CreateHtmlTabFooter(False)
self.ConditionalExit()
def CheckFlexranCtrlInstallation(self,RAN,EPC,CONTAINERS):
if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '':
return
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('ls -ls /opt/flexran_rtc/*/rt_controller', '\$', 5)
result = re.search('/opt/flexran_rtc/build/rt_controller', SSH.getBefore())
if result is not None:
RAN.flexranCtrlInstalled=True
RAN.flexranCtrlIpAddress=EPC.IPAddress
logging.debug('Flexran Controller is installed')
else:
# Maybe flexran-rtc is deployed into a container
SSH.command('docker inspect --format="FLEX_RTC_IP_ADDR = {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" prod-flexran-rtc', '\$', 5)
result = re.search('FLEX_RTC_IP_ADDR = (?P<flex_ip_addr>[0-9\.]+)', SSH.getBefore())
if result is not None:
RAN.flexranCtrlDeployed=True
RAN.flexranCtrlIpAddress=result.group('flex_ip_addr')
CONTAINERS.flexranCtrlDeployed=True
CONTAINERS.flexranCtrlIpAddress=result.group('flex_ip_addr')
logging.debug('Flexran Controller is deployed: ' + RAN.flexranCtrlIpAddress)
SSH.close()
def InitializeFlexranCtrl(self, HTML,RAN,EPC):
if RAN.flexranCtrlInstalled == False:
return
if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd /opt/flexran_rtc', '\$', 5)
SSH.command('echo ' + EPC.Password + ' | sudo -S rm -f log/*.log', '\$', 5)
SSH.command('echo ' + EPC.Password + ' | sudo -S echo "build/rt_controller -c log_config/basic_log" > ./my-flexran-ctl.sh', '\$', 5)
SSH.command('echo ' + EPC.Password + ' | sudo -S chmod 755 ./my-flexran-ctl.sh', '\$', 5)
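# start the controller in the background via the 'daemon' utility so the SSH session returns
# immediately; its output is redirected to a per-test-case log under /opt/flexran_rtc/log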
SSH.command('echo ' + EPC.Password + ' | sudo -S daemon --unsafe --name=flexran_rtc_daemon --chdir=/opt/flexran_rtc -o /opt/flexran_rtc/log/flexranctl_' + self.testCase_id + '.log ././my-flexran-ctl.sh', '\$', 5)
SSH.command('ps -aux | grep --color=never rt_controller', '\$', 5)
result = re.search('rt_controller -c ', SSH.getBefore())
if result is not None:
logging.debug('\u001B[1m Initialize FlexRan Controller Completed\u001B[0m')
RAN.flexranCtrlStarted=True
SSH.close()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def InitializeUE_common(self, device_id, idx,COTS_UE):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if not self.ADBCentralized:
# Reboot UE
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesRebootCmd[idx], '\$', 60)
# Wait
#time.sleep(60)
# Put in LTE-Mode only
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode 11"\'', '\$', 60)
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode1 11"\'', '\$', 60)
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode2 11"\'', '\$', 60)
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode3 11"\'', '\$', 60)
# enable data service
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
# we need to do radio on/off cycle to make sure of above changes
# airplane mode off // radio on
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
#time.sleep(10)
# airplane mode on // radio off
#SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
# normal procedure without reboot
# enable data service
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
# airplane mode on // radio off
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
SSH.close()
return
#RH quick add-on to integrate cots control defined by yaml
#if device_id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
#otherwise we use the legacy procedure
logging.debug('Device id ' + str(device_id) + ', in COTS UE dict : ' + str(COTS_UE.Check_Exists(device_id)))
if COTS_UE.Check_Exists(device_id):
#switch device to Airplane mode ON (ie Radio OFF)
COTS_UE.Set_Airplane(device_id, 'ON')
else:
# enable data service
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data enable"', '\$', 60)
# The following commands are deprecated since we no longer work on Android 7+
# SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell settings put global airplane_mode_on 1', '\$', 10)
# SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true', '\$', 60)
# a dedicated script has to be installed inside the UE
# airplane mode on means call /data/local/tmp/off
if device_id == '84B7N16418004022':
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
#airplane mode off means call /data/local/tmp/on
logging.debug('\u001B[1mUE (' + device_id + ') Initialize Completed\u001B[0m')
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def InitializeUE(self,HTML,RAN,EPC, COTS_UE, InfraUE,ue_trace):
if self.ue_id=='':#no ID specified, then it is a COTS controlled by ADB
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.InitializeUE_common, args = (device_id,i,COTS_UE,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
else: #if an ID is specified, it is a module from the yaml infrastructure file
#RH
Module_UE = cls_module_ue.Module_UE(InfraUE.ci_ue_infra[self.ue_id])
Module_UE.ue_trace=ue_trace
is_module=Module_UE.CheckCMProcess(EPC.Type)
if is_module:
Module_UE.EnableTrace()
time.sleep(5)
# Looping attach / detach / wait to be successful at least once
cnt = 0
status = -1
while cnt < 4:
Module_UE.Command("wup")
logging.debug("Waiting for IP address to be assigned")
time.sleep(20)
logging.debug("Retrieve IP address")
status=Module_UE.GetModuleIPAddress()
if status==0:
cnt = 10
else:
cnt += 1
Module_UE.Command("detach")
time.sleep(20)
if cnt == 10 and status == 0:
HTML.CreateHtmlTestRow(Module_UE.UEIPAddress, 'OK', CONST.ALL_PROCESSES_OK)
logging.debug('UE IP address : '+ Module_UE.UEIPAddress)
#execute additional commands from yaml file after UE attach
SSH = sshconnection.SSHConnection()
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
if hasattr(Module_UE,'StartCommands'):
for startcommand in Module_UE.StartCommands:
cmd = 'echo ' + Module_UE.HostPassword + ' | ' + startcommand
SSH.command(cmd,'\$',5)
SSH.close()
#check that the MTU is as expected / requested
Module_UE.CheckModuleMTU()
else: #status==-1 failed to retrieve IP address
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
def InitializeOAIUE(self,HTML,RAN,EPC,COTS_UE,InfraUE):
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
if self.air_interface == 'lte-uesoftmodem':
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is None:
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'KO', pStatus)
HTML.CreateHtmlTabFooter(False)
self.ConditionalExit()
UE_prefix = ''
else:
UE_prefix = 'NR '
SSH = sshconnection.SSHConnection()
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
# b2xx_fx3_utils reset procedure
SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 90)
result = re.search('type: b200', SSH.getBefore())
if result is not None:
logging.debug('Found a B2xx device --> resetting it')
SSH.command('echo ' + self.UEPassword + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
# Reloading FPGA bin firmware
SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 90)
result = re.search('type: n3xx', str(SSH.getBefore()))
if result is not None:
logging.debug('Found a N3xx device --> resetting it')
SSH.command('cd ' + self.UESourceCodePath, '\$', 5)
# Initialize_OAI_UE_args usually starts with -C followed by the location in the repository
SSH.command('source oaienv', '\$', 5)
SSH.command('cd cmake_targets/ran_build/build', '\$', 5)
if self.air_interface == 'lte-uesoftmodem':
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
# We may have to regenerate the .u* files
if result is None:
SSH.command('ls /tmp/*.sed', '\$', 5)
result = re.search('adapt_usim_parameters', SSH.getBefore())
if result is not None:
SSH.command('sed -f /tmp/adapt_usim_parameters.sed ../../../openair3/NAS/TOOLS/ue_eurecom_test_sfr.conf > ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf', '\$', 5)
else:
SSH.command('sed -e "s#93#92#" -e "s#8baf473f2f8fd09487cccbd7097c6862#fec86ba6eb707ed08905757b1bb44b8f#" -e "s#e734f8734007d6c5ce7a0508809e7e9c#C42449363BBAD02B66D16BC975D77CC1#" ../../../openair3/NAS/TOOLS/ue_eurecom_test_sfr.conf > ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf .u*', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S ../../../targets/bin/conf2uedata -c ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf -o .', '\$', 5)
else:
SSH.command('if [ -e rbconfig.raw ]; then echo ' + self.UEPassword + ' | sudo -S rm rbconfig.raw; fi', '\$', 5)
SSH.command('if [ -e reconfig.raw ]; then echo ' + self.UEPassword + ' | sudo -S rm reconfig.raw; fi', '\$', 5)
# Copy the RAW files from gNB running directory (maybe on another machine)
copyin_res = SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/rbconfig.raw', '.')
if (copyin_res == 0):
SSH.copyout(self.UEIPAddress, self.UEUserName, self.UEPassword, './rbconfig.raw', self.UESourceCodePath + '/cmake_targets/ran_build/build')
copyin_res = SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/reconfig.raw', '.')
if (copyin_res == 0):
SSH.copyout(self.UEIPAddress, self.UEUserName, self.UEPassword, './reconfig.raw', self.UESourceCodePath + '/cmake_targets/ran_build/build')
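# wrap the softmodem invocation in a small run script so the exact same command line can be
# relaunched on every retry below; 'ulimit -c unlimited' enables core dumps for post-mortem analysis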
SSH.command('echo "ulimit -c unlimited && ./'+ self.air_interface +' ' + self.Initialize_OAI_UE_args + '" > ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
SSH.command('chmod 775 ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log', '\$', 5)
self.UELogFile = 'ue_' + self.testCase_id + '.log'
# Loop several times in the hope that the UE really syncs with the eNB
doOutterLoop = True
outterLoopCounter = 5
gotSyncStatus = True
fullSyncStatus = True
while (doOutterLoop):
SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets/ran_build/build', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log', '\$', 5)
SSH.command('echo $USER; nohup sudo -E ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh' + ' > ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log ' + ' 2>&1 &', self.UEUserName, 5)
time.sleep(6)
SSH.command('cd ../..', '\$', 5)
doLoop = True
loopCounter = 10
gotSyncStatus = True
# the 'got sync' message is for the UE threads synchronization
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
# This should never occur
logging.error('"got sync" message never showed!')
gotSyncStatus = False
doLoop = False
continue
SSH.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync"', '\$', 4)
if self.air_interface == 'nr-uesoftmodem':
result = re.search('Starting sync detection', SSH.getBefore())
else:
result = re.search('got sync', SSH.getBefore())
if result is None:
time.sleep(10)
else:
doLoop = False
logging.debug('Found "got sync" message!')
if gotSyncStatus == False:
# we certainly need to stop the lte-uesoftmodem process if it is still running!
SSH.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
result = re.search('-uesoftmodem', SSH.getBefore())
if result is not None:
SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT -r *-uesoftmodem', '\$', 4)
time.sleep(3)
outterLoopCounter = outterLoopCounter - 1
if (outterLoopCounter == 0):
doOutterLoop = False
continue
# We now check whether cell synchronization with the eNB/gNB occurred
# For LTE, a failure shows up as a negative message ('No cell synchronization found')
# For NR, a success shows up as a positive message ('Measured Carrier Frequency')
doLoop = True
if self.air_interface == 'nr-uesoftmodem':
loopCounter = 10
else:
# LTE case: poll for the negative 'No cell synchronization found' message
loopCounter = 10
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
if self.air_interface == 'nr-uesoftmodem':
# At this point it is very likely that the UE did NOT cell-sync with the gNB
doLoop = False
fullSyncStatus = False
logging.debug('Never saw the NR-Sync message (Measured Carrier Frequency) --> try again')
time.sleep(6)
# Stopping the NR-UE
SSH.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
result = re.search('nr-uesoftmodem', SSH.getBefore())
if result is not None:
SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT nr-uesoftmodem', '\$', 4)
time.sleep(6)
else:
# At this point it is very likely that the UE did cell-sync with the eNB
doLoop = False
doOutterLoop = False
fullSyncStatus = True
continue
SSH.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync|Frequency"', '\$', 4)
if self.air_interface == 'nr-uesoftmodem':
# Positive messaging -->
result = re.search('Measured Carrier Frequency', SSH.getBefore())
if result is not None:
doLoop = False
doOutterLoop = False
fullSyncStatus = True
else:
time.sleep(6)
else:
# Negative messaging -->
result = re.search('No cell synchronization found', SSH.getBefore())
if result is None:
time.sleep(6)
else:
doLoop = False
fullSyncStatus = False
logging.debug('Found: "No cell synchronization" message! --> try again')
time.sleep(6)
SSH.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
result = re.search('lte-uesoftmodem', SSH.getBefore())
if result is not None:
SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT lte-uesoftmodem', '\$', 4)
outterLoopCounter = outterLoopCounter - 1
if (outterLoopCounter == 0):
doOutterLoop = False
if fullSyncStatus and gotSyncStatus:
doInterfaceCheck = False
if self.air_interface == 'lte-uesoftmodem':
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is None:
doInterfaceCheck = True
# For the moment, only in explicit noS1 without kernel module (ie w/ tunnel interface)
if self.air_interface == 'nr-uesoftmodem':
result = re.search('--noS1 --nokrnmod 1', str(self.Initialize_OAI_UE_args))
if result is not None:
doInterfaceCheck = True
if doInterfaceCheck:
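# NOTE: ifconfig is run twice on purpose; presumably the first call only primes the SSH output
# buffer so that the regular expression below parses a complete output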
SSH.command('ifconfig oaitun_ue1', '\$', 4)
SSH.command('ifconfig oaitun_ue1', '\$', 4)
# ifconfig output is different between ubuntu 16 and ubuntu 18
result = re.search('inet addr:[0-9]|inet [0-9]', SSH.getBefore())
if result is not None:
logging.debug('\u001B[1m oaitun_ue1 interface is mounted and configured\u001B[0m')
tunnelInterfaceStatus = True
else:
logging.debug(SSH.getBefore())
logging.error('\u001B[1m oaitun_ue1 interface is either NOT mounted or NOT configured\u001B[0m')
tunnelInterfaceStatus = False
if RAN.eNBmbmsEnables[0]:
SSH.command('ifconfig oaitun_uem1', '\$', 4)
result = re.search('inet addr', SSH.getBefore())
if result is not None:
logging.debug('\u001B[1m oaitun_uem1 interface is mounted and configured\u001B[0m')
tunnelInterfaceStatus = tunnelInterfaceStatus and True
else:
logging.error('\u001B[1m oaitun_uem1 interface is either NOT mounted or NOT configured\u001B[0m')
tunnelInterfaceStatus = False
else:
tunnelInterfaceStatus = True
else:
tunnelInterfaceStatus = True
SSH.close()
if fullSyncStatus and gotSyncStatus and tunnelInterfaceStatus:
HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'OK', CONST.ALL_PROCESSES_OK, 'OAI UE')
logging.debug('\u001B[1m Initialize OAI UE Completed\u001B[0m')
if (self.ADBIPAddress != 'none'):
self.UEDevices = []
self.UEDevices.append('OAI-UE')
self.UEDevicesStatus = []
self.UEDevicesStatus.append(CONST.UE_STATUS_DETACHED)
else:
if self.air_interface == 'lte-uesoftmodem':
if RAN.eNBmbmsEnables[0]:
HTML.htmlUEFailureMsg='oaitun_ue1/oaitun_uem1 interfaces are either NOT mounted or NOT configured'
else:
HTML.htmlUEFailureMsg='oaitun_ue1 interface is either NOT mounted or NOT configured'
HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'KO', CONST.OAI_UE_PROCESS_NO_TUNNEL_INTERFACE, 'OAI UE')
else:
HTML.htmlUEFailureMsg='nr-uesoftmodem did NOT sync'
HTML.CreateHtmlTestRow(self.air_interface + ' ' + self.Initialize_OAI_UE_args, 'KO', CONST.OAI_UE_PROCESS_COULD_NOT_SYNC, 'OAI UE')
logging.error('\033[91mInitialize OAI UE Failed! \033[0m')
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
def checkDevTTYisUnlocked(self):
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
count = 0
while count < 5:
SSH.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep ttyUSB0', '\$', 10)
result = re.search('picocom', SSH.getBefore())
if result is None:
count = 10
else:
time.sleep(5)
count = count + 1
SSH.close()
def InitializeCatM(self,HTML):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.enablePicocomClosure()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session; the picocom command does NOT handle `sudo -S` well
SSH.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
SSH.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
SSH.command('AT', 'OK|ERROR', 5)
SSH.command('AT', 'OK', 5)
# Doing a power cycle
SSH.command('AT^RESET', 'SIMSTORE,READY', 15)
SSH.command('AT', 'OK|ERROR', 5)
SSH.command('AT', 'OK', 5)
SSH.command('ATE1', 'OK', 5)
# Disabling the Radio
SSH.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
# Checking if auto-attach is enabled
SSH.command('AT^AUTOATT?', 'OK', 5)
result = re.search('AUTOATT: (?P<state>[0-9\-]+)', SSH.getBefore())
if result is not None:
if result.group('state') is not None:
autoAttachState = int(result.group('state'))
if autoAttachState is not None:
if autoAttachState == 0:
SSH.command('AT^AUTOATT=1', 'OK', 5)
logging.debug('\u001B[1m Auto-Attach enabled\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Could not check Auto-Attach! \u001B[0m')
# Force closure of picocom but device might still be locked
SSH.close()
SSH.disablePicocomClosure()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def TerminateCatM(self,HTML):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.enablePicocomClosure()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session; the picocom command does NOT handle `sudo -S` well
SSH.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
SSH.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
SSH.command('AT', 'OK|ERROR', 5)
SSH.command('AT', 'OK', 5)
# Disabling the Radio
SSH.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
SSH.close()
SSH.disablePicocomClosure()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def AttachCatM(self,HTML,RAN,COTS_UE,EPC,InfraUE):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.enablePicocomClosure()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# dummy call to start a sudo session; the picocom command does NOT handle `sudo -S` well
SSH.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
SSH.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Calling twice AT to clear all buffers
SSH.command('AT', 'OK|ERROR', 5)
SSH.command('AT', 'OK', 5)
# Enabling the Radio
SSH.command('AT+CFUN=1', 'SIMSTORE,READY', 5)
logging.debug('\u001B[1m Cellular Functionality enabled\u001B[0m')
time.sleep(4)
# We should check if we register
count = 0
attach_cnt = 0
attach_status = False
while count < 5:
SSH.command('AT+CEREG?', 'OK', 5)
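# the +CEREG read response has the form '+CEREG: <n>,<stat>[,"<tac>","<ci>",...]' (3GPP TS 27.007);
# <stat> = 1 means the module is registered on its home network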
result = re.search('CEREG: 2,(?P<state>[0-9\-]+),', SSH.getBefore())
if result is not None:
mDataConnectionState = int(result.group('state'))
if mDataConnectionState is not None:
if mDataConnectionState == 1:
count = 10
attach_status = True
result = re.search('CEREG: 2,1,"(?P<networky>[0-9A-Z]+)","(?P<networkz>[0-9A-Z]+)"', SSH.getBefore())
if result is not None:
networky = result.group('networky')
networkz = result.group('networkz')
logging.debug('\u001B[1m CAT-M module attached to eNB (' + str(networky) + '/' + str(networkz) + ')\u001B[0m')
else:
logging.debug('\u001B[1m CAT-M module attached to eNB\u001B[0m')
else:
logging.debug('+CEREG: 2,' + str(mDataConnectionState))
attach_cnt = attach_cnt + 1
else:
logging.debug(SSH.getBefore())
attach_cnt = attach_cnt + 1
count = count + 1
time.sleep(1)
if attach_status:
SSH.command('AT+CESQ', 'OK', 5)
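# +CESQ reports <rxlev>,<ber>,<rscp>,<ecno>,<rsrq>,<rsrp> (3GPP TS 27.007); the leading 99,99,255,255
# values mean 'not known or not detectable', so only the LTE RSRQ/RSRP indices are parsed here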
result = re.search('CESQ: 99,99,255,255,(?P<rsrq>[0-9]+),(?P<rsrp>[0-9]+)', SSH.getBefore())
if result is not None:
nRSRQ = int(result.group('rsrq'))
nRSRP = int(result.group('rsrp'))
if (nRSRQ is not None) and (nRSRP is not None):
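# convert the reported indices into physical values; this approximates the 3GPP TS 36.133 mapping
# (RSRQ ~ -19.5 + idx/2 dB, RSRP ~ -140 + idx dBm)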
logging.debug(' RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB')
logging.debug(' RSRP = ' + str(-140+nRSRP) + ' dBm')
SSH.close()
SSH.disablePicocomClosure()
html_queue = SimpleQueue()
self.checkDevTTYisUnlocked()
if attach_status:
html_cell = '<pre style="background-color:white">CAT-M module Attachment Completed in ' + str(attach_cnt+4) + ' seconds'
if (nRSRQ is not None) and (nRSRP is not None):
html_cell += '\n RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB'
html_cell += '\n RSRP = ' + str(-140+nRSRP) + ' dBm</pre>'
else:
html_cell += '</pre>'
html_queue.put(html_cell)
HTML.CreateHtmlTestRowQueue('N/A', 'OK', 1, html_queue)
else:
logging.error('\u001B[1m CAT-M module Attachment Failed\u001B[0m')
html_cell = '<pre style="background-color:white">CAT-M module Attachment Failed</pre>'
html_queue.put(html_cell)
HTML.CreateHtmlTestRowQueue('N/A', 'KO', 1, html_queue)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
def PingCatM(self,HTML,RAN,EPC,COTS_UE,InfraUE):
if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
try:
statusQueue = SimpleQueue()
lock = Lock()
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
SSH.command('cd scripts', '\$', 5)
if re.match('OAI', EPC.Type, re.IGNORECASE):
logging.debug('Using the OAI EPC HSS: not implemented yet')
HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
HTML.CreateHtmlTabFooter(False)
self.ConditionalExit()
else:
SSH.command('egrep --color=never "Allocated ipv4 addr" /opt/ltebox/var/log/xGwLog.0', '\$', 5)
result = re.search('Allocated ipv4 addr: (?P<ipaddr>[0-9\.]+) from Pool', SSH.getBefore())
if result is not None:
moduleIPAddr = result.group('ipaddr')
else:
HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
ping_time = re.findall("-c (\d+)",str(self.ping_args))
device_id = 'catm'
ping_status = SSH.command('stdbuf -o0 ping ' + self.ping_args + ' ' + str(moduleIPAddr) + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(moduleIPAddr) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', SSH.getBefore())
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', SSH.getBefore())
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + moduleIPAddr + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
lock.release()
SSH.close()
html_cell = '<pre style="background-color:white">CAT-M module\nIP Address : ' + moduleIPAddr + '\n' + qMsg + '</pre>'
statusQueue.put(html_cell)
if (packetLossOK):
HTML.CreateHtmlTestRowQueue(self.ping_args, 'OK', 1, statusQueue)
else:
HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', 1, statusQueue)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AttachUE_common(self, device_id, statusQueue, lock, idx,COTS_UE):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
#RH quick add on to integrate cots control defined by yaml
#if device Id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
#otherwise we use the legacy procedure
if COTS_UE.Check_Exists(device_id):
#switch device to Airplane mode OFF (ie Radio ON)
COTS_UE.Set_Airplane(device_id, 'OFF')
elif device_id == '84B7N16418004022':
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
else:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
else:
# airplane mode off // radio on
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
time.sleep(2)
max_count = 45
count = max_count
while count > 0:
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "dumpsys telephony.registry" | grep -m 1 mDataConnectionState', '\$', 15)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "dumpsys telephony.registry"\' | grep -m 1 mDataConnectionState', '\$', 60)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', SSH.getBefore())
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('mDataConnectionState Not Found!')
lock.release()
break
mDataConnectionState = int(result.group('state'))
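# per Android's TelephonyManager, mDataConnectionState 2 means DATA_CONNECTED
# (0 = DISCONNECTED, 1 = CONNECTING, 3 = SUSPENDED)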
if mDataConnectionState == 2:
logging.debug('\u001B[1mUE (' + device_id + ') Attach Completed\u001B[0m')
lock.acquire()
statusQueue.put(max_count - count)
statusQueue.put(device_id)
statusQueue.put('Attach Completed')
lock.release()
break
count = count - 1
if count == 15 or count == 30:
logging.debug('\u001B[1;30;43m Retry UE (' + device_id + ') Flight Mode Off \u001B[0m')
if self.ADBCentralized:
#RH quick add on to integrate cots control defined by yaml
#if device id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
#otherwise we use the legacy procedure
if COTS_UE.Check_Exists(device_id):
#switch device to Airplane mode ON (ie Radio OFF)
COTS_UE.Set_Airplane(device_id, 'ON')
elif device_id == '84B7N16418004022':
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
time.sleep(0.5)
if self.ADBCentralized:
#RH quick add on to integrate cots control defined by yaml
#if device id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
#otherwise we use the legacy procedure
if COTS_UE.Check_Exists(device_id):
#switch device to Airplane mode OFF (ie Radio ON)
COTS_UE.Set_Airplane(device_id, 'OFF')
elif device_id == '84B7N16418004022':
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
else:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
time.sleep(0.5)
logging.debug('\u001B[1mWaiting one more second for UE (' + device_id + ') until mDataConnectionState=2 (' + str(max_count-count) + ' attempts)\u001B[0m')
time.sleep(1)
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Attach Failed \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('Attach Failed')
lock.release()
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AttachUE(self,HTML,RAN,EPC,COTS_UE,InfraUE):
if self.ue_id=='':#no ID specified, then it is a COTS controlled by ADB
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
multi_jobs = []
status_queue = SimpleQueue()
lock = Lock()
nb_ue_to_connect = 0
for device_id in self.UEDevices:
if (self.nbMaxUEtoAttach == -1) or (nb_ue_to_connect < self.nbMaxUEtoAttach):
self.UEDevicesStatus[nb_ue_to_connect] = CONST.UE_STATUS_ATTACHING
p = Process(target = self.AttachUE_common, args = (device_id, status_queue, lock,nb_ue_to_connect,COTS_UE,))
p.daemon = True
p.start()
multi_jobs.append(p)
nb_ue_to_connect = nb_ue_to_connect + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
else:
attach_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
attach_status = False
device_id = status_queue.get()
message = status_queue.get()
if (count < 0):
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
else:
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + ' in ' + str(count + 2) + ' seconds</pre>'
html_queue.put(html_cell)
if (attach_status):
cnt = 0
while cnt < len(self.UEDevices):
if self.UEDevicesStatus[cnt] == CONST.UE_STATUS_ATTACHING:
self.UEDevicesStatus[cnt] = CONST.UE_STATUS_ATTACHED
cnt += 1
HTML.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
result = re.search('T_stdout', str(RAN.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
else:
HTML.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
else: #if an ID is specified, it is a module from the yaml infrastructure file
#Note: unlike InitializeUE, the connection manager process is not checked here as it is supposed to be running already
#only 1- module wake-up, 2- check IP address
Module_UE = cls_module_ue.Module_UE(InfraUE.ci_ue_infra[self.ue_id])
status = -1
cnt = 0
while cnt < 4:
Module_UE.Command("wup")
logging.debug("Waiting for IP address to be assigned")
time.sleep(20)
logging.debug("Retrieve IP address")
status=Module_UE.GetModuleIPAddress()
if status==0:
cnt = 10
else:
cnt += 1
Module_UE.Command("detach")
time.sleep(20)
if cnt == 10 and status == 0:
HTML.CreateHtmlTestRow(Module_UE.UEIPAddress, 'OK', CONST.ALL_PROCESSES_OK)
logging.debug('UE IP address : '+ Module_UE.UEIPAddress)
#execute additional commands from yaml file after UE attach
SSH = sshconnection.SSHConnection()
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
if hasattr(Module_UE,'StartCommands'):
for startcommand in Module_UE.StartCommands:
cmd = 'echo ' + Module_UE.HostPassword + ' | ' + startcommand
SSH.command(cmd,'\$',5)
SSH.close()
#check that the MTU is as expected / requested
Module_UE.CheckModuleMTU()
else: #status==-1 failed to retrieve IP address
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
def DetachUE_common(self, device_id, idx,COTS_UE):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
#RH quick add on to integrate cots control defined by yaml
#if device id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
#otherwise we use the legacy procedure
if COTS_UE.Check_Exists(device_id):
#switch device to Airplane mode ON (ie Radio OFF)
COTS_UE.Set_Airplane(device_id,'ON')
elif device_id == '84B7N16418004022':
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DetachUE(self,HTML,RAN,EPC,COTS_UE,InfraUE):
if self.ue_id=='':#no ID specified, then it is a COTS controlled by ADB
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
multi_jobs = []
cnt = 0
for device_id in self.UEDevices:
self.UEDevicesStatus[cnt] = CONST.UE_STATUS_DETACHING
p = Process(target = self.DetachUE_common, args = (device_id,cnt,COTS_UE,))
p.daemon = True
p.start()
multi_jobs.append(p)
cnt += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
result = re.search('T_stdout', str(RAN.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
cnt = 0
while cnt < len(self.UEDevices):
self.UEDevicesStatus[cnt] = CONST.UE_STATUS_DETACHED
cnt += 1
else:#if an ID is specified, it is a module from the yaml infrastructure file
Module_UE = cls_module_ue.Module_UE(InfraUE.ci_ue_infra[self.ue_id])
Module_UE.Command("detach")
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def RebootUE_common(self, device_id):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
previousmDataConnectionStates = []
# Save mDataConnectionState
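# NOTE: the dumpsys command is issued twice on purpose; presumably the first call only primes the
# SSH output buffer so that getBefore() sees the complete output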
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', SSH.getBefore())
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
sys.exit(1)
previousmDataConnectionStates.append(int(result.group('state')))
# Reboot UE
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell reboot', '\$', 10)
time.sleep(60)
previousmDataConnectionState = previousmDataConnectionStates.pop(0)
count = 180
while count > 0:
count = count - 1
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', SSH.getBefore())
if result is None:
mDataConnectionState = None
else:
mDataConnectionState = int(result.group('state'))
logging.debug('mDataConnectionState = ' + result.group('state'))
if mDataConnectionState is None or (previousmDataConnectionState == 2 and mDataConnectionState != 2):
logging.debug('\u001B[1mWaiting one more second for UE (' + device_id + ') until reboot completion (' + str(180-count) + ' attempts)\u001B[0m')
time.sleep(1)
else:
logging.debug('\u001B[1mUE (' + device_id + ') Reboot Completed\u001B[0m')
break
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Reboot Failed \u001B[0m')
sys.exit(1)
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def RebootUE(self,HTML,RAN,EPC):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
HTML.CreateHtmlTabFooter(False)
self.ConditionalExit()
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.RebootUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def DataDisableUE_common(self, device_id, idx):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# disable data service
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data disable"', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data disable"\'', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Disabled Data Service\u001B[0m')
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataDisableUE(self,HTML):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.DataDisableUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def DataEnableUE_common(self, device_id, idx):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# enable data service
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data enable"', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Enabled Data Service\u001B[0m')
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataEnableUE(self,HTML):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.DataEnableUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def GetAllUEDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
SSH.command('adb devices', '\$', 15)
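# 'adb devices' lists one '<serial>\tdevice' line per attached phone; the doubled backslashes
# match the literal '\r\n' text that getBefore() appears to return from the pexpect buffer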
self.UEDevices = re.findall("\\\\r\\\\n([A-Za-z0-9]+)\\\\tdevice",SSH.getBefore())
#report number and id of devices found
msg = "UEDevices found by GetAllUEDevices : " + " ".join(self.UEDevices)
logging.debug(msg)
SSH.close()
else:
if (os.path.isfile('./phones_list.txt')):
os.remove('./phones_list.txt')
SSH.command('ls /etc/*/phones*.txt', '\$', 5)
result = re.search('/etc/ci/phones_list.txt', SSH.getBefore())
SSH.close()
if (result is not None) and (len(self.UEDevices) == 0):
SSH.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, '/etc/ci/phones_list.txt', '.')
if (os.path.isfile('./phones_list.txt')):
phone_list_file = open('./phones_list.txt', 'r')
for line in phone_list_file.readlines():
line = line.strip()
result = re.search('^#', line)
if result is not None:
continue
comma_split = line.split(",")
self.UEDevices.append(comma_split[0])
self.UEDevicesRemoteServer.append(comma_split[1])
self.UEDevicesRemoteUser.append(comma_split[2])
self.UEDevicesOffCmd.append(comma_split[3])
self.UEDevicesOnCmd.append(comma_split[4])
self.UEDevicesRebootCmd.append(comma_split[5])
phone_list_file.close()
# better handling of the case when no UE is detected
# sys.exit is now handled by the calling function
if terminate_ue_flag == True:
if len(self.UEDevices) == 0:
logging.debug('\u001B[1;37;41m UE Not Found! \u001B[0m')
return False
if len(self.UEDevicesStatus) == 0:
cnt = 0
while cnt < len(self.UEDevices):
self.UEDevicesStatus.append(CONST.UE_STATUS_DETACHED)
cnt += 1
return True
def GetAllCatMDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
SSH.command('lsusb | egrep "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",SSH.getBefore())
else:
if (os.path.isfile('./modules_list.txt')):
os.remove('./modules_list.txt')
SSH.command('ls /etc/*/modules*.txt', '\$', 5)
result = re.search('/etc/ci/modules_list.txt', SSH.getBefore())
SSH.close()
if result is not None:
logging.debug('Found a module list file on ADB server')
if terminate_ue_flag == True:
if len(self.CatMDevices) == 0:
logging.debug('\u001B[1;37;41m CAT-M UE Not Found! \u001B[0m')
sys.exit(1)
SSH.close()
def CheckUEStatus_common(self, lock, device_id, statusQueue, idx):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "dumpsys telephony.registry"', '\$', 15)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "dumpsys telephony.registry"\'', '\$', 60)
result = re.search('mServiceState=(?P<serviceState>[0-9]+)', SSH.getBefore())
serviceState = 'Service State: UNKNOWN'
if result is not None:
lServiceState = int(result.group('serviceState'))
if lServiceState == 3:
serviceState = 'Service State: RADIO_POWERED_OFF'
if lServiceState == 1:
serviceState = 'Service State: OUT_OF_SERVICE'
if lServiceState == 0:
serviceState = 'Service State: IN_SERVICE'
if lServiceState == 2:
serviceState = 'Service State: EMERGENCY_ONLY'
result = re.search('mDataConnectionState=(?P<dataConnectionState>[0-9]+)', SSH.getBefore())
dataConnectionState = 'Data State: UNKNOWN'
if result is not None:
lDataConnectionState = int(result.group('dataConnectionState'))
if lDataConnectionState == 0:
dataConnectionState = 'Data State: DISCONNECTED'
if lDataConnectionState == 1:
dataConnectionState = 'Data State: CONNECTING'
if lDataConnectionState == 2:
dataConnectionState = 'Data State: CONNECTED'
if lDataConnectionState == 3:
dataConnectionState = 'Data State: SUSPENDED'
result = re.search('mDataConnectionReason=(?P<dataConnectionReason>[0-9a-zA-Z_]+)', SSH.getBefore())
time.sleep(1)
SSH.close()
dataConnectionReason = 'Data Reason: UNKNOWN'
if result is not None:
dataConnectionReason = 'Data Reason: ' + result.group('dataConnectionReason')
lock.acquire()
logging.debug('\u001B[1;37;44m Status Check (' + str(device_id) + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + serviceState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionReason + '\u001B[0m')
statusQueue.put(0)
statusQueue.put(device_id)
qMsg = serviceState + '\n' + dataConnectionState + '\n' + dataConnectionReason
statusQueue.put(qMsg)
lock.release()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckStatusUE(self,HTML,RAN,EPC,COTS_UE,InfraUE):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow('N/A', 'KO', pStatus)
HTML.CreateHtmlTabFooter(False)
self.ConditionalExit()
multi_jobs = []
lock = Lock()
status_queue = SimpleQueue()
i = 0
for device_id in self.UEDevices:
p = Process(target = self.CheckUEStatus_common, args = (lock,device_id,status_queue,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
if (RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted) or RAN.flexranCtrlDeployed:
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
SSH.command('curl http://' + RAN.flexranCtrlIpAddress + ':9999/stats | jq \'.\' > check_status_' + self.testCase_id + '.log 2>&1', '\$', 5)
SSH.command('cat check_status_' + self.testCase_id + '.log | jq \'.eNB_config[0].UE\' | grep -c rnti | sed -e "s#^#Nb Connected UE = #"', '\$', 5)
result = re.search('Nb Connected UE = (?P<nb_ues>[0-9]+)', SSH.getBefore())
passStatus = True
if result is not None:
nb_ues = int(result.group('nb_ues'))
htmlOptions = 'Nb Connected UE(s) to eNB = ' + str(nb_ues)
logging.debug('\u001B[1;37;44m ' + htmlOptions + ' \u001B[0m')
if self.expectedNbOfConnectedUEs > -1:
if nb_ues != self.expectedNbOfConnectedUEs:
passStatus = False
else:
htmlOptions = 'N/A'
SSH.close()
else:
passStatus = True
htmlOptions = 'N/A'
if (status_queue.empty()):
HTML.CreateHtmlTestRow(htmlOptions, 'KO', CONST.ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
else:
check_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
check_status = False
device_id = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
html_queue.put(html_cell)
if check_status and passStatus:
HTML.CreateHtmlTestRowQueue(htmlOptions, 'OK', len(self.UEDevices), html_queue)
else:
HTML.CreateHtmlTestRowQueue(htmlOptions, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
def GetAllUEIPAddresses(self):
SSH = sshconnection.SSHConnection()
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
ue_ip_status = 0
self.UEIPAddresses = []
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('ifconfig oaitun_ue1', '\$', 4)
result = re.search('inet addr:(?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)|inet (?P<ueipaddress2>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', SSH.getBefore())
if result is not None:
if result.group('ueipaddress') is not None:
UE_IPAddress = result.group('ueipaddress')
else:
UE_IPAddress = result.group('ueipaddress2')
logging.debug('\u001B[1mUE (' + self.UEDevices[0] + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
self.UEIPAddresses.append(UE_IPAddress)
else:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
ue_ip_status -= 1
SSH.close()
return ue_ip_status
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
idx = 0
for device_id in self.UEDevices:
if self.UEDevicesStatus[idx] != CONST.UE_STATUS_ATTACHED:
idx += 1
continue
count = 0
while count < 4:
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "ip addr show | grep rmnet"', '\$', 15)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ip addr show | grep rmnet"\'', '\$', 60)
result = re.search('inet (?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\/[0-9]+[0-9a-zA-Z\.\s]+', SSH.getBefore())
if result is None:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
time.sleep(1)
count += 1
else:
count = 10
if count < 9:
ue_ip_status -= 1
continue
UE_IPAddress = result.group('ueipaddress')
logging.debug('\u001B[1mUE (' + device_id + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
for ueipaddress in self.UEIPAddresses:
if ueipaddress == UE_IPAddress:
logging.debug('\u001B[1mUE (' + device_id + ') IP Address ' + UE_IPAddress + ': has already been allocated to another device !' + '\u001B[0m')
ue_ip_status -= 1
continue
self.UEIPAddresses.append(UE_IPAddress)
idx += 1
SSH.close()
return ue_ip_status
def ping_iperf_wrong_exit(self, lock, UE_IPAddress, device_id, statusQueue, message):
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(message)
lock.release()
def Ping_common(self, lock, UE_IPAddress, device_id, statusQueue,EPC, Module_UE):
try:
SSH = sshconnection.SSHConnection()
# Launch ping on the EPC side (true for ltebox and old open-air-cn)
# But for OAI-Rel14-CUPS, we launch from python executor
launchFromEpc = True
launchFromModule = False
if re.match('OAI-Rel14-CUPS', EPC.Type, re.IGNORECASE):
launchFromEpc = False
#if module, ping from module to EPC
if self.ue_id!='':
launchFromEpc = False
launchFromModule = True
ping_time = re.findall("-c (\d+)",str(self.ping_args))
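# extract the requested ping count from the '-c <N>' option so the timeouts below can be scaled
# to 1.5x the expected run time (this assumes ping_args always carries a '-c' option)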
if launchFromEpc:
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
SSH.command('cd scripts', '\$', 5)
# In case of a docker-based deployment, we need to ping from the trf-gen container
launchFromTrfContainer = False
if re.match('OAI-Rel14-Docker', EPC.Type, re.IGNORECASE):
launchFromTrfContainer = True
if launchFromTrfContainer:
ping_status = SSH.command('docker exec -it prod-trf-gen /bin/bash -c "ping ' + self.ping_args + ' ' + UE_IPAddress + '" 2>&1 | tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
else:
ping_status = SSH.command('stdbuf -o0 ping ' + self.ping_args + ' ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
#copy the ping log file to have it locally for analysis (ping stats)
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, EPC.SourceCodePath + '/scripts/ping_' + self.testCase_id + '_' + device_id + '.log', '.')
else:
if launchFromModule == False:
#ping log file is on the python executor
cmd = 'ping ' + self.ping_args + ' ' + UE_IPAddress + ' 2>&1 > ping_' + self.testCase_id + '_' + device_id + '.log'
message = cmd + '\n'
logging.debug(cmd)
ret = subprocess.run(cmd, shell=True)
ping_status = ret.returncode
#copy the ping log file to another folder for log collection (source and destination are EPC)
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'ping_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
#copy the ping log file to have it locally for analysis (ping stats)
logging.debug(EPC.SourceCodePath + 'ping_' + self.testCase_id + '_' + device_id + '.log')
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, EPC.SourceCodePath +'/scripts/ping_' + self.testCase_id + '_' + device_id + '.log', '.')
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
#cat is executed on EPC
SSH.command('cat ' + EPC.SourceCodePath + '/scripts/ping_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
else: #launch from Module
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
#target address is different depending on EPC type
if re.match('OAI-Rel14-Docker', EPC.Type, re.IGNORECASE):
Target = EPC.MmeIPAddress
elif re.match('OAICN5G', EPC.Type, re.IGNORECASE):
Target = '8.8.8.8'
else:
Target = EPC.IPAddress
#ping from module NIC rather than IP address to make sure round trip is over the air
cmd = 'ping -I ' + Module_UE.UENetwork + ' ' + self.ping_args + ' ' + Target + ' 2>&1 > ping_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command(cmd,'\$',int(ping_time[0])*1.5)
#copy the ping log file to have it locally for analysis (ping stats)
SSH.copyin(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword, 'ping_' + self.testCase_id + '_' + self.ue_id + '.log', '.')
#cat is executed locally
SSH.command('cat ping_' + self.testCase_id + '_' + self.ue_id + '.log', '\$', 5)
ping_status=0
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
#search is done on cat result
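# Expected ping summary line, e.g. '10 packets transmitted, 10 received, 0% packet loss, time 9012ms'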
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', SSH.getBefore())
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
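# Expected RTT summary line, e.g. 'rtt min/avg/max/mdev = 10.127/15.340/20.251/2.204 ms'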
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', SSH.getBefore())
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
#adding extra ping stats from local file
ping_log_file='ping_' + self.testCase_id + '_' + device_id + '.log'
logging.debug('Analyzing Ping log file : ' + os.getcwd() + '/' + ping_log_file)
ping_stat=GetPingTimeAnalysis(ping_log_file)
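# GetPingTimeAnalysis is expected to return -1 on error, or a dict of per-sample RTT stats
# (min_0/mean_0/median_0/max_0/max_loc before outlier removal, min_1/mean_1/median_1/max_1
# after removing the largest sample), as consumed below.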
ping_stat_msg=''
if (ping_stat!=-1) and (len(ping_stat)!=0):
ping_stat_msg+='Ping stats before removing largest value : \n'
ping_stat_msg+='RTT(Min) : ' + str("{:.2f}".format(ping_stat['min_0'])) + 'ms \n'
ping_stat_msg+='RTT(Mean) : ' + str("{:.2f}".format(ping_stat['mean_0'])) + 'ms \n'
ping_stat_msg+='RTT(Median) : ' + str("{:.2f}".format(ping_stat['median_0'])) + 'ms \n'
ping_stat_msg+='RTT(Max) : ' + str("{:.2f}".format(ping_stat['max_0'])) + 'ms \n'
ping_stat_msg+='Max Index : ' + str(ping_stat['max_loc']) + '\n'
ping_stat_msg+='Ping stats after removing largest value : \n'
ping_stat_msg+='RTT(Min) : ' + str("{:.2f}".format(ping_stat['min_1'])) + 'ms \n'
ping_stat_msg+='RTT(Mean) : ' + str("{:.2f}".format(ping_stat['mean_1'])) + 'ms \n'
ping_stat_msg+='RTT(Median) : ' + str("{:.2f}".format(ping_stat['median_1'])) + 'ms \n'
ping_stat_msg+='RTT(Max) : ' + str("{:.2f}".format(ping_stat['max_1'])) + 'ms \n'
#building html message
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg + '\n' + ping_stat_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
if (packetLossOK):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(qMsg)
lock.release()
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def PingNoS1_wrong_exit(self, qMsg,HTML):
html_queue = SimpleQueue()
html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
html_queue.put(html_cell)
HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
def PingNoS1(self,HTML,RAN,EPC,COTS_UE,InfraUE):
SSH=sshconnection.SSHConnection()
check_eNB = True
check_OAI_UE = True
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
ping_from_eNB = re.search('oaitun_enb1', str(self.ping_args))
if ping_from_eNB is not None:
if RAN.eNBIPAddress == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
else:
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
try:
if ping_from_eNB is not None:
SSH.open(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword)
SSH.command('cd ' + RAN.eNBSourceCodePath + '/cmake_targets/', '\$', 5)
else:
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets/', '\$', 5)
ping_time = re.findall("-c (\d+)",str(self.ping_args))
ping_status = SSH.command('stdbuf -o0 ping ' + self.ping_args + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with OAI UE crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message,HTML)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', SSH.getBefore())
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message,HTML)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message,HTML)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', SSH.getBefore())
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message,HTML)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
logging.debug('\u001B[1;37;44m OAI UE ping result \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
SSH.close()
html_queue = SimpleQueue()
ip_addr = 'TBD'
html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
html_queue.put(html_cell)
if packetLossOK:
HTML.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
else:
HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
# copying on the EPC server for logCollection
if ping_from_eNB is not None:
copyin_res = SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
else:
copyin_res = SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
if (copyin_res == 0):
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'ping_' + self.testCase_id + '.log', EPC.SourceCodePath + '/scripts')
except:
os.kill(os.getppid(),signal.SIGUSR1)
def Ping(self,HTML,RAN,EPC,COTS_UE, InfraUE):
result = re.search('noS1', str(RAN.Initialize_eNB_args))
if result is not None:
self.PingNoS1(HTML,RAN,EPC,COTS_UE,InfraUE)
return
if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
check_OAI_UE = True
else:
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
if self.ue_id=="":
Module_UE = cls_module_ue.Module_UE(InfraUE.ci_ue_infra['dummy']) #RH, temporary, we need a dummy Module_UE object to pass to Ping_common
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
HTML.CreateHtmlTestRow(self.ping_args, 'KO', CONST.UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
else:
self.UEIPAddresses=[]
Module_UE = cls_module_ue.Module_UE(InfraUE.ci_ue_infra[self.ue_id])
Module_UE.GetModuleIPAddress()
self.UEIPAddresses.append(Module_UE.UEIPAddress)
logging.debug(self.UEIPAddresses)
multi_jobs = []
i = 0
lock = Lock()
status_queue = SimpleQueue()
for UE_IPAddress in self.UEIPAddresses:
if self.ue_id=="":
device_id = self.UEDevices[i]
else:
device_id = Module_UE.ID + "-" + Module_UE.Kind
p = Process(target = self.Ping_common, args = (lock,UE_IPAddress,device_id,status_queue,EPC,Module_UE,))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
HTML.CreateHtmlTestRow(self.ping_args, 'KO', CONST.ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
else:
ping_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
ping_status = False
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (ping_status):
HTML.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
else:
HTML.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
def Iperf_ComputeTime(self):
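# Extracts the test duration from the '-t <seconds>' iperf option, e.g. '-c x -u -b 1M -t 30' -> '30'.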
result = re.search('-t (?P<iperf_time>\d+)', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf time Not Found! \u001B[0m')
sys.exit(1)
return result.group('iperf_time')
def Iperf_ComputeModifiedBW(self, idx, ue_num):
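# Rewrites the '-b' option of the iperf arguments according to the multi-UE profile:
# 'balanced' splits the requested bandwidth equally across UEs, 'single-ue' keeps it unchanged,
# 'unbalanced' gives UE 0 the requested bandwidth minus a 2% residual per remaining UE and
# every other UE only that residual.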
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf bandwidth Not Found! \u001B[0m')
sys.exit(1)
iperf_bandwidth = result.group('iperf_bandwidth')
if self.iperf_profile == 'balanced':
iperf_bandwidth_new = float(iperf_bandwidth)/ue_num
if self.iperf_profile == 'single-ue':
iperf_bandwidth_new = float(iperf_bandwidth)
if self.iperf_profile == 'unbalanced':
# residual is 2% of max bw
residualBW = float(iperf_bandwidth) / 50
if idx == 0:
iperf_bandwidth_new = float(iperf_bandwidth) - ((ue_num - 1) * residualBW)
else:
iperf_bandwidth_new = residualBW
iperf_bandwidth_str = '-b ' + iperf_bandwidth
iperf_bandwidth_str_new = '-b ' + ('%.2f' % iperf_bandwidth_new)
result = re.sub(iperf_bandwidth_str, iperf_bandwidth_str_new, str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Calculate Iperf bandwidth Failed! \u001B[0m')
sys.exit(1)
return result
def Iperf_analyzeV2TCPOutput(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options,EPC,SSH):
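# The awk helper /tmp/tcp_iperf_stats.awk is assumed to summarize the TCP iperf log into a single
# 'Avg Bitrate : ... Max Bitrate : ... Min Bitrate : ...' line, which the regex below parses.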
SSH.command('awk -f /tmp/tcp_iperf_stats.awk ' + EPC.SourceCodePath + '/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('Avg Bitrate : (?P<average>[0-9\.]+ Mbits\/sec) Max Bitrate : (?P<maximum>[0-9\.]+ Mbits\/sec) Min Bitrate : (?P<minimum>[0-9\.]+ Mbits\/sec)', SSH.getBefore())
if result is not None:
avgbitrate = result.group('average')
maxbitrate = result.group('maximum')
minbitrate = result.group('minimum')
lock.acquire()
logging.debug('\u001B[1;37;44m TCP iperf result (' + UE_IPAddress + ') \u001B[0m')
msg = 'TCP Stats :\n'
if avgbitrate is not None:
logging.debug('\u001B[1;34m Avg Bitrate : ' + avgbitrate + '\u001B[0m')
msg += 'Avg Bitrate : ' + avgbitrate + '\n'
if maxbitrate is not None:
logging.debug('\u001B[1;34m Max Bitrate : ' + maxbitrate + '\u001B[0m')
msg += 'Max Bitrate : ' + maxbitrate + '\n'
if minbitrate is not None:
logging.debug('\u001B[1;34m Min Bitrate : ' + minbitrate + '\u001B[0m')
msg += 'Min Bitrate : ' + minbitrate + '\n'
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
def Iperf_analyzeV2Output(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options,EPC,SSH):
result = re.search('-u', str(iperf_real_options))
if result is None:
logging.debug('Into Iperf_analyzeV2TCPOutput client')
response = self.Iperf_analyzeV2TCPOutput(lock, UE_IPAddress, device_id, statusQueue, iperf_real_options,EPC,SSH)
logging.debug('Iperf_analyzeV2TCPOutput response returned value = ' + str(response))
return response
result = re.search('Server Report:', SSH.getBefore())
if result is None:
result = re.search('read failed: Connection refused', SSH.getBefore())
if result is not None:
logging.debug('\u001B[1;37;41m Could not connect to iperf server! \u001B[0m')
else:
logging.debug('\u001B[1;37;41m Server Report and Connection refused Not Found! \u001B[0m')
return -1
# Computing the requested bandwidth in float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is not None:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
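# When a '-b' option is present, req_bw now holds the requested bandwidth in bits/sec
# (e.g. '-b 2M' -> req_bw = 2000000.0) and req_bandwidth its human-readable form.
# The regex below parses the UDP 'Server Report' summary printed by the iperf 2 client,
# e.g. '[  3]  0.0-30.0 sec  3.57 MBytes  1.00 Mbits/sec   0.012 ms    0/ 2552 (0%)'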
result = re.search('Server Report:\\\\r\\\\n(?:|\[ *\d+\].*) (?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(\d+\/..\d+) +(\((?P<packetloss>[0-9\.]+)%\))', SSH.getBefore())
if result is not None:
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
jitter = result.group('jitter')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
iperfStatus = True
msg = 'Req Bitrate : ' + req_bandwidth + '\n'
logging.debug('\u001B[1;34m Req Bitrate : ' + req_bandwidth + '\u001B[0m')
if bitrate is not None:
msg += 'Bitrate : ' + bitrate + '\n'
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
result = re.search('(?P<real_bw>[0-9\.]+) [KMG]bits/sec', str(bitrate))
if result is not None:
actual_bw = float(str(result.group('real_bw')))
result = re.search('[0-9\.]+ K', bitrate)
if result is not None:
actual_bw = actual_bw * 1000
result = re.search('[0-9\.]+ M', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000
result = re.search('[0-9\.]+ G', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000000
br_loss = 100 * actual_bw / req_bw
bitperf = '%.2f ' % br_loss
msg += 'Bitrate Perf: ' + bitperf + '%\n'
logging.debug('\u001B[1;34m Bitrate Perf: ' + bitperf + '%\u001B[0m')
if packetloss is not None:
msg += 'Packet Loss : ' + packetloss + '%\n'
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
if float(packetloss) > float(self.iperf_packetloss_threshold):
msg += 'Packet Loss too high!\n'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
iperfStatus = False
if jitter is not None:
msg += 'Jitter : ' + jitter + '\n'
logging.debug('\u001B[1;34m Jitter : ' + jitter + '\u001B[0m')
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
else:
return -2
def Iperf_analyzeV2Server(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options, filename,type):
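# Parses an iperf 2 server-side log: each per-interval row (bitrate, jitter, lost/sent packets) is
# accumulated, then averaged and compared against the requested bandwidth and the packet-loss
# threshold. 'type' only selects the row regex (type 1 expects the leading '[ n]' interval column).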
if (not os.path.isfile(filename)):
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
return
# Computing the requested bandwidth in float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is None:
logging.debug('Iperf bandwidth Not Found!')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not compute Iperf bandwidth!')
return
else:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
server_file = open(filename, 'r')
br_sum = 0.0
ji_sum = 0.0
pl_sum = 0
ps_sum = 0
row_idx = 0
for line in server_file.readlines():
if type==0:
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(?P<lostPack>[0-9]+)/ +(?P<sentPack>[0-9]+)', str(line))
else:
result = re.search('^\[\s+\d\].+ (?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(?P<lostPack>[0-9]+)\/\s*(?P<sentPack>[0-9]+)', str(line))
if result is not None:
bitrate = result.group('bitrate')
jitter = result.group('jitter')
packetlost = result.group('lostPack')
packetsent = result.group('sentPack')
br = bitrate.split(' ')
ji = jitter.split(' ')
row_idx = row_idx + 1
curr_br = float(br[0])
pl_sum = pl_sum + int(packetlost)
ps_sum = ps_sum + int(packetsent)
if (br[1] == 'Kbits/sec'):
curr_br = curr_br * 1000
if (br[1] == 'Mbits/sec'):
curr_br = curr_br * 1000 * 1000
br_sum = curr_br + br_sum
ji_sum = float(ji[0]) + ji_sum
if (row_idx > 0):
br_sum = br_sum / row_idx
ji_sum = ji_sum / row_idx
br_loss = 100 * br_sum / req_bw
if (br_sum > 1000):
br_sum = br_sum / 1000
if (br_sum > 1000):
br_sum = br_sum / 1000
bitrate = '%.2f Mbits/sec' % br_sum
else:
bitrate = '%.2f Kbits/sec' % br_sum
else:
bitrate = '%.2f bits/sec' % br_sum
bitperf = '%.2f ' % br_loss
bitperf += '%'
jitter = '%.2f ms' % (ji_sum)
if (ps_sum > 0):
pl = float(100 * pl_sum / ps_sum)
packetloss = '%2.1f ' % (pl)
packetloss += '%'
if float(pl) > float(self.iperf_packetloss_threshold):
pal_too_high_msg = 'Packet Loss too high : actual = '+packetloss+', target = '+self.iperf_packetloss_threshold+'%\n'
else:
pal_too_high_msg=''
lock.acquire()
if (br_loss < 90) or (float(pl) > float(self.iperf_packetloss_threshold)):
statusQueue.put(1)
else:
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
req_msg = 'Req Bitrate : ' + req_bandwidth
bir_msg = 'Bitrate : ' + bitrate
brl_msg = 'Bitrate Perf: ' + bitperf
jit_msg = 'Jitter : ' + jitter
pal_msg = 'Packet Loss : ' + packetloss
statusQueue.put(req_msg + '\n' + bir_msg + '\n' + brl_msg + '\n' + jit_msg + '\n' + pal_msg + '\n' + pal_too_high_msg + '\n')
logging.debug('\u001B[1;37;45m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;35m ' + req_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + bir_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + brl_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + jit_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + pal_too_high_msg + '\u001B[0m')
lock.release()
else:
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
server_file.close()
def Iperf_analyzeV3Output(self, lock, UE_IPAddress, device_id, statusQueue,SSH):
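# Parses the iperf3 client output: the final summary before 'iperf Done.' provides the bitrate
# and, for UDP runs, the packet-loss percentage.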
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?:|[0-9\.]+ ms +\d+\/\d+ \((?P<packetloss>[0-9\.]+)%\)) +(?:|receiver)\\\\r\\\\n(?:|\[ *\d+\] Sent \d+ datagrams)\\\\r\\\\niperf Done\.', SSH.getBefore())
if result is None:
result = re.search('(?P<error>iperf: error - [a-zA-Z0-9 :]+)', SSH.getBefore())
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
if result is not None:
logging.debug('\u001B[1;37;41m ' + result.group('error') + ' \u001B[0m')
statusQueue.put(result.group('error'))
else:
logging.debug('\u001B[1;37;41m Bitrate and/or Packet Loss Not Found! \u001B[0m')
statusQueue.put('Bitrate and/or Packet Loss Not Found!')
lock.release()
return
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
msg = 'Bitrate : ' + bitrate + '\n'
iperfStatus = True
if packetloss is not None:
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
msg += 'Packet Loss : ' + packetloss + '%\n'
if float(packetloss) > float(self.iperf_packetloss_threshold):
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
msg += 'Packet Loss too high!\n'
iperfStatus = False
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
def Iperf_UL_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue,EPC):
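# Uplink iperf: the server runs on the core-network side (EPC host, trf-gen container or the
# python executor depending on the EPC type) and the client runs on the UE. Port 5001 + idx is
# used so that several UEs can run in parallel without clashing.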
SSH = sshconnection.SSHConnection()
udpIperf = True
result = re.search('-u', str(self.iperf_args))
if result is None:
udpIperf = False
ipnumbers = UE_IPAddress.split('.')
if (len(ipnumbers) == 4):
ipnumbers[3] = '1'
EPC_Iperf_UE_IPAddress = ipnumbers[0] + '.' + ipnumbers[1] + '.' + ipnumbers[2] + '.' + ipnumbers[3]
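# Assumption: the core-network endpoint of the UE tunnel is the '.1' address of the UE subnet
# (e.g. UE 192.172.0.2 -> server address 192.172.0.1); this is overridden below for CUPS and
# docker-based deployments.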
# Launch iperf server on EPC side (true for ltebox and old open-air-cn)
# But for OAI-Rel14-CUPS, we launch from python executor and we are using its IP address as iperf client address
launchFromEpc = True
if re.match('OAI-Rel14-CUPS', EPC.Type, re.IGNORECASE):
launchFromEpc = False
cmd = 'hostname -I'
ret = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding='utf-8')
if ret.stdout is not None:
EPC_Iperf_UE_IPAddress = ret.stdout.strip()
# When using a docker-based deployment, IPERF client shall be launched from trf container
launchFromTrfContainer = False
if re.match('OAI-Rel14-Docker', EPC.Type, re.IGNORECASE):
launchFromTrfContainer = True
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('docker inspect --format="TRF_IP_ADDR = {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" prod-trf-gen', '\$', 5)
result = re.search('TRF_IP_ADDR = (?P<trf_ip_addr>[0-9\.]+)', SSH.getBefore())
if result is not None:
EPC_Iperf_UE_IPAddress = result.group('trf_ip_addr')
SSH.close()
port = 5001 + idx
udpOptions = ''
if udpIperf:
udpOptions = '-u '
if launchFromEpc:
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if launchFromTrfContainer:
if self.ueIperfVersion == self.dummyIperfVersion:
prefix = ''
else:
prefix = ''
if self.ueIperfVersion == '2.0.5':
prefix = '/iperf-2.0.5/bin/'
SSH.command('docker exec -d prod-trf-gen /bin/bash -c "nohup ' + prefix + 'iperf ' + udpOptions + '-s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &"', '\$', 5)
else:
SSH.command('echo $USER; nohup iperf ' + udpOptions + '-s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', EPC.UserName, 5)
SSH.close()
else:
if self.ueIperfVersion == self.dummyIperfVersion:
prefix = ''
else:
prefix = ''
if self.ueIperfVersion == '2.0.5':
prefix = '/opt/iperf-2.0.5/bin/'
cmd = 'nohup ' + prefix + 'iperf ' + udpOptions + '-s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log 2>&1 &'
logging.debug(cmd)
subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding='utf-8')
time.sleep(0.5)
# Launch iperf client on UE
if (device_id == 'OAI-UE'):
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
else:
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
SSH.command('cd ' + EPC.SourceCodePath+ '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
modified_options = modified_options.replace('-R','')
time.sleep(0.5)
SSH.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if (device_id == 'OAI-UE'):
iperf_status = SSH.command('iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + ' -B ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
else:
if self.ADBCentralized:
iperf_status = SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "/data/local/tmp/iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + '" 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
else:
iperf_status = SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "/data/local/tmp/iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + '"\' 2>&1 > iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
SSH.command('fromdos -o iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
SSH.command('cat iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
# TIMEOUT Case
if iperf_status < 0:
SSH.close()
message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options,EPC,SSH)
SSH.close()
# Kill iperf server on EPC side
if launchFromEpc:
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
if launchFromTrfContainer:
SSH.command('docker exec -it prod-trf-gen /bin/bash -c "killall --signal SIGKILL iperf"', '\$', 5)
else:
SSH.command('killall --signal SIGKILL iperf', EPC.UserName, 5)
SSH.close()
else:
cmd = 'killall --signal SIGKILL iperf'
logging.debug(cmd)
subprocess.run(cmd, shell=True)
time.sleep(1)
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
# in case of failure, retrieve server log
if (clientStatus == -1) or (clientStatus == -2):
if launchFromEpc:
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
if launchFromTrfContainer:
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('docker cp prod-trf-gen:/iperf-2.0.5/iperf_server_' + self.testCase_id + '_' + device_id + '.log ' + EPC.SourceCodePath + '/scripts', '\$', 5)
SSH.close()
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, EPC.SourceCodePath+ '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
filename='iperf_server_' + self.testCase_id + '_' + device_id + '.log'
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options,filename,0)
# in case of OAI-UE
if (device_id == 'OAI-UE'):
SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_' + self.testCase_id + '_' + device_id + '.log', '.')
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
def Iperf_Module(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue,EPC, Module_UE):
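# iperf for a module UE: the direction is read from self.iperf_direction. For 'DL' the server runs
# on the UE host and the client on the core-network side, for 'UL' the roles are swapped.
# Docker-based EPC/5GC deployments run the core-network end inside the prod-trf-gen container,
# otherwise (ltebox) directly on the EPC host.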
if (re.match('OAI-Rel14-Docker', EPC.Type, re.IGNORECASE)) or (re.match('OAICN5G', EPC.Type, re.IGNORECASE)):
#retrieve trf-gen container IP address
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('docker inspect --format="TRF_IP_ADDR = {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" prod-trf-gen', '\$', 5)
result = re.search('TRF_IP_ADDR = (?P<trf_ip_addr>[0-9\.]+)', SSH.getBefore())
if result is not None:
trf_gen_IP = result.group('trf_ip_addr')
SSH.close()
#kill iperf processes on UE side before (in case there are still some remaining)
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
cmd = 'killall --signal=SIGKILL iperf'
SSH.command(cmd,'\$',5)
SSH.close()
iperf_time = self.Iperf_ComputeTime()
if self.iperf_direction=="DL":
logging.debug("Iperf for Module in DL mode detected")
#server side UE
server_filename='iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
cmd = 'rm ' + server_filename
SSH.command(cmd,'\$',5)
cmd = 'echo $USER; nohup iperf -s -B ' + UE_IPAddress + ' -u 2>&1 > ' + server_filename + ' &'
SSH.command(cmd,'\$',5)
SSH.close()
#client side EPC
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
client_filename = 'iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command('docker exec -it prod-trf-gen /bin/bash -c "killall --signal SIGKILL iperf"', '\$', 5)
iperf_cmd = 'bin/iperf -c ' + UE_IPAddress + ' ' + self.iperf_args + ' 2>&1 > ' + client_filename
cmd = 'docker exec -it prod-trf-gen /bin/bash -c \"' + iperf_cmd + '\"'
SSH.command(cmd,'\$',int(iperf_time)*5.0)
SSH.command('docker cp prod-trf-gen:/iperf-2.0.13/'+ client_filename + ' ' + EPC.SourceCodePath, '\$', 5)
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, EPC.SourceCodePath + '/' + client_filename, '.')
SSH.close()
#copy the 2 resulting files locally
SSH.copyin(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword, server_filename, '.')
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, client_filename, '.')
#send for analysis
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, self.iperf_args,server_filename,1)
elif self.iperf_direction=="UL":
logging.debug("Iperf for Module in UL mode detected")
#server side EPC
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
server_filename = 'iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command('docker exec -it prod-trf-gen /bin/bash -c "killall --signal SIGKILL iperf"', '\$', 5)
iperf_cmd = 'echo $USER; nohup bin/iperf -s -u 2>&1 > ' + server_filename
cmd = 'docker exec -it prod-trf-gen /bin/bash -c \"' + iperf_cmd + '\"'
SSH.command(cmd,'\$',5)
SSH.close()
#client side UE
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
client_filename = 'iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log'
cmd = 'rm '+ client_filename
SSH.command(cmd,'\$',5)
SSH.command('iperf -B ' + UE_IPAddress + ' -c ' + trf_gen_IP + ' ' + self.iperf_args + ' 2>&1 > ' + client_filename, '\$', int(iperf_time)*5.0)
SSH.close()
#once client is done, retrieve the server file from container to EPC Host
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('docker cp prod-trf-gen:/iperf-2.0.13/' + server_filename + ' ' + EPC.SourceCodePath, '\$', 5)
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, EPC.SourceCodePath + '/' + server_filename, '.')
SSH.close()
#copy the 2 resulting files locally
SSH.copyin(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword, client_filename, '.')
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, server_filename, '.')
#send for analysis
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, self.iperf_args,server_filename,1)
else :
logging.debug("Incorrect or missing IPERF direction in XML")
else: #default is ltebox
SSH = sshconnection.SSHConnection()
#kill iperf processes before (in case there are still some remaining)
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
cmd = 'killall --signal=SIGKILL iperf'
SSH.command(cmd,'\$',5)
SSH.close()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
cmd = 'killall --signal=SIGKILL iperf'
SSH.command(cmd,'\$',5)
SSH.close()
iperf_time = self.Iperf_ComputeTime()
if self.iperf_direction=="DL":
logging.debug("Iperf for Module in DL mode detected")
#server side UE
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
cmd = 'rm iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command(cmd,'\$',5)
cmd = 'echo $USER; nohup /opt/iperf-2.0.10/iperf -s -B ' + UE_IPAddress + ' -u 2>&1 > iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log &'
SSH.command(cmd,'\$',5)
SSH.close()
#client side EPC
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
cmd = 'rm iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command(cmd,'\$',5)
cmd = 'iperf -c ' + UE_IPAddress + ' ' + self.iperf_args + ' 2>&1 > iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command(cmd,'\$',int(iperf_time)*5.0)
SSH.close()
#copy the 2 resulting files locally
SSH.copyin(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword, 'iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log', '.')
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log', '.')
#send for analysis
filename='iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, self.iperf_args,filename,1)
elif self.iperf_direction=="UL":
logging.debug("Iperf for Module in UL mode detected")
#server side EPC
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
cmd = 'rm iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command(cmd,'\$',5)
cmd = 'echo $USER; nohup iperf -s -u 2>&1 > iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log &'
SSH.command(cmd,'\$',5)
SSH.close()
#client side UE
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
cmd = 'rm iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log'
SSH.command(cmd,'\$',5)
SSH.command('/opt/iperf-2.0.10/iperf -c 192.172.0.1 ' + self.iperf_args + ' 2>&1 > iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log', '\$', int(iperf_time)*5.0)
SSH.close()
#copy the 2 resulting files locally
SSH.copyin(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword, 'iperf_client_' + self.testCase_id + '_' + self.ue_id + '.log', '.')
SSH.copyin(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log', '.')
#send for analysis
filename='iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, self.iperf_args,filename,1)
else :
logging.debug("Incorrect or missing IPERF direction in XML")
#kill iperf processes after to be clean
SSH.open(Module_UE.HostIPAddress, Module_UE.HostUsername, Module_UE.HostPassword)
cmd = 'killall --signal=SIGKILL iperf'
SSH.command(cmd,'\$',5)
SSH.close()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
cmd = 'killall --signal=SIGKILL iperf'
SSH.command(cmd,'\$',5)
SSH.close()
return
def Iperf_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue,EPC):
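# Legacy iperf flow (ADB-controlled handsets and OAI UE): detect the iperf version available on
# the UE, branch to Iperf_UL_common when '-R' is requested, otherwise start an iperf server on the
# UE side, run the client from the EPC / trf-gen container / python executor, analyze the client
# report and fall back to the server log when the report is missing.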
try:
SSH = sshconnection.SSHConnection()
# Single-UE profile -- iperf only on one UE
if self.iperf_profile == 'single-ue' and idx != 0:
return
useIperf3 = False
udpIperf = True
self.ueIperfVersion = '2.0.5'
if (device_id != 'OAI-UE'):
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# if by chance ADB server and EPC are on the same remote host, at least log collection will take care of it
SSH.command('if [ ! -d ' + EPC.SourceCodePath + '/scripts ]; then mkdir -p ' + EPC.SourceCodePath + '/scripts ; fi', '\$', 5)
SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
# Checking if iperf / iperf3 are installed
if self.ADBCentralized:
SSH.command('adb -s ' + device_id + ' shell "ls /data/local/tmp"', '\$', 5)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ls /data/local/tmp"\'', '\$', 60)
# DEBUG: iperf3 usage is disabled for the moment: searching for 'iperf4' never matches, so the iperf3 branch is skipped
result = re.search('iperf4', SSH.getBefore())
if result is None:
result = re.search('iperf', SSH.getBefore())
if result is None:
message = 'Neither iperf nor iperf3 installed on UE!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
SSH.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
else:
if self.ADBCentralized:
SSH.command('adb -s ' + device_id + ' shell "/data/local/tmp/iperf --version"', '\$', 5)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "/data/local/tmp/iperf --version"\'', '\$', 60)
result = re.search('iperf version 2.0.5', SSH.getBefore())
if result is not None:
self.ueIperfVersion = '2.0.5'
result = re.search('iperf version 2.0.10', SSH.getBefore())
if result is not None:
self.ueIperfVersion = '2.0.10'
else:
useIperf3 = True
SSH.close()
else:
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('iperf --version', '\$', 5)
result = re.search('iperf version 2.0.5', SSH.getBefore())
if result is not None:
self.ueIperfVersion = '2.0.5'
result = re.search('iperf version 2.0.10', SSH.getBefore())
if result is not None:
self.ueIperfVersion = '2.0.10'
SSH.close()
# in case of iperf, UL has its own function
if (not useIperf3):
result = re.search('-R', str(self.iperf_args))
if result is not None:
self.Iperf_UL_common(lock, UE_IPAddress, device_id, idx, ue_num, statusQueue,EPC)
return
# Launch the IPERF server on the UE side for DL
if (device_id == 'OAI-UE'):
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('-u', str(self.iperf_args))
if result is None:
SSH.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
udpIperf = False
else:
SSH.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -u -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
else:
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
if self.ADBCentralized:
if (useIperf3):
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/iperf3 -s &', '\$', 5)
else:
SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('-u', str(self.iperf_args))
if result is None:
SSH.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
udpIperf = False
else:
SSH.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
else:
SSH.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
SSH.command('echo $USER; nohup ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" \' 2>&1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 60)
time.sleep(0.5)
SSH.close()
# Launch the IPERF client on the EPC side for DL (true for ltebox and old open-air-cn)
# But for OAI-Rel14-CUPS, we launch from python executor
launchFromEpc = True
launchFromModule = False
if re.match('OAI-Rel14-CUPS', EPC.Type, re.IGNORECASE):
launchFromEpc = False
#if module
if self.ue_id!='' and self.iperf :
launchFromEpc = False
launchFromModule = True
# When using a docker-based deployment, IPERF client shall be launched from trf container
launchFromTrfContainer = False
if re.match('OAI-Rel14-Docker', EPC.Type, re.IGNORECASE):
launchFromTrfContainer = True
if launchFromEpc:
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath + '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
time.sleep(0.5)
if launchFromEpc:
SSH.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
else:
if (os.path.isfile('iperf_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_' + self.testCase_id + '_' + device_id + '.log')
if (useIperf3):
SSH.command('stdbuf -o0 iperf3 -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
clientStatus = 0
self.Iperf_analyzeV3Output(lock, UE_IPAddress, device_id, statusQueue,SSH)
else:
if launchFromEpc:
if launchFromTrfContainer:
if self.ueIperfVersion == self.dummyIperfVersion:
prefix = ''
else:
prefix = ''
if self.ueIperfVersion == '2.0.5':
prefix = '/iperf-2.0.5/bin/'
iperf_status = SSH.command('docker exec -it prod-trf-gen /bin/bash -c "' + prefix + 'iperf -c ' + UE_IPAddress + ' ' + modified_options + '" 2>&1 | tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
else:
iperf_status = SSH.command('stdbuf -o0 iperf -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
else:
if self.ueIperfVersion == self.dummyIperfVersion:
prefix = ''
else:
prefix = ''
if self.ueIperfVersion == '2.0.5':
prefix = '/opt/iperf-2.0.5/bin/'
cmd = prefix + 'iperf -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 > iperf_' + self.testCase_id + '_' + device_id + '.log'
message = cmd + '\n'
logging.debug(cmd)
ret = subprocess.run(cmd, shell=True)
iperf_status = ret.returncode
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cat ' + EPC.SourceCodePath + '/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if iperf_status < 0:
if launchFromEpc:
SSH.close()
message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
logging.debug('Into Iperf_analyzeV2Output client')
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options, EPC,SSH)
logging.debug('Iperf_analyzeV2Output clientStatus returned value = ' + str(clientStatus))
SSH.close()
# Kill the IPERF server that runs in background
if (device_id == 'OAI-UE'):
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('killall iperf', '\$', 5)
else:
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell ps | grep --color=never iperf | grep -v grep', '\$', 5)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ps" | grep --color=never iperf | grep -v grep\'', '\$', 60)
result = re.search('shell +(?P<pid>\d+)', SSH.getBefore())
if result is not None:
pid_iperf = result.group('pid')
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell kill -KILL ' + pid_iperf, '\$', 5)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"\'', '\$', 60)
SSH.close()
# if the client report is absent, try to analyze the server log file
if (clientStatus == -1):
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
if (device_id == 'OAI-UE'):
SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
else:
SSH.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, EPC.SourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
# fromdos has to be called on the python executor not on ADB server
cmd = 'fromdos -o iperf_server_' + self.testCase_id + '_' + device_id + '.log 2>&1 > /dev/null'
try:
subprocess.run(cmd, shell=True)
except:
pass
cmd = 'dos2unix -o iperf_server_' + self.testCase_id + '_' + device_id + '.log 2>&1 > /dev/null'
try:
subprocess.run(cmd, shell=True)
except:
pass
filename='iperf_server_' + self.testCase_id + '_' + device_id + '.log'
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options,filename,0)
# in case of OAI UE:
if (device_id == 'OAI-UE'):
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
if not launchFromEpc:
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
else:
SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', EPC.SourceCodePath + '/scripts')
except:
os.kill(os.getppid(),signal.SIGUSR1)
def IperfNoS1(self,HTML,RAN,EPC,COTS_UE,InfraUE):
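# noS1 variant: traffic goes over the oaitun interfaces directly between the eNB and the OAI UE,
# so no EPC is involved in the data path. With '-R' the iperf server runs on the eNB side,
# otherwise on the UE side.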
SSH = sshconnection.SSHConnection()
if RAN.eNBIPAddress == '' or RAN.eNBUserName == '' or RAN.eNBPassword == '' or self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = True
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
server_on_enb = re.search('-R', str(self.iperf_args))
if server_on_enb is not None:
iServerIPAddr = RAN.eNBIPAddress
iServerUser = RAN.eNBUserName
iServerPasswd = RAN.eNBPassword
iClientIPAddr = self.UEIPAddress
iClientUser = self.UEUserName
iClientPasswd = self.UEPassword
else:
iServerIPAddr = self.UEIPAddress
iServerUser = self.UEUserName
iServerPasswd = self.UEPassword
iClientIPAddr = RAN.eNBIPAddress
iClientUser = RAN.eNBUserName
iClientPasswd = RAN.eNBPassword
if self.iperf_options != 'sink':
# Starting the iperf server
SSH.open(iServerIPAddr, iServerUser, iServerPasswd)
# args SHALL be "-c client -u any"
# -c 10.0.1.2 -u -b 1M -t 30 -i 1 -fm -B 10.0.1.1
# -B 10.0.1.1 -u -s -i 1 -fm
server_options = re.sub('-u.*$', '-u -s -i 1 -fm', str(self.iperf_args))
server_options = server_options.replace('-c','-B')
SSH.command('rm -f /tmp/tmp_iperf_server_' + self.testCase_id + '.log', '\$', 5)
SSH.command('echo $USER; nohup iperf ' + server_options + ' > /tmp/tmp_iperf_server_' + self.testCase_id + '.log 2>&1 &', iServerUser, 5)
time.sleep(0.5)
SSH.close()
# Starting the iperf client
modified_options = self.Iperf_ComputeModifiedBW(0, 1)
modified_options = modified_options.replace('-R','')
iperf_time = self.Iperf_ComputeTime()
SSH.open(iClientIPAddr, iClientUser, iClientPasswd)
SSH.command('rm -f /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', 5)
iperf_status = SSH.command('stdbuf -o0 iperf ' + modified_options + ' 2>&1 | stdbuf -o0 tee /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', int(iperf_time)*5.0)
status_queue = SimpleQueue()
lock = Lock()
if iperf_status < 0:
message = 'iperf on OAI UE crashed due to TIMEOUT !'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
clientStatus = -2
else:
if self.iperf_options == 'sink':
clientStatus = 0
status_queue.put(0)
status_queue.put('OAI-UE')
status_queue.put('10.0.1.2')
status_queue.put('Sink Test : no check')
else:
clientStatus = self.Iperf_analyzeV2Output(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options, EPC,SSH)
SSH.close()
# Stopping the iperf server
if self.iperf_options != 'sink':
SSH.open(iServerIPAddr, iServerUser, iServerPasswd)
SSH.command('killall --signal SIGKILL iperf', '\$', 5)
time.sleep(0.5)
SSH.close()
if (clientStatus == -1):
if (os.path.isfile('iperf_server_' + self.testCase_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '.log')
SSH.copyin(iServerIPAddr, iServerUser, iServerPasswd, '/tmp/tmp_iperf_server_' + self.testCase_id + '.log', 'iperf_server_' + self.testCase_id + '_OAI-UE.log')
filename='iperf_server_' + self.testCase_id + '_OAI-UE.log'
self.Iperf_analyzeV2Server(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options,filename,0)
# copying on the EPC server for logCollection
if (clientStatus == -1):
copyin_res = SSH.copyin(iServerIPAddr, iServerUser, iServerPasswd, '/tmp/tmp_iperf_server_' + self.testCase_id + '.log', 'iperf_server_' + self.testCase_id + '_OAI-UE.log')
if (copyin_res == 0):
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_server_' + self.testCase_id + '_OAI-UE.log', EPC.SourceCodePath + '/scripts')
copyin_res = SSH.copyin(iClientIPAddr, iClientUser, iClientPasswd, '/tmp/tmp_iperf_' + self.testCase_id + '.log', 'iperf_' + self.testCase_id + '_OAI-UE.log')
if (copyin_res == 0):
SSH.copyout(EPC.IPAddress, EPC.UserName, EPC.Password, 'iperf_' + self.testCase_id + '_OAI-UE.log', EPC.SourceCodePath + '/scripts')
iperf_noperf = False
if status_queue.empty():
iperf_status = False
else:
iperf_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
iperf_status = False
if (count > 0):
iperf_noperf = True
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (iperf_noperf and iperf_status):
HTML.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
elif (iperf_status):
HTML.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
else:
HTML.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
def Iperf(self,HTML,RAN,EPC,COTS_UE, InfraUE):
result = re.search('noS1', str(RAN.Initialize_eNB_args))
if result is not None:
self.IperfNoS1(HTML,RAN,EPC,COTS_UE,InfraUE)
return
if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '' or EPC.SourceCodePath == '' or self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
check_eNB = True
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
check_OAI_UE = True
else:
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE,RAN,EPC)
if (pStatus < 0):
HTML.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
if self.ue_id=="":#is not a module, follow legacy code
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
HTML.CreateHtmlTestRow(self.iperf_args, 'KO', CONST.UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
return
else: #is a module
self.UEIPAddresses=[]
Module_UE = cls_module_ue.Module_UE(InfraUE.ci_ue_infra[self.ue_id])
Module_UE.GetModuleIPAddress()
self.UEIPAddresses.append(Module_UE.UEIPAddress)
self.dummyIperfVersion = '2.0.10'
#cmd = 'iperf --version'
#logging.debug(cmd + '\n')
#iperfStdout = subprocess.check_output(cmd, shell=True, universal_newlines=True)
#result = re.search('iperf version 2.0.5', str(iperfStdout.strip()))
#if result is not None:
# dummyIperfVersion = '2.0.5'
#result = re.search('iperf version 2.0.10', str(iperfStdout.strip()))
#if result is not None:
# dummyIperfVersion = '2.0.10'
multi_jobs = []
i = 0
ue_num = len(self.UEIPAddresses)
lock = Lock()
status_queue = SimpleQueue()
logging.debug(self.UEIPAddresses)
for UE_IPAddress in self.UEIPAddresses:
device_id = self.UEDevices[i]
#special quick and dirty treatment for modules, iperf to be restructured
if self.ue_id!="": #is module
device_id = Module_UE.ID + "-" + Module_UE.Kind
p = Process(target = self.Iperf_Module ,args = (lock, UE_IPAddress, device_id, i, ue_num, status_queue, EPC, Module_UE,))
else: #legacy code
p = Process(target = self.Iperf_common, args = (lock, UE_IPAddress, device_id, i, ue_num, status_queue, EPC, ))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
HTML.CreateHtmlTestRow(self.iperf_args, 'KO', CONST.ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
else:
iperf_status = True
iperf_noperf = False
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
iperf_status = False
if (count > 0):
iperf_noperf = True
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (iperf_noperf and iperf_status):
HTML.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
elif (iperf_status):
HTML.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
else:
HTML.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
def CheckProcessExist(self, check_eNB, check_OAI_UE,RAN,EPC):
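# Verifies that the relevant processes are still alive before running traffic: HSS/MME/SPGW on
# the EPC (skipped for noS1 or band78/gNB setups), the eNB and, when applicable, the OAI UE.
# Returns 0 when all checks pass, a negative CONST error code otherwise.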
multi_jobs = []
status_queue = SimpleQueue()
# in noS1 config, no need to check status from EPC
# in gNB also currently no need to check
result = re.search('noS1|band78', str(RAN.Initialize_eNB_args))
if result is None:
p = Process(target = EPC.CheckHSSProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = EPC.CheckMMEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = EPC.CheckSPGWProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
else:
if (check_eNB == False) and (check_OAI_UE == False):
return 0
if check_eNB:
p = Process(target = RAN.CheckeNBProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
if check_OAI_UE:
p = Process(target = self.CheckOAIUEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if (status_queue.empty()):
return -15
else:
result = 0
while (not status_queue.empty()):
status = status_queue.get()
if (status < 0):
result = status
if result == CONST.ENB_PROCESS_FAILED:
fileCheck = re.search('enb_', str(RAN.eNBLogFiles[0]))
if fileCheck is not None:
SSH.copyin(RAN.eNBIPAddress, RAN.eNBUserName, RAN.eNBPassword, RAN.eNBSourceCodePath + '/cmake_targets/' + RAN.eNBLogFiles[0], '.')
logStatus = RAN.AnalyzeLogFile_eNB(RAN.eNBLogFiles[0])
if logStatus < 0:
result = logStatus
RAN.eNBLogFiles[0]=''
if RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted:
self.TerminateFlexranCtrl()
return result
def CheckOAIUEProcessExist(self, initialize_OAI_UE_flag,HTML,RAN):
multi_jobs = []
status_queue = SimpleQueue()
if initialize_OAI_UE_flag == False:
p = Process(target = self.CheckOAIUEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if (status_queue.empty()):
return -15
else:
result = 0
while (not status_queue.empty()):
status = status_queue.get()
if (status < 0):
result = status
if result == CONST.OAI_UE_PROCESS_FAILED:
fileCheck = re.search('ue_', str(self.UELogFile))
if fileCheck is not None:
SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
logStatus = self.AnalyzeLogFile_UE(self.UELogFile,HTML,RAN)
if logStatus < 0:
result = logStatus
return result
def CheckOAIUEProcess(self, status_queue):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('stdbuf -o0 ps -aux | grep --color=never ' + self.air_interface + ' | grep -v grep', '\$', 5)
result = re.search(self.air_interface, SSH.getBefore())
if result is None:
logging.debug('\u001B[1;37;41m OAI UE Process Not Found! \u001B[0m')
status_queue.put(CONST.OAI_UE_PROCESS_FAILED)
else:
status_queue.put(CONST.OAI_UE_PROCESS_OK)
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AnalyzeLogFile_UE(self, UElogFile,HTML,RAN):
if (not os.path.isfile('./' + UElogFile)):
return -1
ue_log_file = open('./' + UElogFile, 'r')
exitSignalReceived = False
foundAssertion = False
msgAssertion = ''
msgLine = 0
foundSegFault = False
foundRealTimeIssue = False
uciStatMsgCount = 0
pdcpDataReqFailedCount = 0
badDciCount = 0
f1aRetransmissionCount = 0
fatalErrorCount = 0
macBsrTimerExpiredCount = 0
rrcConnectionRecfgComplete = 0
no_cell_sync_found = False
mib_found = False
frequency_found = False
plmn_found = False
nrUEFlag = False
nrDecodeMib = 0
nrFoundDCI = 0
nrCRCOK = 0
mbms_messages = 0
HTML.htmlUEFailureMsg=''
global_status = CONST.ALL_PROCESSES_OK
for line in ue_log_file.readlines():
result = re.search('nr_synchro_time', str(line))
if result is not None:
nrUEFlag = True
if nrUEFlag:
result = re.search('decode mib', str(line))
if result is not None:
nrDecodeMib += 1
result = re.search('found 1 DCIs', str(line))
if result is not None:
nrFoundDCI += 1
result = re.search('CRC OK', str(line))
if result is not None:
nrCRCOK += 1
result = re.search('Exiting OAI softmodem', str(line))
if result is not None:
exitSignalReceived = True
result = re.search('System error|[Ss]egmentation [Ff]ault|======= Backtrace: =========|======= Memory map: ========', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Cc]ore [dD]ump', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('./lte-uesoftmodem', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Aa]ssertion', str(line))
if result is not None and not exitSignalReceived:
foundAssertion = True
result = re.search('LLL', str(line))
if result is not None and not exitSignalReceived:
foundRealTimeIssue = True
if foundAssertion and (msgLine < 3):
msgLine += 1
msgAssertion += str(line)
result = re.search('uci->stat', str(line))
if result is not None and not exitSignalReceived:
uciStatMsgCount += 1
result = re.search('PDCP data request failed', str(line))
if result is not None and not exitSignalReceived:
pdcpDataReqFailedCount += 1
result = re.search('bad DCI 1', str(line))
if result is not None and not exitSignalReceived:
badDciCount += 1
result = re.search('Format1A Retransmission but TBS are different', str(line))
if result is not None and not exitSignalReceived:
f1aRetransmissionCount += 1
result = re.search('FATAL ERROR', str(line))
if result is not None and not exitSignalReceived:
fatalErrorCount += 1
result = re.search('MAC BSR Triggered ReTxBSR Timer expiry', str(line))
if result is not None and not exitSignalReceived:
macBsrTimerExpiredCount += 1
result = re.search('Generating RRCConnectionReconfigurationComplete', str(line))
if result is not None:
rrcConnectionRecfgComplete += 1
# No cell synchronization found, abandoning
result = re.search('No cell synchronization found, abandoning', str(line))
if result is not None:
no_cell_sync_found = True
if RAN.eNBmbmsEnables[0]:
result = re.search('TRIED TO PUSH MBMS DATA', str(line))
if result is not None:
mbms_messages += 1
result = re.search("MIB Information => ([a-zA-Z]{1,10}), ([a-zA-Z]{1,10}), NidCell (?P<nidcell>\d{1,3}), N_RB_DL (?P<n_rb_dl>\d{1,3}), PHICH DURATION (?P<phich_duration>\d), PHICH RESOURCE (?P<phich_resource>.{1,4}), TX_ANT (?P<tx_ant>\d)", str(line))
if result is not None and (not mib_found):
try:
mibMsg = "MIB Information: " + result.group(1) + ', ' + result.group(2)
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " nidcell = " + result.group('nidcell')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " n_rb_dl = " + result.group('n_rb_dl')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_duration = " + result.group('phich_duration')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_resource = " + result.group('phich_resource')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " tx_ant = " + result.group('tx_ant')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mib_found = True
except Exception as e:
logging.error('\033[91m' + "MIB marker was not found" + '\033[0m')
result = re.search("Measured Carrier Frequency (?P<measured_carrier_frequency>\d{1,15}) Hz", str(line))
if result is not None and (not frequency_found):
try:
mibMsg = "Measured Carrier Frequency = " + result.group('measured_carrier_frequency') + ' Hz'
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
frequency_found = True
except Exception as e:
logging.error('\033[91m' + "Measured Carrier Frequency not found" + '\033[0m')
result = re.search("PLMN MCC (?P<mcc>\d{1,3}), MNC (?P<mnc>\d{1,3}), TAC", str(line))
if result is not None and (not plmn_found):
try:
mibMsg = 'PLMN MCC = ' + result.group('mcc') + ' MNC = ' + result.group('mnc')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
plmn_found = True
except Exception as e:
logging.error('\033[91m' + "PLMN not found" + '\033[0m')
result = re.search("Found (?P<operator>[\w,\s]{1,15}) \(name from internal table\)", str(line))
if result is not None:
try:
mibMsg = "The operator is: " + result.group('operator')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "Operator name not found" + '\033[0m')
result = re.search("SIB5 InterFreqCarrierFreq element (.{1,4})/(.{1,4})", str(line))
if result is not None:
try:
mibMsg = "SIB5 InterFreqCarrierFreq element " + result.group(1) + '/' + result.group(2)
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + mibMsg + ' -> '
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "SIB5 InterFreqCarrierFreq element not found" + '\033[0m')
result = re.search("DL Carrier Frequency/ARFCN : \-*(?P<carrier_frequency>\d{1,15}/\d{1,4})", str(line))
if result is not None:
try:
freq = result.group('carrier_frequency')
new_freq = re.sub('/[0-9]+','',freq)
float_freq = float(new_freq) / 1000000
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'DL Freq: ' + ('%.1f' % float_freq) + ' MHz'
logging.debug('\033[94m' + " DL Carrier Frequency is: " + str(freq) + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " DL Carrier Frequency not found" + '\033[0m')
result = re.search("AllowedMeasBandwidth : (?P<allowed_bandwidth>\d{1,7})", str(line))
if result is not None:
try:
prb = result.group('allowed_bandwidth')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + ' -- PRB: ' + prb + '\n'
logging.debug('\033[94m' + " AllowedMeasBandwidth: " + prb + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " AllowedMeasBandwidth not found" + '\033[0m')
ue_log_file.close()
if rrcConnectionRecfgComplete > 0:
statMsg = 'UE connected to eNB (' + str(rrcConnectionRecfgComplete) + ' RRCConnectionReconfigurationComplete message(s) generated)'
logging.debug('\033[94m' + statMsg + '\033[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if nrUEFlag:
if nrDecodeMib > 0:
statMsg = 'UE showed ' + str(nrDecodeMib) + ' MIB decode message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if nrFoundDCI > 0:
statMsg = 'UE showed ' + str(nrFoundDCI) + ' DCI found message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if nrCRCOK > 0:
statMsg = 'UE showed ' + str(nrCRCOK) + ' PDSCH decoding message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if not frequency_found:
statMsg = 'NR-UE could NOT synch!'
logging.error('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if uciStatMsgCount > 0:
statMsg = 'UE showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if pdcpDataReqFailedCount > 0:
statMsg = 'UE showed ' + str(pdcpDataReqFailedCount) + ' "PDCP data request failed" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if badDciCount > 0:
statMsg = 'UE showed ' + str(badDciCount) + ' "bad DCI 1(A)" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if f1aRetransmissionCount > 0:
statMsg = 'UE showed ' + str(f1aRetransmissionCount) + ' "Format1A Retransmission but TBS are different" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if fatalErrorCount > 0:
statMsg = 'UE showed ' + str(fatalErrorCount) + ' "FATAL ERROR:" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if macBsrTimerExpiredCount > 0:
statMsg = 'UE showed ' + str(fatalErrorCount) + ' "MAC BSR Triggered ReTxBSR Timer expiry" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if RAN.eNBmbmsEnables[0]:
if mbms_messages > 0:
statMsg = 'UE showed ' + str(mbms_messages) + ' "TRIED TO PUSH MBMS DATA" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
else:
statMsg = 'UE did NOT SHOW "TRIED TO PUSH MBMS DATA" message(s)'
logging.debug('\u001B[1;30;41m ' + statMsg + ' \u001B[0m')
global_status = CONST.OAI_UE_PROCESS_NO_MBMS_MSGS
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + statMsg + '\n'
if foundSegFault:
logging.debug('\u001B[1;37;41m UE ended with a Segmentation Fault! \u001B[0m')
if not nrUEFlag:
global_status = CONST.OAI_UE_PROCESS_SEG_FAULT
else:
if not frequency_found:
global_status = CONST.OAI_UE_PROCESS_SEG_FAULT
if foundAssertion:
logging.debug('\u001B[1;30;43m UE showed an assertion! \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'UE showed an assertion!\n'
if not nrUEFlag:
if not mib_found or not frequency_found:
global_status = CONST.OAI_UE_PROCESS_ASSERTION
else:
if not frequency_found:
global_status = CONST.OAI_UE_PROCESS_ASSERTION
if foundRealTimeIssue:
logging.debug('\u001B[1;37;41m UE faced real time issues! \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'UE faced real time issues!\n'
if nrUEFlag:
if not frequency_found:
global_status = CONST.OAI_UE_PROCESS_COULD_NOT_SYNC
else:
if no_cell_sync_found and not mib_found:
logging.debug('\u001B[1;37;41m UE could not synchronize ! \u001B[0m')
HTML.htmlUEFailureMsg=HTML.htmlUEFailureMsg + 'UE could not synchronize!\n'
global_status = CONST.OAI_UE_PROCESS_COULD_NOT_SYNC
return global_status
def TerminateFlexranCtrl(self,HTML,RAN,EPC):
if RAN.flexranCtrlInstalled == False or RAN.flexranCtrlStarted == False:
return
if EPC.IPAddress == '' or EPC.UserName == '' or EPC.Password == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('echo ' + EPC.Password + ' | sudo -S daemon --name=flexran_rtc_daemon --stop', '\$', 5)
time.sleep(1)
SSH.command('echo ' + EPC.Password + ' | sudo -S killall --signal SIGKILL rt_controller', '\$', 5)
time.sleep(1)
SSH.close()
RAN.flexranCtrlStarted=False
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def TerminateUE_common(self, device_id, idx,COTS_UE):
try:
SSH = sshconnection.SSHConnection()
SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# back in airplane mode on (ie radio off)
if self.ADBCentralized:
#RH quick add-on to integrate COTS control defined by yaml
#if device Id exists in yaml dictionary, we execute the new procedure defined in cots_ue class
#otherwise we use the legacy procedure
if COTS_UE.Check_Exists(device_id):
#switch device to Airplane mode ON (ie Radio OFF)
COTS_UE.Set_Airplane(device_id, 'ON')
elif device_id == '84B7N16418004022':
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "ps | grep --color=never iperf | grep -v grep"', '\$', 5)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ps | grep --color=never iperf | grep -v grep"\'', '\$', 60)
result = re.search('shell +(?P<pid>\d+)', SSH.getBefore())
if result is not None:
pid_iperf = result.group('pid')
if self.ADBCentralized:
SSH.command('stdbuf -o0 adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"', '\$', 5)
else:
SSH.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"\'', '\$', 60)
SSH.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def TerminateUE(self,HTML,COTS_UE,InfraUE,ue_trace):
if self.ue_id=='':#no ID specified, then it is a COTS controlled by ADB
terminate_ue_flag = False
self.GetAllUEDevices(terminate_ue_flag)
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target= self.TerminateUE_common, args = (device_id,i,COTS_UE,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
else: #if an ID is specified, it is a module from the yaml infrastructure file
Module_UE = cls_module_ue.Module_UE(InfraUE.ci_ue_infra[self.ue_id])
Module_UE.ue_trace=ue_trace
Module_UE.Command("detach")
Module_UE.DisableTrace()
Module_UE.DisableCM()
archive_destination=Module_UE.LogCollect()
if Module_UE.ue_trace=='yes':
HTML.CreateHtmlTestRow('QLog at : '+archive_destination, 'OK', CONST.ALL_PROCESSES_OK)
else:
HTML.CreateHtmlTestRow('QLog trace is disabled', 'OK', CONST.ALL_PROCESSES_OK)
def TerminateOAIUE(self,HTML,RAN,COTS_UE,EPC, InfraUE):
SSH = sshconnection.SSHConnection()
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
SSH.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('-uesoftmodem', SSH.getBefore())
if result is not None:
SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGINT -r .*-uesoftmodem || true', '\$', 5)
time.sleep(10)
SSH.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('-uesoftmodem', SSH.getBefore())
if result is not None:
SSH.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGKILL -r .*-uesoftmodem || true', '\$', 5)
time.sleep(5)
SSH.command('rm -f my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
SSH.close()
result = re.search('ue_', str(self.UELogFile))
if result is not None:
copyin_res = SSH.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
if (copyin_res == -1):
logging.debug('\u001B[1;37;41m Could not copy UE logfile to analyze it! \u001B[0m')
HTML.htmlUEFailureMsg='Could not copy UE logfile to analyze it!'
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE, 'UE')
self.UELogFile = ''
return
logging.debug('\u001B[1m Analyzing UE logfile \u001B[0m')
logStatus = self.AnalyzeLogFile_UE(self.UELogFile,HTML,RAN)
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is not None:
ueAction = 'Sniffing'
else:
ueAction = 'Connection'
if (logStatus < 0):
logging.debug('\u001B[1m' + ueAction + ' Failed \u001B[0m')
HTML.htmlUEFailureMsg='<b>' + ueAction + ' Failed</b>\n' + HTML.htmlUEFailureMsg
HTML.CreateHtmlTestRow('N/A', 'KO', logStatus, 'UE')
if self.air_interface == 'lte-uesoftmodem':
# In case of sniffing on commercial eNBs we have random results
# Not an error then
if (logStatus != CONST.OAI_UE_PROCESS_COULD_NOT_SYNC) or (ueAction != 'Sniffing'):
self.Initialize_OAI_UE_args = ''
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
else:
if (logStatus == CONST.OAI_UE_PROCESS_COULD_NOT_SYNC):
self.Initialize_OAI_UE_args = ''
self.AutoTerminateUEandeNB(HTML,RAN,COTS_UE,EPC,InfraUE)
else:
logging.debug('\u001B[1m' + ueAction + ' Completed \u001B[0m')
HTML.htmlUEFailureMsg='<b>' + ueAction + ' Completed</b>\n' + HTML.htmlUEFailureMsg
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
self.UELogFile = ''
else:
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
def AutoTerminateUEandeNB(self,HTML,RAN,COTS_UE,EPC,InfraUE):
if (self.ADBIPAddress != 'none'):
self.testCase_id = 'AUTO-KILL-UE'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of UE'
HTML.desc='Automatic Termination of UE'
self.ShowTestID()
self.TerminateUE(HTML,COTS_UE,InfraUE,self.ue_trace)
if (self.Initialize_OAI_UE_args != ''):
self.testCase_id = 'AUTO-KILL-OAI-UE'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of OAI-UE'
HTML.desc='Automatic Termination of OAI-UE'
self.ShowTestID()
self.TerminateOAIUE(HTML,RAN,COTS_UE,EPC,InfraUE)
if (RAN.Initialize_eNB_args != ''):
self.testCase_id = 'AUTO-KILL-RAN'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of all RAN nodes'
HTML.desc='Automatic Termination of RAN nodes'
self.ShowTestID()
#terminate all RAN nodes eNB/gNB/OCP
for instance in range(0, len(RAN.air_interface)):
if RAN.air_interface[instance]!='':
logging.debug('Auto Termination of Instance ' + str(instance) + ' : ' + RAN.air_interface[instance])
RAN.eNB_instance=instance
RAN.TerminateeNB(HTML,EPC)
if RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted:
self.testCase_id = 'AUTO-KILL-flexran-ctl'
HTML.testCase_id=self.testCase_id
self.desc = 'Automatic Termination of FlexRan CTL'
HTML.desc='Automatic Termination of FlexRan CTL'
self.ShowTestID()
self.TerminateFlexranCtrl(HTML,RAN,EPC)
RAN.prematureExit=True
def IdleSleep(self,HTML):
time.sleep(self.idle_sleep_time)
HTML.CreateHtmlTestRow(str(self.idle_sleep_time) + ' sec', 'OK', CONST.ALL_PROCESSES_OK)
def X2_Status(self, idx, fileName, EPC):
cmd = "curl --silent http://" + EPC.IPAddress + ":9999/stats | jq '.' > " + fileName
message = cmd + '\n'
logging.debug(cmd)
subprocess.run(cmd, shell=True)
if idx == 0:
cmd = "jq '.mac_stats | length' " + fileName
strNbEnbs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2NbENBs = int(strNbEnbs.strip())
cnt = 0
while cnt < self.x2NbENBs:
cmd = "jq '.mac_stats[" + str(cnt) + "].bs_id' " + fileName
bs_id = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2ENBBsIds[idx].append(bs_id.strip())
cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats | length' " + fileName
stNbUEs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
nbUEs = int(stNbUEs.strip())
ueIdx = 0
self.x2ENBConnectedUEs[idx].append([])
while ueIdx < nbUEs:
cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats[" + str(ueIdx) + "].rnti' " + fileName
rnti = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2ENBConnectedUEs[idx][cnt].append(rnti.strip())
ueIdx += 1
cnt += 1
msg = "FlexRan Controller is connected to " + str(self.x2NbENBs) + " eNB(s)"
logging.debug(msg)
message += msg + '\n'
cnt = 0
while cnt < self.x2NbENBs:
msg = " -- eNB: " + str(self.x2ENBBsIds[idx][cnt]) + " is connected to " + str(len(self.x2ENBConnectedUEs[idx][cnt])) + " UE(s)"
logging.debug(msg)
message += msg + '\n'
ueIdx = 0
while ueIdx < len(self.x2ENBConnectedUEs[idx][cnt]):
msg = " -- UE rnti: " + str(self.x2ENBConnectedUEs[idx][cnt][ueIdx])
logging.debug(msg)
message += msg + '\n'
ueIdx += 1
cnt += 1
return message
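# Note on the FlexRAN REST calls used by Perform_X2_Handover below (as issued
# via curl in this script; the endpoint paths are taken from the commands themselves):
#   GET  http://<EPC>:9999/stats                                   -> per-eNB/per-UE MAC stats (parsed with jq above)
#   POST http://<EPC>:9999/rrc/x2_ho_net_control/enb/<bs_id>/1     -> enable network-controlled X2 handover on an eNB
#   POST http://<EPC>:9999/rrc/ho/senb/<src>/ue/<rnti>/tenb/<dst>  -> trigger the handover of one UE
# The handover result is then judged by comparing the per-eNB connected-UE lists
# captured before (_pre_ho.json) and after (_post_ho.json) the operation.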
def Perform_X2_Handover(self,HTML,RAN,EPC):
html_queue = SimpleQueue()
fullMessage = '<pre style="background-color:white">'
msg = 'Doing X2 Handover w/ option ' + self.x2_ho_options
logging.debug(msg)
fullMessage += msg + '\n'
if self.x2_ho_options == 'network':
if RAN.flexranCtrlInstalled and RAN.flexranCtrlStarted:
self.x2ENBBsIds = []
self.x2ENBConnectedUEs = []
self.x2ENBBsIds.append([])
self.x2ENBBsIds.append([])
self.x2ENBConnectedUEs.append([])
self.x2ENBConnectedUEs.append([])
fullMessage += self.X2_Status(0, self.testCase_id + '_pre_ho.json', EPC)
msg = "Activating the X2 Net control on each eNB"
logging.debug(msg)
fullMessage += msg + '\n'
eNB_cnt = self.x2NbENBs
cnt = 0
while cnt < eNB_cnt:
cmd = "curl -XPOST http://" + EPC.IPAddress + ":9999/rrc/x2_ho_net_control/enb/" + str(self.x2ENBBsIds[0][cnt]) + "/1"
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
cnt += 1
# Waiting for the activation to be active
time.sleep(10)
msg = "Switching UE(s) from eNB to eNB"
logging.debug(msg)
fullMessage += msg + '\n'
cnt = 0
while cnt < eNB_cnt:
ueIdx = 0
while ueIdx < len(self.x2ENBConnectedUEs[0][cnt]):
cmd = "curl -XPOST http://" + EPC.IPAddress() + ":9999/rrc/ho/senb/" + str(self.x2ENBBsIds[0][cnt]) + "/ue/" + str(self.x2ENBConnectedUEs[0][cnt][ueIdx]) + "/tenb/" + str(self.x2ENBBsIds[0][eNB_cnt - cnt - 1])
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
ueIdx += 1
cnt += 1
time.sleep(10)
# check
logging.debug("Checking the Status after X2 Handover")
fullMessage += self.X2_Status(1, self.testCase_id + '_post_ho.json', EPC)
cnt = 0
x2Status = True
while cnt < eNB_cnt:
if len(self.x2ENBConnectedUEs[0][cnt]) == len(self.x2ENBConnectedUEs[1][cnt]):
x2Status = False
cnt += 1
if x2Status:
msg = "X2 Handover was successful"
logging.debug(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
HTML.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
else:
msg = "X2 Handover FAILED"
logging.error(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
HTML.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
else:
HTML.CreateHtmlTestRow('Cannot perform requested X2 Handover', 'KO', CONST.ALL_PROCESSES_OK)
def LogCollectBuild(self,RAN):
SSH = sshconnection.SSHConnection()
if (RAN.eNBIPAddress != '' and RAN.eNBUserName != '' and RAN.eNBPassword != ''):
IPAddress = RAN.eNBIPAddress
UserName = RAN.eNBUserName
Password = RAN.eNBPassword
SourceCodePath = RAN.eNBSourceCodePath
elif (self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != ''):
IPAddress = self.UEIPAddress
UserName = self.UEUserName
Password = self.UEPassword
SourceCodePath = self.UESourceCodePath
else:
sys.exit('Insufficient Parameter')
SSH.open(IPAddress, UserName, Password)
SSH.command('cd ' + SourceCodePath, '\$', 5)
SSH.command('cd cmake_targets', '\$', 5)
SSH.command('rm -f build.log.zip', '\$', 5)
SSH.command('zip -r build.log.zip build_log_*/*', '\$', 60)
SSH.close()
def LogCollectPing(self,EPC):
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
SSH.command('cd scripts', '\$', 5)
SSH.command('rm -f ping.log.zip', '\$', 5)
SSH.command('zip ping.log.zip ping*.log', '\$', 60)
SSH.command('rm ping*.log', '\$', 5)
SSH.close()
def LogCollectIperf(self,EPC):
SSH = sshconnection.SSHConnection()
SSH.open(EPC.IPAddress, EPC.UserName, EPC.Password)
SSH.command('cd ' + EPC.SourceCodePath, '\$', 5)
SSH.command('cd scripts', '\$', 5)
SSH.command('rm -f iperf.log.zip', '\$', 5)
SSH.command('zip iperf.log.zip iperf*.log', '\$', 60)
SSH.command('rm iperf*.log', '\$', 5)
SSH.close()
def LogCollectOAIUE(self):
SSH = sshconnection.SSHConnection()
SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
SSH.command('cd ' + self.UESourceCodePath, '\$', 5)
SSH.command('cd cmake_targets', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S rm -f ue.log.zip', '\$', 5)
SSH.command('echo ' + self.UEPassword + ' | sudo -S zip ue.log.zip ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 60)
SSH.command('echo ' + self.UEPassword + ' | sudo -S rm ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 5)
SSH.close()
def RetrieveSystemVersion(self, machine,HTML,RAN):
if RAN.eNBIPAddress == 'none' or self.UEIPAddress == 'none':
HTML.OsVersion[0]='Ubuntu 16.04.5 LTS'
HTML.KernelVersion[0]='4.15.0-45-generic'
HTML.UhdVersion[0]='3.13.0.1-0'
HTML.UsrpBoard[0]='B210'
HTML.CpuNb[0]='4'
HTML.CpuModel[0]='Intel(R) Core(TM) i5-6200U'
HTML.CpuMHz[0]='2399.996 MHz'
return 0
if machine == 'eNB':
if RAN.eNBIPAddress != '' and RAN.eNBUserName != '' and RAN.eNBPassword != '':
IPAddress = RAN.eNBIPAddress
UserName = RAN.eNBUserName
Password = RAN.eNBPassword
idx = 0
else:
return -1
if machine == 'UE':
if self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != '':
IPAddress = self.UEIPAddress
UserName = self.UEUserName
Password = self.UEPassword
idx = 1
else:
return -1
SSH = sshconnection.SSHConnection()
SSH.open(IPAddress, UserName, Password)
SSH.command('lsb_release -a', '\$', 5)
result = re.search('Description:\\\\t(?P<os_type>[a-zA-Z0-9\-\_\.\ ]+)', SSH.getBefore())
if result is not None:
OsVersion = result.group('os_type')
logging.debug('OS is: ' + OsVersion)
HTML.OsVersion[idx]=OsVersion
else:
SSH.command('hostnamectl', '\$', 5)
result = re.search('Operating System: (?P<os_type>[a-zA-Z0-9\-\_\.\ ]+)', SSH.getBefore())
if result is not None:
OsVersion = result.group('os_type')
if OsVersion == 'CentOS Linux 7 ':
SSH.command('cat /etc/redhat-release', '\$', 5)
result = re.search('CentOS Linux release (?P<os_version>[0-9\.]+)', SSH.getBefore())
if result is not None:
OsVersion = OsVersion.replace('7 ', result.group('os_version'))
logging.debug('OS is: ' + OsVersion)
HTML.OsVersion[idx]=OsVersion
SSH.command('uname -r', '\$', 5)
result = re.search('uname -r\\\\r\\\\n(?P<kernel_version>[a-zA-Z0-9\-\_\.]+)', SSH.getBefore())
if result is not None:
KernelVersion = result.group('kernel_version')
logging.debug('Kernel Version is: ' + KernelVersion)
HTML.KernelVersion[idx]=KernelVersion
SSH.command('dpkg --list | egrep --color=never libuhd003', '\$', 5)
result = re.search('libuhd003:amd64 *(?P<uhd_version>[0-9\.]+)', SSH.getBefore())
if result is not None:
UhdVersion = result.group('uhd_version')
logging.debug('UHD Version is: ' + UhdVersion)
HTML.UhdVersion[idx]=UhdVersion
else:
SSH.command('uhd_config_info --version', '\$', 5)
result = re.search('UHD (?P<uhd_version>[a-zA-Z0-9\.\-]+)', SSH.getBefore())
if result is not None:
UhdVersion = result.group('uhd_version')
logging.debug('UHD Version is: ' + UhdVersion)
HTML.UhdVersion[idx]=UhdVersion
SSH.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 90)
usrp_boards = re.findall('product: ([0-9A-Za-z]+)\\\\r\\\\n', SSH.getBefore())
count = 0
for board in usrp_boards:
if count == 0:
UsrpBoard = board
else:
UsrpBoard += ',' + board
count += 1
if count > 0:
logging.debug('USRP Board(s) : ' + UsrpBoard)
HTML.UsrpBoard[idx]=UsrpBoard
SSH.command('lscpu', '\$', 5)
result = re.search('CPU\(s\): *(?P<nb_cpus>[0-9]+).*Model name: *(?P<model>[a-zA-Z0-9\-\_\.\ \(\)]+).*CPU MHz: *(?P<cpu_mhz>[0-9\.]+)', SSH.getBefore())
if result is not None:
CpuNb = result.group('nb_cpus')
logging.debug('nb_cpus: ' + CpuNb)
HTML.CpuNb[idx]=CpuNb
CpuModel = result.group('model')
logging.debug('model: ' + CpuModel)
HTML.CpuModel[idx]=CpuModel
CpuMHz = result.group('cpu_mhz') + ' MHz'
logging.debug('cpu_mhz: ' + CpuMHz)
HTML.CpuMHz[idx]=CpuMHz
SSH.close()
def ConditionalExit(self):
if self.testUnstable:
if self.testStabilityPointReached or self.testMinStableId == '999999':
sys.exit(0)
sys.exit(1)
def ShowTestID(self):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1mTest ID:' + self.testCase_id + '\u001B[0m')
logging.debug('\u001B[1m' + self.desc + '\u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
|
msee_tests.py
|
import sys
import ptf
from ptf.base_tests import BaseTest
import ptf.testutils as testutils
from ptf.testutils import simple_arp_packet
import struct
import subprocess
from threading import Thread
from pprint import pprint
sys.path.append('/usr/lib/python2.7/site-packages')
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from arp_responder import arp_responder
from arp_responder.ttypes import req_tuple_t, req_tuples_t, rep_tuple_t
class MSEEEasyTest(BaseTest):
def setUp(self):
self.dataplane = ptf.dataplane_instance
self.dataplane.flush()
self.my_mac = {}
self.remote_mac = {}
for port_id, port in self.dataplane.ports.iteritems():
self.my_mac[port_id[1]] = port.mac()
self.remote_mac[port_id[1]] = self.get_remote_mac(port_id[1])
self.result = None
socket = TSocket.TSocket('localhost', 9091)
self.transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.client = arp_responder.Client(protocol)
self.transport.open()
def tearDown(self):
self.transport.close()
def runTest(self):
self.test_reply()
self.test_request()
def test_reply(self):
self.test_reply_qinq(0)
self.test_reply_qinq(1)
def test_reply_qinq(self, port_number):
intf = 'iif%d' % port_number
stag = 10
ctag = 20
self.client.add_interface(intf)
self.client.add_ip(intf, stag, ctag, 0x01020304)
src_mac = self.my_mac[port_number]
packet = simple_arp_packet(
pktlen=42,
eth_src=src_mac,
ip_snd='1.2.3.1',
ip_tgt='1.2.3.4',
hw_snd=src_mac,
hw_tgt='00:00:00:00:00:00',
)
tagged_packet = self.insert_tags(packet, stag, ctag)
testutils.send_packet(self, (0, port_number), tagged_packet)
self.client.del_ip(intf, stag, ctag)
self.client.del_interface(intf)
exp_packet = simple_arp_packet(
pktlen=42,
eth_dst=src_mac,
eth_src=self.remote_mac[port_number],
arp_op=2,
ip_snd='1.2.3.4',
ip_tgt='1.2.3.1',
hw_snd=self.remote_mac[port_number],
hw_tgt=src_mac
)
tagged_exp_packet = self.insert_tags(exp_packet, stag, ctag)
testutils.verify_packet(self, tagged_exp_packet, port_number)
def test_request(self):
thr = Thread(target=self.request_mac_thread)
thr.start()
stag = 10
ctag = 20
exp_packet_1 = simple_arp_packet(
pktlen=42,
eth_dst='ff:ff:ff:ff:ff:ff',
eth_src=self.remote_mac[0],
ip_snd='1.2.3.4',
ip_tgt='1.2.3.1',
hw_snd=self.remote_mac[0],
hw_tgt='ff:ff:ff:ff:ff:ff'
)
t_exp_packet0 = self.insert_tags(exp_packet_1, stag, ctag)
testutils.verify_packet(self, t_exp_packet0, 0)
exp_packet_2 = simple_arp_packet(
pktlen=42,
eth_dst='ff:ff:ff:ff:ff:ff',
eth_src=self.remote_mac[1],
ip_snd='1.2.3.5',
ip_tgt='1.2.3.1',
hw_snd=self.remote_mac[1],
hw_tgt='ff:ff:ff:ff:ff:ff'
)
t_exp_packet1 = self.insert_tags(exp_packet_2, stag, ctag)
testutils.verify_packet(self, t_exp_packet1, 1)
packet = simple_arp_packet(
pktlen=42,
eth_dst=self.remote_mac[0],
eth_src=self.my_mac[0],
arp_op=2,
ip_snd='1.2.3.1',
ip_tgt='1.2.3.4',
hw_snd=self.my_mac[0],
hw_tgt=self.remote_mac[0]
)
tagged_packet = self.insert_tags(packet, stag, ctag)
testutils.send_packet(self, (0, 0), tagged_packet)
thr.join()
result_mac = ":".join("%02x" % v for v in list(struct.unpack("BBBBBB", self.result[0].mac)))
self.assertTrue(self.result[0].index == 0)
self.assertTrue(self.result[0].is_found)
self.assertTrue(result_mac == self.my_mac[0])
self.assertTrue(self.result[0].request.stag == 10)
self.assertTrue(self.result[0].request.ctag == 20)
self.assertTrue(self.result[0].request.iface_name == 'iif0')
def request_mac_thread(self):
self.client.add_interface('iif0')
self.client.add_ip('iif0', 10, 20, 0x01020304)
self.client.add_interface('iif1')
self.client.add_ip('iif1', 10, 20, 0x01020305)
t1 = req_tuples_t([req_tuple_t('iif0', 10, 20), req_tuple_t('iif1', 10, 20)], 0, 0x01020301)
self.result = self.client.request_mac([t1])
self.client.del_ip('iif1', 10, 20)
self.client.del_interface('iif1')
self.client.del_ip('iif0', 10, 20)
self.client.del_interface('iif0')
def get_remote_mac(self, port_number):
mac, _, _ = self.cmd(['docker', 'exec', '-ti', 'arpresponder_test', 'cat', '/sys/class/net/iif%d/address' % port_number])
return mac.strip()
def cmd(self, cmds):
process = subprocess.Popen(cmds,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return_code = process.returncode
return stdout, stderr, return_code
def insert_tags(self, packet, stag, ctag):
p = str(packet)
vlan_hdr = struct.pack("!HHHH",0x88A8, stag, 0x8100, ctag)
return p[0:12] + vlan_hdr + p[12:]
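# For reference, insert_tags() builds a QinQ (802.1ad) header and splices it in
# right after the destination/source MAC addresses (the first 12 bytes of the frame):
# outer TPID 0x88A8 + S-tag, then inner TPID 0x8100 + C-tag. As an illustration
# (not part of the original test), with stag=10 and ctag=20 the 8 inserted bytes
# are 88 a8 00 0a 81 00 00 14, and the original EtherType/payload follow unchanged.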
|
test_server.py
|
import os
from multiprocessing.managers import DictProxy
from pathlib import Path
from unittest.mock import Mock, ANY
import requests
import time
import uuid
import urllib.parse
from typing import List, Text, Type, Generator, NoReturn, Dict, Optional
from contextlib import ExitStack
from _pytest import pathlib
from aioresponses import aioresponses
import pytest
from freezegun import freeze_time
from mock import MagicMock
from multiprocessing import Process, Manager
import rasa
import rasa.constants
import rasa.shared.constants
import rasa.shared.utils.io
import rasa.utils.io
import rasa.server
from rasa.core import utils
from rasa.core.tracker_store import InMemoryTrackerStore
from rasa.shared.core import events
from rasa.core.agent import Agent
from rasa.core.channels import (
channel,
CollectingOutputChannel,
RestInput,
SlackInput,
CallbackInput,
)
from rasa.core.channels.slack import SlackBot
from rasa.shared.core.constants import ACTION_SESSION_START_NAME
from rasa.shared.core.domain import Domain, SessionConfig
from rasa.shared.core.events import (
Event,
UserUttered,
SlotSet,
BotUttered,
ActionExecuted,
SessionStarted,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.model import unpack_model
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.utils.endpoints import EndpointConfig
from sanic import Sanic
from sanic.testing import SanicASGITestClient
from tests.nlu.utilities import ResponseTest
from tests.utilities import json_of_latest_request, latest_request
from ruamel.yaml import StringIO
# a couple of event instances that we can use for testing
test_events = [
Event.from_parameters(
{
"event": UserUttered.type_name,
"text": "/goodbye",
"parse_data": {
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"entities": [],
},
}
),
BotUttered("Welcome!", {"test": True}),
SlotSet("cuisine", 34),
SlotSet("cuisine", "34"),
SlotSet("location", None),
SlotSet("location", [34, "34", None]),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicASGITestClient:
return rasa_server_without_api.asgi_client
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicASGITestClient:
return rasa_server.asgi_client
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicASGITestClient:
return rasa_nlu_server.asgi_client
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicASGITestClient:
return rasa_core_server.asgi_client
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicASGITestClient:
return rasa_server_secured.asgi_client
async def test_root(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
async def test_root_without_enable_api(rasa_app_without_api: SanicASGITestClient):
_, response = await rasa_app_without_api.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
async def test_root_secured(rasa_secured_app: SanicASGITestClient):
_, response = await rasa_secured_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
async def test_version(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/version")
content = response.json()
assert response.status == 200
assert content.get("version") == rasa.__version__
assert (
content.get("minimum_compatible_version")
== rasa.constants.MINIMUM_COMPATIBLE_VERSION
)
async def test_status(rasa_app: SanicASGITestClient, trained_rasa_model: Text):
_, response = await rasa_app.get("/status")
model_file = response.json()["model_file"]
assert response.status == 200
assert "fingerprint" in response.json()
assert os.path.isfile(model_file)
assert model_file == trained_rasa_model
async def test_status_nlu_only(
rasa_app_nlu: SanicASGITestClient, trained_nlu_model: Text
):
_, response = await rasa_app_nlu.get("/status")
model_file = response.json()["model_file"]
assert response.status == 200
assert "fingerprint" in response.json()
assert "model_file" in response.json()
assert model_file == trained_nlu_model
async def test_status_secured(rasa_secured_app: SanicASGITestClient):
_, response = await rasa_secured_app.get("/status")
assert response.status == 401
async def test_status_not_ready_agent(rasa_app: SanicASGITestClient):
rasa_app.app.agent = None
_, response = await rasa_app.get("/status")
assert response.status == 409
@pytest.fixture
def shared_statuses() -> DictProxy:
return Manager().dict()
@pytest.fixture
def background_server(
shared_statuses: DictProxy, tmpdir: pathlib.Path
) -> Generator[Process, None, None]:
# Create a fake model archive which the mocked train function can return
fake_model = Path(tmpdir) / "fake_model.tar.gz"
fake_model.touch()
fake_model_path = str(fake_model)
# Fake training function which blocks until we tell it to stop blocking
# If we can send a status request while this is blocking, we can be sure that the
# actual training is also not blocking
def mocked_training_function(*_, **__) -> Text:
# Tell the others that we are now blocking
shared_statuses["started_training"] = True
# Block until somebody tells us to not block anymore
while shared_statuses.get("stop_training") is not True:
time.sleep(1)
return fake_model_path
def run_server() -> NoReturn:
rasa.train = mocked_training_function
from rasa import __main__
import sys
sys.argv = ["rasa", "run", "--enable-api"]
__main__.main()
server = Process(target=run_server)
yield server
server.terminate()
@pytest.fixture()
def training_request(
shared_statuses: DictProxy, tmp_path: Path
) -> Generator[Process, None, None]:
def send_request() -> None:
payload = {}
project_path = Path("examples") / "formbot"
for file in [
"domain.yml",
"config.yml",
Path("data") / "rules.yml",
Path("data") / "stories.yml",
Path("data") / "nlu.yml",
]:
full_path = project_path / file
# Read in as dictionaries to avoid that keys, which are specified in
# multiple files (such as 'version'), clash.
content = rasa.shared.utils.io.read_yaml_file(full_path)
payload.update(content)
concatenated_payload_file = tmp_path / "concatenated.yml"
rasa.shared.utils.io.write_yaml(payload, concatenated_payload_file)
payload_as_yaml = concatenated_payload_file.read_text()
response = requests.post(
"http://localhost:5005/model/train",
data=payload_as_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"force_training": True},
)
shared_statuses["training_result"] = response.status_code
train_request = Process(target=send_request)
yield train_request
train_request.terminate()
# For unknown reasons this test cannot be run in PyCharm; it results in
# segfaults, so we skip it in that case - the test will still get run on CI.
# It also doesn't run on Windows because of Process-related calls and an attempt
# to start/terminate a process. We will investigate this case further later:
# https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
background_server: Process, shared_statuses: DictProxy, training_request: Process
):
background_server.start()
def is_server_ready() -> bool:
try:
return requests.get("http://localhost:5005/status").status_code == 200
except Exception:
return False
# wait until server is up before sending train request and status test loop
start = time.time()
while not is_server_ready() and time.time() - start < 60:
time.sleep(1)
assert is_server_ready()
training_request.start()
# Wait until the blocking training function was called
start = time.time()
while (
shared_statuses.get("started_training") is not True and time.time() - start < 60
):
time.sleep(1)
# Check if the number of currently running trainings was incremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == 200
assert response.json()["num_active_training_jobs"] == 1
# Tell the blocking training function to stop
shared_statuses["stop_training"] = True
start = time.time()
while shared_statuses.get("training_result") is None and time.time() - start < 60:
time.sleep(1)
assert shared_statuses.get("training_result")
# Check that the training worked correctly
assert shared_statuses["training_result"] == 200
# Check if the number of currently running trainings was decremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == 200
assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse(rasa_app: SanicASGITestClient, response_test: ResponseTest):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
rjs = response.json()
assert response.status == 200
assert all(prop in rjs for prop in ["entities", "intent", "text"])
assert rjs["entities"] == response_test.expected_response["entities"]
assert rjs["text"] == response_test.expected_response["text"]
assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse?emulation_mode=wit",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=dialogflow",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=luis",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse_with_different_emulation_mode(
rasa_app: SanicASGITestClient, response_test: ResponseTest
):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
assert response.status == 200
async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient):
_, response = await rasa_app_core.post("/model/parse", json={"text": "hello"})
assert response.status == 200
rjs = response.json()
assert all(prop in rjs for prop in ["entities", "intent", "text"])
async def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicASGITestClient):
_, response = await rasa_app_nlu.post(
"/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
)
assert response.status == 400
async def test_train_stack_success(
rasa_app: SanicASGITestClient,
default_domain_path: Text,
default_stories_file: Text,
default_stack_config: Text,
default_nlu_data: Text,
tmp_path: Path,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
stories_file = stack.enter_context(open(default_stories_file))
nlu_file = stack.enter_context(open(default_nlu_data))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=stories_file.read(),
nlu=nlu_file.read(),
)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == 200
assert response.headers["filename"] is not None
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_nlu_success(
rasa_app: SanicASGITestClient,
default_stack_config: Text,
default_nlu_data: Text,
default_domain_path: Text,
tmp_path: Path,
):
domain_data = rasa.shared.utils.io.read_yaml_file(default_domain_path)
config_data = rasa.shared.utils.io.read_yaml_file(default_stack_config)
nlu_data = rasa.shared.utils.io.read_yaml_file(default_nlu_data)
# combine all data into our payload
payload = {
key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items()
}
data = StringIO()
rasa.shared.utils.io.write_yaml(payload, data)
_, response = await rasa_app.post(
"/model/train",
data=data.getvalue(),
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_core_success(
rasa_app: SanicASGITestClient,
default_stack_config: Text,
default_stories_file: Text,
default_domain_path: Text,
tmp_path: Path,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(open(default_stories_file))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == 200
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_with_retrieval_events_success(
rasa_app: SanicASGITestClient, default_stack_config: Text, tmp_path: Path
):
with ExitStack() as stack:
domain_file = stack.enter_context(
open("data/test_domains/default_retrieval_intents.yml")
)
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(
open("data/test_stories/stories_retrieval_intents.md")
)
responses_file = stack.enter_context(open("data/test_responses/default.md"))
nlu_file = stack.enter_context(
open("data/test_nlu/default_retrieval_intents.md")
)
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
responses=responses_file.read(),
nlu=nlu_file.read(),
)
_, response = await rasa_app.post("/model/train", json=payload, timeout=60 * 5)
assert response.status == 200
assert_trained_model(response.body, tmp_path)
def assert_trained_model(response_body: bytes, tmp_path: Path) -> None:
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response_body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
@pytest.mark.parametrize(
"payload",
[
{"config": None, "stories": None, "nlu": None, "domain": None, "force": True},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"force": False,
"save_to_default_model_directory": True,
},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"save_to_default_model_directory": False,
},
],
)
def test_deprecation_warnings_json_payload(payload: Dict):
with pytest.warns(FutureWarning):
rasa.server._validate_json_training_payload(payload)
async def test_train_with_yaml(rasa_app: SanicASGITestClient, tmp_path: Path):
training_data = """
stories:
- story: My story
steps:
- intent: greet
- action: utter_greet
rules:
- rule: My rule
steps:
- intent: greet
- action: utter_greet
intents:
- greet
nlu:
- intent: greet
examples: |
- hi
- hello
responses:
utter_greet:
- text: Hi
language: en
policies:
- name: RulePolicy
pipeline:
- name: KeywordIntentClassifier
"""
_, response = await rasa_app.post(
"/model/train",
data=training_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert_trained_model(response.body, tmp_path)
async def test_train_with_invalid_yaml(rasa_app: SanicASGITestClient):
invalid_yaml = """
rules:
rule my rule
"""
_, response = await rasa_app.post(
"/model/train",
data=invalid_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 400
@pytest.mark.parametrize(
"headers, expected",
[({}, False), ({"force_training": False}, False), ({"force_training": True}, True)],
)
def test_training_payload_from_yaml_force_training(headers: Dict, expected: bool):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request)
assert payload.get("force_training") == expected
@pytest.mark.parametrize(
"headers, expected",
[
({}, rasa.shared.constants.DEFAULT_MODELS_PATH),
({"save_to_default_model_directory": False}, ANY),
(
{"save_to_default_model_directory": True},
rasa.shared.constants.DEFAULT_MODELS_PATH,
),
],
)
def test_training_payload_from_yaml_save_to_default_model_directory(
headers: Dict, expected: Text
):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request)
assert payload.get("output")
assert payload.get("output") == expected
async def test_train_missing_config(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config=None)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == 400
async def test_train_missing_training_data(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config="config data")
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == 400
async def test_train_internal_error(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config="config data", nlu="nlu data")
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == 500
async def test_evaluate_stories(
rasa_app: SanicASGITestClient, default_stories_file: Text
):
stories = rasa.shared.utils.io.read_file(default_stories_file)
_, response = await rasa_app.post("/model/test/stories", data=stories)
assert response.status == 200
js = response.json()
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert not js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_stories_not_ready_agent(
rasa_app_nlu: SanicASGITestClient, default_stories_file: Text
):
stories = rasa.shared.utils.io.read_file(default_stories_file)
_, response = await rasa_app_nlu.post("/model/test/stories", data=stories)
assert response.status == 409
async def test_evaluate_stories_end_to_end(
rasa_app: SanicASGITestClient, end_to_end_story_file: Text
):
stories = rasa.shared.utils.io.read_file(end_to_end_story_file)
_, response = await rasa_app.post("/model/test/stories?e2e=true", data=stories)
assert response.status == 200
js = response.json()
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert js["is_end_to_end_evaluation"]
assert js["actions"] != []
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_intent(rasa_app: SanicASGITestClient, default_nlu_data: Text):
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_intent_on_just_nlu_model(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_intent_with_query_param(
rasa_app: SanicASGITestClient, trained_nlu_model, default_nlu_data: Text
):
_, response = await rasa_app.get("/status")
previous_model_file = response.json()["model_file"]
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app.post(
f"/model/test/intents?model={trained_nlu_model}",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == 200
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
_, response = await rasa_app.get("/status")
assert previous_model_file == response.json()["model_file"]
async def test_predict(rasa_app: SanicASGITestClient):
data = {
"Events": {
"value": [
{"event": "action", "name": "action_listen"},
{
"event": "user",
"text": "hello",
"parse_data": {
"entities": [],
"intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
},
]
}
}
_, response = await rasa_app.post(
"/model/predict",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json()
assert response.status == 200
assert "scores" in content
assert "tracker" in content
assert "policy" in content
@freeze_time("2018-01-01")
async def test_requesting_non_existent_tracker(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/conversations/madeupid/tracker")
content = response.json()
assert response.status == 200
assert content["paused"] is False
assert content["slots"] == {"name": None}
assert content["sender_id"] == "madeupid"
assert content["events"] == [
{
"event": "action",
"name": "action_session_start",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
},
{"event": "session_started", "timestamp": 1514764800},
{
"event": "action",
INTENT_NAME_KEY: "action_listen",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
},
]
assert content["latest_message"] == {
"text": None,
"intent": {},
"entities": [],
"message_id": None,
"metadata": {},
}
@pytest.mark.parametrize("event", test_events)
async def test_pushing_event(rasa_app: SanicASGITestClient, event: Event):
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = event.as_dict()
# Remove timestamp so that a new one is assigned on the server
serialized_event.pop("timestamp")
time_before_adding_events = time.time()
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json() is not None
assert response.status == 200
_, tracker_response = await rasa_app.get(f"/conversations/{sender_id}/tracker")
tracker = tracker_response.json()
assert tracker is not None
assert len(tracker.get("events")) == 1
evt = tracker.get("events")[0]
deserialised_event = Event.from_parameters(evt)
assert deserialised_event == event
assert deserialised_event.timestamp > time_before_adding_events
async def test_push_multiple_events(rasa_app: SanicASGITestClient):
conversation_id = str(uuid.uuid1())
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json() is not None
assert response.status == 200
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json()
assert tracker is not None
# there is also an `ACTION_LISTEN` event at the start
assert tracker.get("events") == events
@pytest.mark.parametrize(
"params", ["?execute_side_effects=true&output_channel=callback", ""],
)
async def test_pushing_event_while_executing_side_effects(
rasa_server: Sanic, params: Text
):
input_channel = CallbackInput(EndpointConfig("https://example.com/callback"))
channel.register([input_channel], rasa_server, "/webhooks/")
rasa_app = rasa_server.asgi_client
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = test_events[1].as_dict()
with aioresponses() as mocked:
mocked.post(
"https://example.com/callback",
repeat=True,
headers={"Content-Type": "application/json"},
)
await rasa_app.post(
f"{conversation}/tracker/events{params}",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
r = latest_request(mocked, "post", "https://example.com/callback")
if not params:
assert r is None
else:
message_received = json_of_latest_request(r)
assert message_received.get("recipient_id") == sender_id
assert message_received.get("text") == serialized_event.get("text")
async def test_post_conversation_id_with_slash(rasa_app: SanicASGITestClient):
conversation_id = str(uuid.uuid1())
id_len = len(conversation_id) // 2
conversation_id = conversation_id[:id_len] + "/+-_\\=" + conversation_id[id_len:]
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": "application/json"},
)
assert response.json() is not None
assert response.status == 200
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json()
assert tracker is not None
# there is also an `ACTION_LISTEN` event at the start
assert tracker.get("events") == events
async def test_put_tracker(rasa_app: SanicASGITestClient):
data = [event.as_dict() for event in test_events]
_, response = await rasa_app.put(
"/conversations/pushtracker/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json()
assert response.status == 200
assert len(content["events"]) == len(test_events)
assert content["sender_id"] == "pushtracker"
_, tracker_response = await rasa_app.get("/conversations/pushtracker/tracker")
tracker = tracker_response.json()
assert tracker is not None
evts = tracker.get("events")
assert events.deserialise_events(evts) == test_events
async def test_sorted_predict(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "sortedpredict")
_, response = await rasa_app.post("/conversations/sortedpredict/predict")
scores = response.json()["scores"]
sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"]))
assert scores == sorted_scores
async def _create_tracker_for_sender(app: SanicASGITestClient, sender_id: Text) -> None:
data = [event.as_dict() for event in test_events[:3]]
_, response = await app.put(
f"/conversations/{sender_id}/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == 200
async def test_get_tracker_with_jwt(rasa_secured_app: SanicASGITestClient):
# token generated with secret "core" and algorithm HS256
# on https://jwt.io/
# {"user": {"username": "testadmin", "role": "admin"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
"m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
"QRre7IWTuIDrCn5AIw"
}
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == 200
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == 200
# {"user": {"username": "testuser", "role": "user"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
"2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
"HJHOxxC_w7TtwCrs"
}
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == 403
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == 200
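# A hedged sketch (assumes the PyJWT package is available in the test environment) of how
# an equivalent bearer token could be generated programmatically instead of copying one
# from https://jwt.io/ as in the comments above; secret "core" and algorithm HS256 match
# test_get_tracker_with_jwt.
def _make_jwt_auth_header(username, role, secret="core"):
    import jwt  # PyJWT, assumed to be installed

    token = jwt.encode({"user": {"username": username, "role": role}}, secret, algorithm="HS256")
    if isinstance(token, bytes):  # PyJWT < 2.0 returns bytes
        token = token.decode("utf-8")
    return {"Authorization": f"Bearer {token}"}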
def test_list_routes(default_agent: Agent):
app = rasa.server.create_app(default_agent, auth_token=None)
routes = utils.list_routes(app)
assert set(routes.keys()) == {
"hello",
"version",
"status",
"retrieve_tracker",
"append_events",
"replace_events",
"retrieve_story",
"execute_action",
"trigger_intent",
"predict",
"add_message",
"train",
"evaluate_stories",
"evaluate_intents",
"tracker_predict",
"parse",
"load_model",
"unload_model",
"get_domain",
}
async def test_unload_model_error(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/status")
assert response.status == 200
assert "model_file" in response.json() and response.json()["model_file"] is not None
_, response = await rasa_app.delete("/model")
assert response.status == 204
async def test_get_domain(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get(
"/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE}
)
content = response.json()
assert response.status == 200
assert "config" in content
assert "intents" in content
assert "entities" in content
assert "slots" in content
assert "responses" in content
assert "actions" in content
async def test_get_domain_invalid_accept_header(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/domain")
assert response.status == 406
async def test_load_model(rasa_app: SanicASGITestClient, trained_core_model: Text):
_, response = await rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json()
old_fingerprint = response.json()["fingerprint"]
data = {"model_file": trained_core_model}
_, response = await rasa_app.put("/model", json=data)
assert response.status == 204
_, response = await rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json()
assert old_fingerprint != response.json()["fingerprint"]
async def test_load_model_from_model_server(
rasa_app: SanicASGITestClient, trained_core_model: Text
):
_, response = await rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json()
old_fingerprint = response.json()["fingerprint"]
endpoint = EndpointConfig("https://example.com/model/trained_core_model")
with open(trained_core_model, "rb") as f:
with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
headers = {}
fs = os.fstat(f.fileno())
headers["Content-Length"] = str(fs[6])
mocked.get(
"https://example.com/model/trained_core_model",
content_type="application/x-tar",
body=f.read(),
)
data = {"model_server": {"url": endpoint.url}}
_, response = await rasa_app.put("/model", json=data)
assert response.status == 204
_, response = await rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json()
assert old_fingerprint != response.json()["fingerprint"]
import rasa.core.jobs
rasa.core.jobs.__scheduler = None
async def test_load_model_invalid_request_body(rasa_app: SanicASGITestClient):
_, response = await rasa_app.put("/model")
assert response.status == 400
async def test_load_model_invalid_configuration(rasa_app: SanicASGITestClient):
data = {"model_file": "some-random-path"}
_, response = await rasa_app.put("/model", json=data)
assert response.status == 400
async def test_execute(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "test_execute")
data = {INTENT_NAME_KEY: "utter_greet"}
_, response = await rasa_app.post("/conversations/test_execute/execute", json=data)
assert response.status == 200
parsed_content = response.json()
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_execute_with_missing_action_name(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_missing_action_name"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"wrong-key": "utter_greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == 400
async def test_execute_with_not_existing_action(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_not_existing_action"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"name": "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == 500
async def test_trigger_intent(rasa_app: SanicASGITestClient):
data = {INTENT_NAME_KEY: "greet"}
_, response = await rasa_app.post(
"/conversations/test_trigger/trigger_intent", json=data
)
assert response.status == 200
parsed_content = response.json()
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_trigger_intent_with_missing_intent_name(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_missing_action_name"
data = {"wrong-key": "greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == 400
async def test_trigger_intent_with_not_existing_intent(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_not_existing_intent"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == 404
@pytest.mark.parametrize(
"input_channels, output_channel_to_use, expected_channel",
[
(None, "slack", CollectingOutputChannel),
([], None, CollectingOutputChannel),
([RestInput()], "slack", CollectingOutputChannel),
([RestInput()], "rest", CollectingOutputChannel),
([RestInput(), SlackInput("test")], "slack", SlackBot),
],
)
def test_get_output_channel(
input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type
):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": output_channel_to_use}
actual = rasa.server._get_output_channel(request, None)
assert isinstance(actual, expected_channel)
@pytest.mark.parametrize(
"input_channels, expected_channel",
[
([], CollectingOutputChannel),
([RestInput()], CollectingOutputChannel),
([RestInput(), SlackInput("test")], SlackBot),
],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": "latest"}
tracker = DialogueStateTracker.from_events(
"default", [UserUttered("text", input_channel="slack")]
)
actual = rasa.server._get_output_channel(request, tracker)
assert isinstance(actual, expected_channel)
def test_app_when_app_has_no_input_channels():
request = MagicMock()
class NoInputChannels:
pass
request.app = NoInputChannels()
actual = rasa.server._get_output_channel(
request, DialogueStateTracker.from_events("default", [])
)
assert isinstance(actual, CollectingOutputChannel)
@pytest.mark.parametrize(
"conversation_events,until_time,fetch_all_sessions,expected",
# conversation with one session
[
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# conversation with multiple sessions
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID, story 1
steps:
- intent: greet
user: |-
hi
- action: utter_greet
- story: some-conversation-ID, story 2
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# conversation with multiple sessions, but setting `all_sessions=false`
# means only the last one is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
False,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# the default for `all_sessions` is `false` - this test checks that
# only the latest session is returned in that case
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
None,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# `until` parameter means only the first session is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
SessionStarted(timestamp=2),
UserUttered("hi", {"name": "greet"}, timestamp=3),
ActionExecuted("utter_greet", timestamp=4),
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=5),
SessionStarted(timestamp=6),
UserUttered("bye bye", {"name": "goodbye"}, timestamp=7),
ActionExecuted("utter_goodbye", timestamp=8),
],
4,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# empty conversation
([], None, True, 'version: "2.0"'),
],
)
async def test_get_story(
rasa_app: SanicASGITestClient,
conversation_events: List[Event],
until_time: Optional[float],
fetch_all_sessions: Optional[bool],
expected: Text,
):
conversation_id = "some-conversation-ID"
tracker_store = InMemoryTrackerStore(Domain.empty())
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
tracker_store.save(tracker)
rasa_app.app.agent.tracker_store = tracker_store
url = f"/conversations/{conversation_id}/story?"
query = {}
if fetch_all_sessions is not None:
query["all_sessions"] = fetch_all_sessions
if until_time is not None:
query["until"] = until_time
_, response = await rasa_app.get(url + urllib.parse.urlencode(query))
assert response.status == 200
assert response.content.decode().strip() == expected
async def test_get_story_does_not_update_conversation_session(
rasa_app: SanicASGITestClient,
):
conversation_id = "some-conversation-ID"
# domain with short session expiration time of one second
domain = Domain.empty()
domain.session_config = SessionConfig(
session_expiration_time=1 / 60, carry_over_slots=True
)
# conversation contains one session that has expired
now = time.time()
conversation_events = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=now - 10),
SessionStarted(timestamp=now - 9),
UserUttered("hi", {"name": "greet"}, timestamp=now - 8),
ActionExecuted("utter_greet", timestamp=now - 7),
]
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
tracker_store = InMemoryTrackerStore(domain)
tracker_store.save(tracker)
rasa_app.app.agent.tracker_store = tracker_store
_, response = await rasa_app.get(f"/conversations/{conversation_id}/story")
assert response.status == 200
# expected story is returned
assert (
response.content.decode().strip()
== """version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet"""
)
# the tracker has the same number of events as were initially added
assert len(tracker.events) == len(conversation_events)
# the last event is still the same as before
assert tracker.events[-1].timestamp == conversation_events[-1].timestamp
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from selfdrive.car.car_helpers import get_car
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance'])
def wait_for_event(evt):
if not evt.wait(15):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
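# A minimal sketch (illustrative only) of the rendezvous FakeSocket implements: the process
# under test blocks in receive() until the driver has pushed data via send(), and send()
# in turn waits until receive() has actually been called. Assumes the driver runs on the
# main thread and the tested code on a worker thread, as in the replay below.
def _fake_socket_handshake_example():
  sock = FakeSocket()
  received = []

  def process_under_test():
    # blocks on recv_ready until the driver has handed over a message
    received.append(sock.receive())

  worker = threading.Thread(target=process_under_test, daemon=True)
  worker.start()
  sock.send(b"fake CAN frame")  # waits for recv_called, then releases the worker
  worker.join(timeout=1)
  return received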
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan"], "radarState": ["longitudinalPlan"],
"carState": [], "controlsState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
),
]
def replay_process(cfg, lr):
proc = managed_processes[cfg.proc_name]
if isinstance(proc, PythonProcess):
return python_replay_process(cfg, lr)
else:
return cpp_replay_process(cfg, lr)
def python_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.manager_start()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("CommunityFeaturesToggle", True)
os.environ['NO_RADAR_SLEEP'] = "1"
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
os.environ['FINGERPRINT'] = msg.carParams.carFingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
sockets = {s: messaging.sub_sock(s, timeout=1000) for s in sub_sockets}
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
time.sleep(1) # We give the process time to start
log_msgs = []
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for msg in tqdm(pub_msgs, disable=CI):
pm.send(msg.which(), msg.as_builder())
resp_sockets = sub_sockets if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is not None:
log_msgs.append(response)
managed_processes[cfg.proc_name].stop()
return log_msgs
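# A hedged usage sketch (assumes openpilot's tools.lib.logreader is importable and that
# segment_path points at a locally available rlog; the chosen process name is illustrative):
# replaying one process over a recorded segment and collecting the messages it would publish.
def _replay_example(segment_path):
  from tools.lib.logreader import LogReader  # assumed openpilot helper
  lr = list(LogReader(segment_path))
  cfg = next(c for c in CONFIGS if c.proc_name == "radard")
  return replay_process(cfg, lr)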
|
actor_definition.py
|
import contextlib
import logging
import os
import pkgutil
import sys
from io import UnsupportedOperation
from multiprocessing import Process, Queue
import leapp.libraries.actor # noqa # pylint: disable=unused-import
from leapp.actors import get_actor_metadata, get_actors
from leapp.exceptions import (ActorInspectionFailedError, LeappRuntimeError, MultipleActorsError,
UnsupportedDefinitionKindError)
from leapp.repository.definition import DefinitionKind
from leapp.utils.libraryfinder import LeappLibrariesFinder
def inspect_actor(definition, result_queue):
"""
Retrieves the actor information in a child process and returns the results back through `result_queue`.
:param definition: the actor definition to load
:type definition: :py:class:`ActorDefinition`
:param result_queue: queue to pass results back to the calling process
:type result_queue: :py:class:`multiprocessing.Queue`
"""
definition.load()
result = [get_actor_metadata(actor) for actor in get_actors()]
result = [entry for entry in result if entry['path'] in definition.full_path]
result_queue.put(result)
class ActorCallContext(object):
"""
Wraps the actor execution into child process.
"""
def __init__(self, definition, logger, messaging, config_model, skip_dialogs):
"""
:param definition: Actor definition
:type definition: :py:class:`leapp.repository.actor_definition.ActorDefinition`
:param logger: Logger
:type logger: :py:class:`logging.Logger`
:param messaging: Leapp Messaging
:type messaging: :py:class:`leapp.messaging.BaseMessaging`
:param config_model: Workflow provided configuration model
        :type config_model: :py:class:`leapp.models.Model` derived class
        :param skip_dialogs: Whether dialog processing should be skipped
        :type skip_dialogs: bool
        """
self.definition = definition
self.logger = logger
self.messaging = messaging
self.config_model = config_model
self.skip_dialogs = skip_dialogs
@staticmethod
def _do_run(stdin, logger, messaging, definition, config_model, skip_dialogs, args, kwargs):
if stdin is not None:
try:
sys.stdin = os.fdopen(stdin)
except OSError:
pass
definition.load()
with definition.injected_context():
target_actor = [actor for actor in get_actors() if actor.name == definition.name][0]
target_actor(logger=logger, messaging=messaging, config_model=config_model,
skip_dialogs=skip_dialogs).run(*args, **kwargs)
def run(self, *args, **kwargs):
"""
Performs the actor execution in the child process.
"""
try:
stdin = sys.stdin.fileno()
except UnsupportedOperation:
stdin = None
p = Process(target=self._do_run,
args=(stdin, self.logger, self.messaging, self.definition, self.config_model,
self.skip_dialogs, args, kwargs))
p.start()
p.join()
if p.exitcode != 0:
raise LeappRuntimeError(
'Actor {actorname} unexpectedly terminated with exit code: {exitcode}'
.format(actorname=self.definition.name, exitcode=p.exitcode))
class ActorDefinition(object):
"""
Defines actor resources.
"""
def __init__(self, directory, repo_dir, log=None):
"""
:param log: Logger
:type log: :py:class:`logging.Logger`
:param directory: Actor directory
:type directory: str
:param repo_dir: Repository directory
:type repo_dir: str
"""
self.log = log or logging.getLogger('leapp.actor')
self._directory = directory
self._repo_dir = repo_dir
self._definitions = {}
self._module = None
self._discovery = None
@property
def full_path(self):
return os.path.realpath(os.path.join(self._repo_dir, self._directory))
def add(self, kind, path):
"""
Adds any kind of actor resource to the Definition
:param kind: kind of resource added
:type kind: str
:param path: path to the added resource
:type path: str
"""
if kind not in DefinitionKind.ACTOR_WHITELIST:
self.log.error("Attempt to add item type %s to actor that is not supported", kind.name)
raise UnsupportedDefinitionKindError('Actors do not support {kind}.'.format(kind=kind.name))
self._definitions.setdefault(kind, []).append(path)
def serialize(self):
"""
:return: dump of actor resources (path, name, tools, files, libraries, tests)
"""
return {
'path': self.directory,
'name': self.name,
'class_name': self.class_name,
'description': self.description,
'tags': self.tags,
'consumes': self.consumes,
'produces': self.produces,
'apis': self.apis,
'dialogs': [dialog.serialize() for dialog in self.dialogs],
'tools': self.tools,
'files': self.files,
'libraries': self.libraries,
'tests': self.tests
}
def load(self):
"""
Loads the actor module to be introspectable.
"""
if not self._module:
with self.injected_context():
path = os.path.abspath(os.path.join(self._repo_dir, self.directory))
for importer, name, is_pkg in pkgutil.iter_modules((path,)):
if not is_pkg:
self._module = importer.find_module(name).load_module(name)
break
def discover(self):
"""
Performs introspection through a subprocess.
:return: Dictionary with discovered items.
"""
if not self._discovery:
self.log.debug("Starting actor discovery in %s", self.directory)
q = Queue(1)
p = Process(target=inspect_actor, args=(self, q))
p.start()
p.join()
if p.exitcode != 0:
self.log.error("Process inspecting actor in %s failed with %d", self.directory, p.exitcode)
raise ActorInspectionFailedError('Inspection of actor in {path} failed'.format(path=self.directory))
result = q.get()
if not result:
self.log.error("Process inspecting actor in %s returned no result", self.directory)
raise ActorInspectionFailedError(
'Inspection of actor in {path} produced no results'.format(path=self.directory))
if len(result) > 1:
self.log.error("Actor in %s returned multiple actors", self.directory)
raise MultipleActorsError(self.directory)
self._discovery = result[0]
for tag in self._discovery['tags']:
if self not in tag.actors:
tag.actors += (self,)
return self._discovery
def __call__(self, messaging=None, logger=None, config_model=None, skip_dialogs=False):
return ActorCallContext(definition=self, messaging=messaging, logger=logger, config_model=config_model,
skip_dialogs=skip_dialogs)
@property
def dialogs(self):
"""
:return: Tuple of defined dialogs
"""
return self.discover()['dialogs']
@property
def consumes(self):
"""
:return: Tuple of consumed models
"""
return self.discover()['consumes']
@property
def produces(self):
"""
:return: Tuple of produced models
"""
return self.discover()['produces']
@property
def tags(self):
"""
:return: Tuple of tags assigned to the actor
"""
return self.discover()['tags']
@property
def class_name(self):
"""
:return: Actor class name
"""
return self.discover()['class_name']
@property
def name(self):
"""
:return: Actor internal name
"""
return self.discover()['name']
@property
def description(self):
"""
:return: Actor description
"""
return self.discover()['description']
@contextlib.contextmanager
def injected_context(self):
"""
Prepares the actor environment for running the actor.
This includes injecting actor private libraries into :py:mod:`leapp.libraries.actor`
and setting environment variables for private tools and files.
:note: Use with caution.
"""
# Backup of the path variable
path_backup = os.environ.get('PATH', '')
os.environ['PATH'] = ':'.join(path_backup.split(':') + list(
os.path.join(self._repo_dir, self._directory, path) for path in self.tools))
files_backup = os.environ.get('LEAPP_FILES', None)
if self.files:
os.environ['LEAPP_FILES'] = os.path.join(self._repo_dir, self._directory, self.files[0])
tools_backup = os.environ.get('LEAPP_TOOLS', None)
if self.tools:
os.environ['LEAPP_TOOLS'] = os.path.join(self._repo_dir, self._directory, self.tools[0])
sys.meta_path.append(
LeappLibrariesFinder(
module_prefix='leapp.libraries.actor',
paths=[os.path.join(self._repo_dir, self.directory, x) for x in self.libraries]))
previous_path = os.getcwd()
os.chdir(os.path.join(self._repo_dir, self._directory))
try:
yield
finally:
os.chdir(previous_path)
# Restoration of the PATH environment variable
os.environ['PATH'] = path_backup
# Restoration of the LEAPP_FILES environment variable
if files_backup is not None:
os.environ['LEAPP_FILES'] = files_backup
else:
os.environ.pop('LEAPP_FILES', None)
if tools_backup is not None:
os.environ['LEAPP_TOOLS'] = tools_backup
else:
os.environ.pop('LEAPP_TOOLS', None)
@property
def apis(self):
"""
:return: names of APIs used by this actor
"""
return tuple(self.discover()['apis'])
@property
def directory(self):
"""
:return: The folder path of the actor
"""
return self._directory
@property
def tools(self):
"""
:return: Tuple with path to the tools folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.TOOLS, ()))
@property
def libraries(self):
"""
:return: Tuple with path to the libraries folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.LIBRARIES, ()))
@property
def files(self):
"""
:return: Tuple with path to the files folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.FILES, ()))
@property
def tests(self):
"""
:return: Tuple with path to the tests folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.TESTS, ()))
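# A hedged usage sketch (paths are illustrative and assume a leapp repository checkout):
# building an ActorDefinition, registering a resource folder, and triggering the
# subprocess-based discovery implemented by inspect_actor() above.
def _actor_definition_example(repo_dir, actor_dir):
    definition = ActorDefinition(actor_dir, repo_dir)
    definition.add(DefinitionKind.TOOLS, 'tools')
    metadata = definition.discover()  # spawns a child process running inspect_actor()
    return metadata['name'], definition.consumes, definition.produces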
|
run.py
|
# -*- coding: utf-8 -*-
# Master scheduler + multi-process controller
from do.house_selector import HouseSelector
from do.page_extractor import PageExtractor
from do.price_stater import PriceStater
from util.config import ConfigParser
from constant.logger import *
import time
class Do(HouseSelector, PageExtractor, PriceStater):
@staticmethod
def step_hs(times=10, size=50):
        '''Step 1 of the pipeline'''
        base_info("Start running HouseSelector times=%d size=%d"%(times, size))
HouseSelector.spider_to_rds(times=times, size=size)
HouseSelector.rds_to_db()
@staticmethod
def step_pe(times=5, size=20):
        '''Step 2 of the pipeline'''
        base_info("Start running PageExtractor times=%d size=%d"%(times, size))
PageExtractor.base_page_extract(size=size)
PageExtractor.loop_page_extract(size=size, times=times)
@staticmethod
def step_ps(size=20, mday=1):
        '''Step 3 of the pipeline - the price-trend table is refreshed once a month'''
if mday == int(time.localtime().tm_mday):
base_info("开始运行PriceStater size=%d, mday=%d"%(size, mday))
PriceStater.price_stat(size=size)
else:
base_info("今天不是%d号,不执行PriceStater"%mday)
@staticmethod
def do(use_normal=True):
        '''Default entry point
        - use_normal: run the steps with the built-in default parameters
'''
from multiprocessing import Process
if use_normal:
hs_times = 10
hs_size = 50
pe_times = 5
pe_size = 20
ps_mday = 1
ps_size = 20
else:
cfg_reader = ConfigParser(config_file="spider.cfg", section_name="quota")
hs_times = cfg_reader.read("hs_times")
hs_size = cfg_reader.read("hs_size")
pe_times = cfg_reader.read("pe_times")
pe_size = cfg_reader.read("pe_size")
ps_mday = cfg_reader.read("ps_mday")
ps_size = cfg_reader.read("ps_size")
Do.step_hs(times=int(hs_times), size=int(hs_size))
p_step_pe = Process(target=Do.step_pe, args=(int(pe_times), int(pe_size), ))
p_step_ps = Process(target=Do.step_ps, args=(int(ps_size), int(ps_mday), ))
p_step_pe.start()
p_step_ps.start()
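# For reference, a hedged sketch of the [quota] section that spider.cfg is assumed to
# provide when do(use_normal=False) is used (key names taken from the reads above, values
# illustrative; the exact file layout depends on util.config.ConfigParser):
#
# [quota]
# hs_times = 10
# hs_size = 50
# pe_times = 5
# pe_size = 20
# ps_mday = 1
# ps_size = 20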
|
fantome_opera_serveur.py
|
# coding=utf-8
from random import shuffle,randrange
from time import sleep
from threading import Thread
import dummy0, dummy1
latence = 0.01
permanents, deux, avant, apres = {'rose'}, {'rouge','gris','bleu'}, {'violet','marron'}, {'noir','blanc'}
couleurs = avant | permanents | apres | deux
passages = [{1,4},{0,2},{1,3},{2,7},{0,5,8},{4,6},{5,7},{3,6,9},{4,9},{7,8}]
pass_ext = [{1,4},{0,2,5,7},{1,3,6},{2,7},{0,5,8,9},{4,6,1,8},{5,7,2,9},{3,6,9,1},{4,9,5},{7,8,4,6}]
def message(texte,jos):
for j in jos:
f = open("./"+str(j.numero)+"/infos.txt","a")
f.write(texte + "\n")
f.close()
def informer(texte):
message(texte,joueurs)
def demander(q,j):
informer("QUESTION : "+q)
f = open("./"+str(j.numero)+"/questions"+".txt","w")
f.write(q)
f.close()
sleep(latence)
f = open("./"+str(j.numero)+"/reponses"+".txt","r")
# r = f.read()
r = f.readline()
f.close()
# informer("REPONSE DONNEE : "+r)
informer("REPONSE DONNEE : "+str(r))
return r
class personnage:
def __init__(self,couleur):
self.couleur, self.suspect, self.position, self.pouvoir = couleur, True, 0, True
def __repr__(self):
susp = "-suspect" if self.suspect else "-clean"
return self.couleur + "-" + str(self.position) + susp
class joueur:
def __init__(self,n):
self.numero = n
self.role = "l'inspecteur" if n == 0 else "le fantome"
def jouer(self,party):
informer("****\n Tour de "+self.role)
p = self.selectionner(party.tuiles_actives)
avec = self.activer_pouvoir(p,party,avant|deux)
self.bouger(p,avec,party.bloque)
self.activer_pouvoir(p,party,apres|deux)
def selectionner(self,t):
w = demander("Tuiles disponibles : " + str(t) + " choisir entre 0 et " + str(len(t)-1),self)
i = int(w) if w.isnumeric() and int(w) in range(len(t)) else 0
p = t[i]
informer("REPONSE INTERPRETEE : "+str(p))
informer(self.role + " joue " + p.couleur)
del t[i]
return p
def activer_pouvoir(self,p,party,activables):
if p.pouvoir and p.couleur in activables:
a = demander("Voulez-vous activer le pouvoir (0/1) ?",self) == "1"
informer("REPONSE INTERPRETEE : "+str(a==1))
if a :
informer("Pouvoir de " + p.couleur + " activé")
p.pouvoir = False
if p.couleur == "rouge":
draw = party.cartes[0]
informer(str(draw) + " a été tiré")
if draw == "fantome":
party.start += -1 if self.numero == 0 else 1
elif self.numero == 0:
draw.suspect = False
del party.cartes[0]
if p.couleur == "noir":
for q in party.personnages:
if q.position in {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque} :
q.position = p.position
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "blanc":
for q in party.personnages:
if q.position == p.position and p != q:
dispo = {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque}
w = demander(str(q) + ", positions disponibles : " + str(dispo) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in dispo else dispo.pop()
informer("REPONSE INTERPRETEE : "+str(x))
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "violet":
informer("Rappel des positions :\n" + str(party))
co = demander("Avec quelle couleur échanger (pas violet!) ?",self)
if co not in couleurs:
co = "rose"
informer("REPONSE INTERPRETEE : "+co)
q = [x for x in party.personnages if x.couleur == co][0]
p.position, q.position = q.position, p.position
informer("NOUVEAU PLACEMENT : "+str(p))
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "marron":
return [q for q in party.personnages if p.position == q.position]
if p.couleur == "gris":
w = demander("Quelle salle obscurcir ? (0-9)",self)
party.shadow = int(w) if w.isnumeric() and int(w) in range(10) else 0
informer("REPONSE INTERPRETEE : "+str(party.shadow))
if p.couleur == "bleu":
w = demander("Quelle salle bloquer ? (0-9)",self)
x = int(w) if w.isnumeric() and int(w) in range(10) else 0
w = demander("Quelle sortie ? Chosir parmi : "+str(passages[x]),self)
y = int(w) if w.isnumeric() and int(w) in passages[x] else passages[x].copy().pop()
informer("REPONSE INTERPRETEE : "+str({x,y}))
party.bloque = {x,y}
return [p]
def bouger(self,p,avec,bloque):
pass_act = pass_ext if p.couleur == 'rose' else passages
if p.couleur != 'violet' or p.pouvoir:
disp = {x for x in pass_act[p.position] if p.position not in bloque or x not in bloque}
w = demander("positions disponibles : " + str(disp) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in disp else disp.pop()
informer("REPONSE INTERPRETEE : "+str(x))
for q in avec:
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
class partie:
def __init__(self,joueurs):
for i in [0,1]:
f = open("./" + str(i) + "/infos.txt","w")
f.close()
f = open("./" + str(i) + "/questions.txt","w")
f.close()
f = open("./" + str(i) + "/reponses.txt","w")
f.close()
self.joueurs = joueurs
self.start, self.end, self.num_tour, self.shadow, x = 4, 22, 1, randrange(10), randrange(10)
self.bloque = {x,passages[x].copy().pop()}
self.personnages = {personnage(c) for c in couleurs}
self.tuiles = [p for p in self.personnages]
self.cartes = self.tuiles[:]
self.fantome = self.cartes[randrange(8)]
message("!!! Le fantôme est : "+self.fantome.couleur,[self.joueurs[1]])
self.cartes.remove(self.fantome)
self.cartes += ['fantome']*3
shuffle(self.tuiles)
shuffle(self.cartes)
for i,p in enumerate(self.tuiles):
p.position = i
def actions(self):
joueur_actif = self.num_tour % 2
if joueur_actif == 1:
shuffle(self.tuiles)
self.tuiles_actives = self.tuiles[:4]
else:
self.tuiles_actives = self.tuiles[4:]
for i in [joueur_actif,1-joueur_actif,1-joueur_actif,joueur_actif]:
self.joueurs[i].jouer(self)
def lumiere(self):
partition = [{p for p in self.personnages if p.position == i} for i in range(10)]
if len(partition[self.fantome.position]) == 1 or self.fantome.position == self.shadow:
informer("le fantome frappe")
self.start += 1
for piece,gens in enumerate(partition):
if len(gens) > 1 and piece != self.shadow:
for p in gens:
p.suspect = False
else:
informer("pas de cri")
for piece,gens in enumerate(partition):
if len(gens) == 1 or piece == self.shadow:
for p in gens:
p.suspect = False
self.start += len([p for p in self.personnages if p.suspect])
def tour(self):
informer("**************************\n" + str(self))
self.actions()
self.lumiere()
for p in self.personnages:
p.pouvoir = True
self.num_tour += 1
def lancer(self):
while self.start < self.end and len([p for p in self.personnages if p.suspect]) > 1:
self.tour()
informer("L'enquêteur a trouvé - c'était " + str(self.fantome) if self.start < self.end else "Le fantôme a gagné")
informer("Score final : "+str(self.end-self.start))
def __repr__(self):
return "Tour:" + str(self.num_tour) + ", Score:"+str(self.start)+"/"+str(self.end) + ", Ombre:" + str(self.shadow) + ", Bloque:" + str(self.bloque) +"\n" + " ".join([str(p) for p in self.personnages])
joueurs = [joueur(0),joueur(1)]
Thread(target=dummy0.lancer).start()
Thread(target=dummy1.lancer).start()
partie(joueurs).lancer()
|
thread.py
|
import threading
import time
import random
# basic thread: subclass threading.Thread and override run()
class MyThread(threading.Thread):
def run(self):
# task implementation
print("in my thread")
thread = MyThread()
thread.start()  # start() runs MyThread.run() in a new thread; calling run() directly would execute it in the calling thread
# basic threads without a lock: output order is non-deterministic
def show_number_without_lock(number):
wait_time = random.uniform(0, 0.5)
time.sleep(wait_time)
print(str(number) + ',')
thread_list = []
for n in range(1, 11):
thread = threading.Thread(target=show_number_without_lock, args=[n])
thread_list.append(thread)
for i in thread_list:
i.start()
# i.join()
time.sleep(2)
# basic threads with a lock: only one worker sleeps and prints at a time
lock = threading.Lock()
def show_number_with_lock(number):
with lock:
wait_time = random.uniform(0, 0.5)
time.sleep(wait_time)
print(str(number) + ',')
for n in range(1, 11):
threading.Thread(target=show_number_with_lock, args=(n,)).start()
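# basic threads with join: a minimal sketch showing how to wait for all workers to finish
# before the main thread continues (same style as the examples above)
def show_number_with_join(number):
    wait_time = random.uniform(0, 0.5)
    time.sleep(wait_time)
    print(str(number) + ',')

join_threads = []
for n in range(1, 11):
    t = threading.Thread(target=show_number_with_join, args=(n,))
    join_threads.append(t)
    t.start()
for t in join_threads:
    t.join()  # blocks until this worker has finished
print('all workers finished')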
|
download_from_google_storage.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download files from Google Storage based on SHA1 sums."""
import hashlib
import optparse
import os
import Queue
import re
import shutil
import stat
import sys
import tarfile
import threading
import time
import subprocess2
GSUTIL_DEFAULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
# Maps sys.platform to what we actually want to call them.
PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
}
class FileNotFoundError(IOError):
pass
class InvalidFileError(IOError):
pass
class InvalidPlatformError(Exception):
pass
def GetNormalizedPlatform():
"""Returns the result of sys.platform accounting for cygwin.
Under cygwin, this will always return "win32" like the native Python."""
if sys.platform == 'cygwin':
return 'win32'
return sys.platform
# Common utilities
class Gsutil(object):
"""Call gsutil with some predefined settings. This is a convenience object,
and is also immutable."""
def __init__(self, path, boto_path=None, timeout=None, version='4.15'):
if not os.path.exists(path):
raise FileNotFoundError('GSUtil not found in %s' % path)
self.path = path
self.timeout = timeout
self.boto_path = boto_path
self.version = version
def get_sub_env(self):
env = os.environ.copy()
if self.boto_path == os.devnull:
env['AWS_CREDENTIAL_FILE'] = ''
env['BOTO_CONFIG'] = ''
elif self.boto_path:
env['AWS_CREDENTIAL_FILE'] = self.boto_path
env['BOTO_CONFIG'] = self.boto_path
return env
def call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout)
def check_call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
((out, err), code) = subprocess2.communicate(
cmd,
stdout=subprocess2.PIPE,
stderr=subprocess2.PIPE,
env=self.get_sub_env(),
timeout=self.timeout)
# Parse output.
status_code_match = re.search('status=([0-9]+)', err)
if status_code_match:
return (int(status_code_match.group(1)), out, err)
if ('You are attempting to access protected data with '
'no configured credentials.' in err):
return (403, out, err)
if 'matched no objects' in err:
return (404, out, err)
return (code, out, err)
def check_platform(target):
"""Checks if any parent directory of target matches (win|mac|linux)."""
assert os.path.isabs(target)
root, target_name = os.path.split(target)
if not target_name:
return None
if target_name in ('linux', 'mac', 'win'):
return target_name
return check_platform(root)
def get_sha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
# Download-specific code starts here
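# For reference, a .sha1 file as consumed by enumerate_work_queue() below is a plain text
# file whose first line is the 40-character hex digest of the artifact to fetch, e.g.
# (value illustrative -- this happens to be the SHA1 of an empty file):
#   da39a3ee5e6b4b0d3255bfef95601890afd80709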
def enumerate_work_queue(input_filename, work_queue, directory,
recursive, ignore_errors, output, sha1_file,
auto_platform):
if sha1_file:
if not os.path.exists(input_filename):
if not ignore_errors:
raise FileNotFoundError('%s not found.' % input_filename)
print >> sys.stderr, '%s not found.' % input_filename
with open(input_filename, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
work_queue.put((sha1_match.groups(1)[0], output))
return 1
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % input_filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename
return 0
if not directory:
work_queue.put((input_filename, output))
return 1
work_queue_size = 0
for root, dirs, files in os.walk(input_filename):
if not recursive:
for item in dirs[:]:
dirs.remove(item)
else:
for exclude in ['.svn', '.git']:
if exclude in dirs:
dirs.remove(exclude)
for filename in files:
full_path = os.path.join(root, filename)
if full_path.endswith('.sha1'):
if auto_platform:
# Skip if the platform does not match.
target_platform = check_platform(os.path.abspath(full_path))
if not target_platform:
err = ('--auto_platform passed in but no platform name found in '
'the path of %s' % full_path)
if not ignore_errors:
raise InvalidFileError(err)
print >> sys.stderr, err
continue
current_platform = PLATFORM_MAPPING[sys.platform]
if current_platform != target_platform:
continue
with open(full_path, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
work_queue.put(
(sha1_match.groups(1)[0], full_path.replace('.sha1', '')))
work_queue_size += 1
else:
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % filename
return work_queue_size
def _validate_tar_file(tar, prefix):
def _validate(tarinfo):
"""Returns false if the tarinfo is something we explicitly forbid."""
if tarinfo.issym() or tarinfo.islnk():
return False
if '..' in tarinfo.name or not tarinfo.name.startswith(prefix):
return False
return True
return all(map(_validate, tar.getmembers()))
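# (For example, a member named '../../etc/crontab', a symlink or hardlink entry, or any
# member that does not live under the expected '<archive-name>/' prefix makes
# _validate_tar_file() return False, and the worker below rejects the whole archive.)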
def _downloader_worker_thread(thread_num, q, force, base_url,
gsutil, out_q, ret_codes, verbose, extract,
delete=True):
while True:
input_sha1_sum, output_filename = q.get()
if input_sha1_sum is None:
return
if os.path.exists(output_filename) and not force:
if get_sha1(output_filename) == input_sha1_sum:
if verbose:
out_q.put(
'%d> File %s exists and SHA1 matches. Skipping.' % (
thread_num, output_filename))
continue
# Check if file exists.
file_url = '%s/%s' % (base_url, input_sha1_sum)
(code, _, err) = gsutil.check_call('ls', file_url)
if code != 0:
if code == 404:
out_q.put('%d> File %s for %s does not exist, skipping.' % (
thread_num, file_url, output_filename))
ret_codes.put((1, 'File %s for %s does not exist.' % (
file_url, output_filename)))
else:
# Other error, probably auth related (bad ~/.boto, etc).
out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' % (
thread_num, file_url, output_filename, err))
ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' % (
file_url, output_filename, err)))
continue
# Fetch the file.
out_q.put('%d> Downloading %s...' % (thread_num, output_filename))
try:
if delete:
os.remove(output_filename) # Delete the file if it exists already.
except OSError:
if os.path.exists(output_filename):
out_q.put('%d> Warning: deleting %s failed.' % (
thread_num, output_filename))
code, _, err = gsutil.check_call('cp', file_url, output_filename)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
continue
remote_sha1 = get_sha1(output_filename)
if remote_sha1 != input_sha1_sum:
msg = ('%d> ERROR remote sha1 (%s) does not match expected sha1 (%s).' %
(thread_num, remote_sha1, input_sha1_sum))
out_q.put(msg)
ret_codes.put((20, msg))
continue
if extract:
if (not tarfile.is_tarfile(output_filename)
or not output_filename.endswith('.tar.gz')):
out_q.put('%d> Error: %s is not a tar.gz archive.' % (
thread_num, output_filename))
ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename)))
continue
with tarfile.open(output_filename, 'r:gz') as tar:
dirname = os.path.dirname(os.path.abspath(output_filename))
extract_dir = output_filename[0:len(output_filename)-7]
if not _validate_tar_file(tar, os.path.basename(extract_dir)):
out_q.put('%d> Error: %s contains files outside %s.' % (
thread_num, output_filename, extract_dir))
ret_codes.put((1, '%s contains invalid entries.' % (output_filename)))
continue
if os.path.exists(extract_dir):
try:
shutil.rmtree(extract_dir)
out_q.put('%d> Removed %s...' % (thread_num, extract_dir))
except OSError:
out_q.put('%d> Warning: Can\'t delete: %s' % (
thread_num, extract_dir))
ret_codes.put((1, 'Can\'t delete %s.' % (extract_dir)))
continue
out_q.put('%d> Extracting %d entries from %s to %s' %
(thread_num, len(tar.getmembers()),output_filename,
extract_dir))
tar.extractall(path=dirname)
# Set executable bit.
if sys.platform == 'cygwin':
# Under cygwin, mark all files as executable. The executable flag in
# Google Storage will not be set when uploading from Windows, so if
# this script is running under cygwin and we're downloading an
# executable, it will be unrunnable from inside cygwin without this.
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
elif sys.platform != 'win32':
# On non-Windows platforms, key off of the custom header
# "x-goog-meta-executable".
      code, out, err = gsutil.check_call('stat', file_url)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
elif re.search(r'executable:\s*1', out):
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
def printer_worker(output_queue):
while True:
line = output_queue.get()
    # It's plausible we want to print empty lines.
if line is None:
break
print line
def download_from_google_storage(
input_filename, base_url, gsutil, num_threads, directory, recursive,
force, output, ignore_errors, sha1_file, verbose, auto_platform, extract):
# Start up all the worker threads.
all_threads = []
download_start = time.time()
stdout_queue = Queue.Queue()
work_queue = Queue.Queue()
ret_codes = Queue.Queue()
ret_codes.put((0, None))
for thread_num in range(num_threads):
t = threading.Thread(
target=_downloader_worker_thread,
args=[thread_num, work_queue, force, base_url,
gsutil, stdout_queue, ret_codes, verbose, extract])
t.daemon = True
t.start()
all_threads.append(t)
printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue])
printer_thread.daemon = True
printer_thread.start()
# Enumerate our work queue.
work_queue_size = enumerate_work_queue(
input_filename, work_queue, directory, recursive,
ignore_errors, output, sha1_file, auto_platform)
for _ in all_threads:
work_queue.put((None, None)) # Used to tell worker threads to stop.
# Wait for all downloads to finish.
for t in all_threads:
t.join()
stdout_queue.put(None)
printer_thread.join()
# See if we ran into any errors.
max_ret_code = 0
for ret_code, message in ret_codes.queue:
max_ret_code = max(ret_code, max_ret_code)
if message:
print >> sys.stderr, message
if verbose and not max_ret_code:
print 'Success!'
if verbose:
print 'Downloading %d files took %1f second(s)' % (
work_queue_size, time.time() - download_start)
return max_ret_code
def main(args):
usage = ('usage: %prog [options] target\n'
'Target must be:\n'
' (default) a sha1 sum ([A-Za-z0-9]{40}).\n'
' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on '
'the first line.\n'
' (-d or --directory) A directory to scan for .sha1 files.')
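  # Illustrative invocations (the bucket name and paths below are hypothetical),
  # following the %prog usage above:
  #   %prog --bucket some-bucket 0123456789abcdef0123456789abcdef01234567
  #   %prog --bucket some-bucket -s tools/foo.tar.gz.sha1
  #   %prog --bucket some-bucket -d -r third_party/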
parser = optparse.OptionParser(usage)
parser.add_option('-o', '--output',
help='Specify the output file name. Defaults to: '
'(a) Given a SHA1 hash, the name is the SHA1 hash. '
'(b) Given a .sha1 file or directory, the name will '
'match (.*).sha1.')
parser.add_option('-b', '--bucket',
help='Google Storage bucket to fetch from.')
parser.add_option('-e', '--boto',
help='Specify a custom boto file.')
parser.add_option('-c', '--no_resume', action='store_true',
help='Resume download if file is partially downloaded.')
parser.add_option('-f', '--force', action='store_true',
help='Force download even if local file exists.')
parser.add_option('-i', '--ignore_errors', action='store_true',
help='Don\'t throw error if we find an invalid .sha1 file.')
parser.add_option('-r', '--recursive', action='store_true',
help='Scan folders recursively for .sha1 files. '
'Must be used with -d/--directory')
parser.add_option('-t', '--num_threads', default=1, type='int',
help='Number of downloader threads to run.')
parser.add_option('-d', '--directory', action='store_true',
help='The target is a directory. '
'Cannot be used with -s/--sha1_file.')
parser.add_option('-s', '--sha1_file', action='store_true',
help='The target is a file containing a sha1 sum. '
'Cannot be used with -d/--directory.')
parser.add_option('-g', '--config', action='store_true',
help='Alias for "gsutil config". Run this if you want '
'to initialize your saved Google Storage '
'credentials. This will create a read-only '
'credentials file in ~/.boto.depot_tools.')
parser.add_option('-n', '--no_auth', action='store_true',
help='Skip auth checking. Use if it\'s known that the '
'target bucket is a public bucket.')
parser.add_option('-p', '--platform',
help='A regular expression that is compared against '
'Python\'s sys.platform. If this option is specified, '
'the download will happen only if there is a match.')
parser.add_option('-a', '--auto_platform',
action='store_true',
                    help='Detects if any parent folder of the target matches '
                         '(linux|mac|win). If so, the script will only '
                         'process files whose paths match the '
                         'current platform.')
parser.add_option('-u', '--extract',
action='store_true',
                    help='Extract a downloaded tar.gz file. '
                         'Leaves the tar.gz file around for sha1 verification. '
                         'If a directory with the same name as the tar.gz '
                         'file already exists, it is deleted (to get a '
                         'clean state in case of update).')
parser.add_option('-v', '--verbose', action='store_true', default=True,
                    help='DEPRECATED: Defaults to True. Use -q/--quiet '
                         'to suppress.')
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
help='Suppresses diagnostic and progress information.')
(options, args) = parser.parse_args()
# Make sure we should run at all based on platform matching.
if options.platform:
if options.auto_platform:
parser.error('--platform can not be specified with --auto_platform')
if not re.match(options.platform, GetNormalizedPlatform()):
if options.verbose:
print('The current platform doesn\'t match "%s", skipping.' %
options.platform)
return 0
# Set the boto file to /dev/null if we don't need auth.
if options.no_auth:
if (set(('http_proxy', 'https_proxy')).intersection(
env.lower() for env in os.environ) and
'NO_AUTH_BOTO_CONFIG' not in os.environ):
print >> sys.stderr, ('NOTICE: You have PROXY values set in your '
'environment, but gsutil in depot_tools does not '
'(yet) obey them.')
print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG '
'environment variable from being used.')
print >> sys.stderr, ('To use a proxy in this situation, please supply '
'those settings in a .boto file pointed to by '
'the NO_AUTH_BOTO_CONFIG environment var.')
options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull)
# Make sure gsutil exists where we expect it to.
if os.path.exists(GSUTIL_DEFAULT_PATH):
gsutil = Gsutil(GSUTIL_DEFAULT_PATH,
boto_path=options.boto)
else:
parser.error('gsutil not found in %s, bad depot_tools checkout?' %
GSUTIL_DEFAULT_PATH)
# Passing in -g/--config will run our copy of GSUtil, then quit.
if options.config:
print '===Note from depot_tools==='
print 'If you do not have a project ID, enter "0" when asked for one.'
print '===End note from depot_tools==='
print
return gsutil.call('config')
if not args:
parser.error('Missing target.')
if len(args) > 1:
parser.error('Too many targets.')
if not options.bucket:
parser.error('Missing bucket. Specify bucket with --bucket.')
if options.sha1_file and options.directory:
parser.error('Both --directory and --sha1_file are specified, '
'can only specify one.')
if options.recursive and not options.directory:
parser.error('--recursive specified but --directory not specified.')
if options.output and options.directory:
parser.error('--directory is specified, so --output has no effect.')
if (not (options.sha1_file or options.directory)
and options.auto_platform):
parser.error('--auto_platform must be specified with either '
'--sha1_file or --directory')
input_filename = args[0]
# Set output filename if not specified.
if not options.output and not options.directory:
if not options.sha1_file:
# Target is a sha1 sum, so output filename would also be the sha1 sum.
options.output = input_filename
elif options.sha1_file:
# Target is a .sha1 file.
if not input_filename.endswith('.sha1'):
parser.error('--sha1_file is specified, but the input filename '
'does not end with .sha1, and no --output is specified. '
'Either make sure the input filename has a .sha1 '
'extension, or specify --output.')
options.output = input_filename[:-5]
else:
parser.error('Unreachable state.')
# Check if output file already exists.
if not options.directory and not options.force and not options.no_resume:
if os.path.exists(options.output):
      parser.error('Output file %s exists and neither --force nor --no_resume '
                   'was specified.' % options.output)
base_url = 'gs://%s' % options.bucket
return download_from_google_storage(
input_filename, base_url, gsutil, options.num_threads, options.directory,
options.recursive, options.force, options.output, options.ignore_errors,
options.sha1_file, options.verbose, options.auto_platform,
options.extract)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
test_client.py
|
# test_client.py -- Compatibility tests for git client.
# Copyright (C) 2010 Google, Inc.
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Compatibilty tests between the Dulwich client and the cgit server."""
import copy
from io import BytesIO
import os
import select
import signal
import subprocess
import sys
import tarfile
import tempfile
import threading
try:
from urlparse import unquote
except ImportError:
from urllib.parse import unquote
try:
import BaseHTTPServer
import SimpleHTTPServer
except ImportError:
import http.server
BaseHTTPServer = http.server
SimpleHTTPServer = http.server
from dulwich import (
client,
errors,
file,
index,
protocol,
objects,
repo,
)
from dulwich.tests import (
SkipTest,
expectedFailure,
)
from dulwich.tests.compat.utils import (
CompatTestCase,
check_for_daemon,
import_repo_to_dir,
rmtree_ro,
run_git_or_fail,
_DEFAULT_GIT,
)
if sys.platform == 'win32':
import ctypes
class DulwichClientTestBase(object):
"""Tests for client/server compatibility."""
def setUp(self):
self.gitroot = os.path.dirname(
import_repo_to_dir('server_new.export').rstrip(os.sep))
self.dest = os.path.join(self.gitroot, 'dest')
file.ensure_dir_exists(self.dest)
run_git_or_fail(['init', '--quiet', '--bare'], cwd=self.dest)
def tearDown(self):
rmtree_ro(self.gitroot)
def assertDestEqualsSrc(self):
repo_dir = os.path.join(self.gitroot, 'server_new.export')
dest_repo_dir = os.path.join(self.gitroot, 'dest')
with repo.Repo(repo_dir) as src:
with repo.Repo(dest_repo_dir) as dest:
self.assertReposEqual(src, dest)
def _client(self):
raise NotImplementedError()
def _build_path(self):
raise NotImplementedError()
def _do_send_pack(self):
c = self._client()
srcpath = os.path.join(self.gitroot, 'server_new.export')
with repo.Repo(srcpath) as src:
sendrefs = dict(src.get_refs())
del sendrefs[b'HEAD']
c.send_pack(self._build_path('/dest'), lambda _: sendrefs,
src.object_store.generate_pack_data)
def test_send_pack(self):
self._do_send_pack()
self.assertDestEqualsSrc()
def test_send_pack_nothing_to_send(self):
self._do_send_pack()
self.assertDestEqualsSrc()
# nothing to send, but shouldn't raise either.
self._do_send_pack()
def test_send_without_report_status(self):
c = self._client()
c._send_capabilities.remove(b'report-status')
srcpath = os.path.join(self.gitroot, 'server_new.export')
with repo.Repo(srcpath) as src:
sendrefs = dict(src.get_refs())
del sendrefs[b'HEAD']
c.send_pack(self._build_path('/dest'), lambda _: sendrefs,
src.object_store.generate_pack_data)
self.assertDestEqualsSrc()
def make_dummy_commit(self, dest):
b = objects.Blob.from_string(b'hi')
dest.object_store.add_object(b)
t = index.commit_tree(dest.object_store, [(b'hi', b.id, 0o100644)])
c = objects.Commit()
c.author = c.committer = b'Foo Bar <foo@example.com>'
c.author_time = c.commit_time = 0
c.author_timezone = c.commit_timezone = 0
c.message = b'hi'
c.tree = t
dest.object_store.add_object(c)
return c.id
def disable_ff_and_make_dummy_commit(self):
# disable non-fast-forward pushes to the server
dest = repo.Repo(os.path.join(self.gitroot, 'dest'))
run_git_or_fail(['config', 'receive.denyNonFastForwards', 'true'],
cwd=dest.path)
commit_id = self.make_dummy_commit(dest)
return dest, commit_id
def compute_send(self, src):
sendrefs = dict(src.get_refs())
del sendrefs[b'HEAD']
return sendrefs, src.object_store.generate_pack_data
def test_send_pack_one_error(self):
dest, dummy_commit = self.disable_ff_and_make_dummy_commit()
dest.refs[b'refs/heads/master'] = dummy_commit
repo_dir = os.path.join(self.gitroot, 'server_new.export')
with repo.Repo(repo_dir) as src:
sendrefs, gen_pack = self.compute_send(src)
c = self._client()
try:
c.send_pack(self._build_path('/dest'),
lambda _: sendrefs, gen_pack)
except errors.UpdateRefsError as e:
self.assertEqual('refs/heads/master failed to update',
e.args[0])
self.assertEqual({b'refs/heads/branch': b'ok',
b'refs/heads/master': b'non-fast-forward'},
e.ref_status)
def test_send_pack_multiple_errors(self):
dest, dummy = self.disable_ff_and_make_dummy_commit()
# set up for two non-ff errors
branch, master = b'refs/heads/branch', b'refs/heads/master'
dest.refs[branch] = dest.refs[master] = dummy
repo_dir = os.path.join(self.gitroot, 'server_new.export')
with repo.Repo(repo_dir) as src:
sendrefs, gen_pack = self.compute_send(src)
c = self._client()
try:
c.send_pack(self._build_path('/dest'), lambda _: sendrefs,
gen_pack)
except errors.UpdateRefsError as e:
self.assertIn(
str(e),
['{0}, {1} failed to update'.format(
branch.decode('ascii'), master.decode('ascii')),
'{1}, {0} failed to update'.format(
branch.decode('ascii'), master.decode('ascii'))])
self.assertEqual({branch: b'non-fast-forward',
master: b'non-fast-forward'},
e.ref_status)
def test_archive(self):
c = self._client()
f = BytesIO()
c.archive(self._build_path('/server_new.export'), b'HEAD', f.write)
f.seek(0)
tf = tarfile.open(fileobj=f)
self.assertEqual(['baz', 'foo'], tf.getnames())
def test_fetch_pack(self):
c = self._client()
with repo.Repo(os.path.join(self.gitroot, 'dest')) as dest:
result = c.fetch(self._build_path('/server_new.export'), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_fetch_pack_depth(self):
c = self._client()
with repo.Repo(os.path.join(self.gitroot, 'dest')) as dest:
result = c.fetch(self._build_path('/server_new.export'), dest,
depth=1)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertEqual(
dest.get_shallow(),
set([b'35e0b59e187dd72a0af294aedffc213eaa4d03ff',
b'514dc6d3fbfe77361bcaef320c4d21b72bc10be9']))
def test_repeat(self):
c = self._client()
with repo.Repo(os.path.join(self.gitroot, 'dest')) as dest:
result = c.fetch(self._build_path('/server_new.export'), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
result = c.fetch(self._build_path('/server_new.export'), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_incremental_fetch_pack(self):
self.test_fetch_pack()
dest, dummy = self.disable_ff_and_make_dummy_commit()
dest.refs[b'refs/heads/master'] = dummy
c = self._client()
repo_dir = os.path.join(self.gitroot, 'server_new.export')
with repo.Repo(repo_dir) as dest:
result = c.fetch(self._build_path('/dest'), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_fetch_pack_no_side_band_64k(self):
c = self._client()
c._fetch_capabilities.remove(b'side-band-64k')
with repo.Repo(os.path.join(self.gitroot, 'dest')) as dest:
result = c.fetch(self._build_path('/server_new.export'), dest)
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
self.assertDestEqualsSrc()
def test_fetch_pack_zero_sha(self):
# zero sha1s are already present on the client, and should
# be ignored
c = self._client()
with repo.Repo(os.path.join(self.gitroot, 'dest')) as dest:
result = c.fetch(
self._build_path('/server_new.export'), dest,
lambda refs: [protocol.ZERO_SHA])
for r in result.refs.items():
dest.refs.set_if_equals(r[0], None, r[1])
def test_send_remove_branch(self):
with repo.Repo(os.path.join(self.gitroot, 'dest')) as dest:
dummy_commit = self.make_dummy_commit(dest)
dest.refs[b'refs/heads/master'] = dummy_commit
dest.refs[b'refs/heads/abranch'] = dummy_commit
sendrefs = dict(dest.refs)
sendrefs[b'refs/heads/abranch'] = b"00" * 20
del sendrefs[b'HEAD']
def gen_pack(have, want, ofs_delta=False):
return 0, []
c = self._client()
self.assertEqual(dest.refs[b"refs/heads/abranch"], dummy_commit)
c.send_pack(
self._build_path('/dest'), lambda _: sendrefs, gen_pack)
self.assertFalse(b"refs/heads/abranch" in dest.refs)
def test_get_refs(self):
c = self._client()
refs = c.get_refs(self._build_path('/server_new.export'))
repo_dir = os.path.join(self.gitroot, 'server_new.export')
with repo.Repo(repo_dir) as dest:
self.assertDictEqual(dest.refs.as_dict(), refs)
class DulwichTCPClientTest(CompatTestCase, DulwichClientTestBase):
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
if check_for_daemon(limit=1):
raise SkipTest('git-daemon was already running on port %s' %
protocol.TCP_GIT_PORT)
fd, self.pidfile = tempfile.mkstemp(prefix='dulwich-test-git-client',
suffix=".pid")
os.fdopen(fd).close()
args = [_DEFAULT_GIT, 'daemon', '--verbose', '--export-all',
'--pid-file=%s' % self.pidfile,
'--base-path=%s' % self.gitroot,
'--enable=receive-pack', '--enable=upload-archive',
'--listen=localhost', '--reuseaddr',
self.gitroot]
self.process = subprocess.Popen(
args, cwd=self.gitroot,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not check_for_daemon():
raise SkipTest('git-daemon failed to start')
def tearDown(self):
with open(self.pidfile) as f:
pid = int(f.read().strip())
if sys.platform == 'win32':
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(
PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
os.unlink(self.pidfile)
except (OSError, IOError):
pass
self.process.wait()
self.process.stdout.close()
self.process.stderr.close()
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
def _client(self):
return client.TCPGitClient('localhost')
def _build_path(self, path):
return path
if sys.platform == 'win32':
@expectedFailure
def test_fetch_pack_no_side_band_64k(self):
DulwichClientTestBase.test_fetch_pack_no_side_band_64k(self)
class TestSSHVendor(object):
@staticmethod
def run_command(host, command, username=None, port=None,
password=None, key_filename=None):
cmd, path = command.split(' ')
cmd = cmd.split('-', 1)
path = path.replace("'", "")
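        # For illustration: a command like "git-upload-pack '/tmp/repo'" is split
        # into cmd=['git', 'upload-pack'] and path='/tmp/repo' (quotes stripped),
        # so the subprocess below runs ['git', 'upload-pack', '/tmp/repo'].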
p = subprocess.Popen(cmd + [path], bufsize=0, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return client.SubprocessWrapper(p)
class DulwichMockSSHClientTest(CompatTestCase, DulwichClientTestBase):
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
self.real_vendor = client.get_ssh_vendor
client.get_ssh_vendor = TestSSHVendor
def tearDown(self):
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
client.get_ssh_vendor = self.real_vendor
def _client(self):
return client.SSHGitClient('localhost')
def _build_path(self, path):
return self.gitroot + path
class DulwichSubprocessClientTest(CompatTestCase, DulwichClientTestBase):
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
def tearDown(self):
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
def _client(self):
return client.SubprocessGitClient()
def _build_path(self, path):
return self.gitroot + path
class GitHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""HTTP Request handler that calls out to 'git http-backend'."""
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
self.run_backend()
def do_GET(self):
self.run_backend()
def send_head(self):
return self.run_backend()
def log_request(self, code='-', size='-'):
# Let's be quiet, the test suite is noisy enough already
pass
def run_backend(self):
"""Call out to git http-backend."""
# Based on CGIHTTPServer.CGIHTTPRequestHandler.run_cgi:
# Copyright (c) 2001-2010 Python Software Foundation;
# All Rights Reserved
# Licensed under the Python Software Foundation License.
rest = self.path
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
env = copy.deepcopy(os.environ)
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['GIT_PROJECT_ROOT'] = self.server.root_path
env["GIT_HTTP_EXPORT_ALL"] = "1"
env['REQUEST_METHOD'] = self.command
uqrest = unquote(rest)
env['PATH_INFO'] = uqrest
env['SCRIPT_NAME'] = "/"
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.get("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64
import binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
content_type = self.headers.get('content-type')
if content_type:
env['CONTENT_TYPE'] = content_type
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.get('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.get('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = self.headers.get('cookie')
if co:
env['HTTP_COOKIE'] = co
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
self.wfile.write(b"HTTP/1.1 200 Script output follows\r\n")
self.wfile.write(
("Server: %s\r\n" % self.server.server_name).encode('ascii'))
self.wfile.write(
("Date: %s\r\n" % self.date_time_string()).encode('ascii'))
decoded_query = query.replace('+', ' ')
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
env['CONTENT_LENGTH'] = '0'
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
args = ['http-backend']
if '=' not in decoded_query:
args.append(decoded_query)
stdout = run_git_or_fail(
args, input=data, env=env, stderr=subprocess.PIPE)
self.wfile.write(stdout)
class HTTPGitServer(BaseHTTPServer.HTTPServer):
allow_reuse_address = True
def __init__(self, server_address, root_path):
BaseHTTPServer.HTTPServer.__init__(
self, server_address, GitHTTPRequestHandler)
self.root_path = root_path
self.server_name = "localhost"
def get_url(self):
return 'http://%s:%s/' % (self.server_name, self.server_port)
class DulwichHttpClientTest(CompatTestCase, DulwichClientTestBase):
min_git_version = (1, 7, 0, 2)
def setUp(self):
CompatTestCase.setUp(self)
DulwichClientTestBase.setUp(self)
self._httpd = HTTPGitServer(("localhost", 0), self.gitroot)
self.addCleanup(self._httpd.shutdown)
threading.Thread(target=self._httpd.serve_forever).start()
run_git_or_fail(['config', 'http.uploadpack', 'true'],
cwd=self.dest)
run_git_or_fail(['config', 'http.receivepack', 'true'],
cwd=self.dest)
def tearDown(self):
DulwichClientTestBase.tearDown(self)
CompatTestCase.tearDown(self)
self._httpd.shutdown()
self._httpd.socket.close()
def _client(self):
return client.HttpGitClient(self._httpd.get_url())
def _build_path(self, path):
return path
def test_archive(self):
raise SkipTest("exporting archives not supported over http")
|
tg_rfc2544_trex.py
|
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Trex traffic generation definitions which implements rfc2544 """
from __future__ import absolute_import
from __future__ import print_function
import multiprocessing
import time
import logging
import os
import yaml
from yardstick import ssh
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
from yardstick.network_services.utils import get_nsb_option
from stl.trex_stl_lib.trex_stl_client import STLClient
from stl.trex_stl_lib.trex_stl_client import LoggerApi
from stl.trex_stl_lib.trex_stl_exceptions import STLError
LOGGING = logging.getLogger(__name__)
DURATION = 30
WAIT_TIME = 3
TREX_SYNC_PORT = 4500
TREX_ASYNC_PORT = 4501
class TrexTrafficGenRFC(GenericTrafficGen):
"""
    This class handles mapping the traffic profile and generating
    traffic for the rfc2544 test case.
"""
def __init__(self, vnfd):
super(TrexTrafficGenRFC, self).__init__(vnfd)
self._result = {}
self._terminated = multiprocessing.Value('i', 0)
self._queue = multiprocessing.Queue()
self._terminated = multiprocessing.Value('i', 0)
self._traffic_process = None
self._vpci_ascending = None
self.tc_file_name = None
self.client = None
self.my_ports = None
mgmt_interface = self.vnfd["mgmt-interface"]
self.connection = ssh.SSH.from_node(mgmt_interface)
self.connection.wait()
@classmethod
def _split_mac_address_into_list(cls, mac):
octets = mac.split(':')
for i, elem in enumerate(octets):
octets[i] = "0x" + str(elem)
return octets
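    # For example (hypothetical MAC), _split_mac_address_into_list('00:1e:67:aa:bb:cc')
    # returns ['0x00', '0x1e', '0x67', '0xaa', '0xbb', '0xcc'], the per-octet form
    # used for the src_mac/dest_mac entries in the trex_cfg port_info below.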
def _generate_trex_cfg(self, vnfd):
"""
:param vnfd: vnfd.yaml
:return: trex_cfg.yaml file
"""
        trex_cfg = dict(
            port_limit=0,
            version='2',
            interfaces=[],
            port_info=[],
        )
trex_cfg["port_limit"] = len(vnfd["vdu"][0]["external-interface"])
trex_cfg["version"] = '2'
cfg_file = []
vpci = []
port = {}
ext_intf = vnfd["vdu"][0]["external-interface"]
for interface in ext_intf:
virt_intf = interface["virtual-interface"]
vpci.append(virt_intf["vpci"])
port["src_mac"] = \
self._split_mac_address_into_list(virt_intf["local_mac"])
time.sleep(WAIT_TIME)
port["dest_mac"] = \
self._split_mac_address_into_list(virt_intf["dst_mac"])
if virt_intf["dst_mac"]:
trex_cfg["port_info"].append(port.copy())
trex_cfg["interfaces"] = vpci
cfg_file.append(trex_cfg)
with open('/tmp/trex_cfg.yaml', 'w') as outfile:
outfile.write(yaml.safe_dump(cfg_file, default_flow_style=False))
self.connection.put('/tmp/trex_cfg.yaml', '/tmp')
self._vpci_ascending = sorted(vpci)
def scale(self, flavor=""):
        """Scale the VNF based on flavor input."""
super(TrexTrafficGenRFC, self).scale(flavor)
def instantiate(self, scenario_cfg, context_cfg):
self._generate_trex_cfg(self.vnfd)
self.tc_file_name = '{0}.yaml'.format(scenario_cfg['tc'])
trex = os.path.join(self.bin_path, "trex")
err, _, _ = \
self.connection.execute("ls {} >/dev/null 2>&1".format(trex))
if err != 0:
self.connection.put(trex, trex, True)
LOGGING.debug("Starting TRex server...")
_tg_server = \
multiprocessing.Process(target=self._start_server)
_tg_server.start()
while True:
LOGGING.info("Waiting for TG Server to start.. ")
time.sleep(WAIT_TIME)
status = \
self.connection.execute("sudo lsof -i:%s" % TREX_SYNC_PORT)[0]
if status == 0:
LOGGING.info("TG server is up and running.")
return _tg_server.exitcode
if not _tg_server.is_alive():
raise RuntimeError("Traffic Generator process died.")
def listen_traffic(self, traffic_profile):
pass
def _get_logical_if_name(self, vpci):
ext_intf = self.vnfd["vdu"][0]["external-interface"]
for interface in range(len(self.vnfd["vdu"][0]["external-interface"])):
virtual_intf = ext_intf[interface]["virtual-interface"]
if virtual_intf["vpci"] == vpci:
return ext_intf[interface]["name"]
def run_traffic(self, traffic_profile,
client_started=multiprocessing.Value('i', 0)):
self._traffic_process = \
multiprocessing.Process(target=self._traffic_runner,
args=(traffic_profile, self._queue,
client_started, self._terminated))
self._traffic_process.start()
# Wait for traffic process to start
while client_started.value == 0:
time.sleep(1)
return self._traffic_process.is_alive()
def _start_server(self):
mgmt_interface = self.vnfd["mgmt-interface"]
_server = ssh.SSH.from_node(mgmt_interface)
_server.wait()
_server.execute("sudo fuser -n tcp %s %s -k > /dev/null 2>&1" %
(TREX_SYNC_PORT, TREX_ASYNC_PORT))
_server.execute("sudo pkill -9 rex > /dev/null 2>&1")
trex_path = os.path.join(self.bin_path, "trex/scripts")
path = get_nsb_option("trex_path", trex_path)
cmd = "sudo ./t-rex-64 -i --cfg /tmp/trex_cfg.yaml > /dev/null 2>&1"
trex_cmd = "cd %s ; %s" % (path, cmd)
_server.execute(trex_cmd)
def _connect_client(self, client=None):
if client is None:
client = STLClient(username=self.vnfd["mgmt-interface"]["user"],
server=self.vnfd["mgmt-interface"]["ip"],
verbose_level=LoggerApi.VERBOSE_QUIET)
for idx in range(6):
try:
client.connect()
break
except STLError:
LOGGING.info("Unable to connect to Trex. Attempt %s", idx)
time.sleep(WAIT_TIME)
return client
@classmethod
def _get_rfc_tolerance(cls, tc_yaml):
tolerance = '0.8 - 1.0'
if 'tc_options' in tc_yaml['scenarios'][0]:
tc_options = tc_yaml['scenarios'][0]['tc_options']
if 'rfc2544' in tc_options:
tolerance = \
tc_options['rfc2544'].get('allowed_drop_rate', '0.8 - 1.0')
tolerance = tolerance.split('-')
min_tol = float(tolerance[0])
if len(tolerance) == 2:
max_tol = float(tolerance[1])
else:
max_tol = float(tolerance[0])
return [min_tol, max_tol]
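    # Sketch of the expected tc_yaml shape (values are illustrative): with
    #   {'scenarios': [{'tc_options': {'rfc2544': {'allowed_drop_rate': '0.1 - 0.2'}}}]}
    # this returns [0.1, 0.2]; if the key is absent it falls back to the
    # default '0.8 - 1.0', i.e. [0.8, 1.0].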
def _traffic_runner(self, traffic_profile, queue,
client_started, terminated):
LOGGING.info("Starting TRex client...")
tc_yaml = {}
with open(self.tc_file_name) as tc_file:
tc_yaml = yaml.load(tc_file.read())
tolerance = self._get_rfc_tolerance(tc_yaml)
# fixme: fix passing correct trex config file,
# instead of searching the default path
self.my_ports = [0, 1]
self.client = self._connect_client()
self.client.reset(ports=self.my_ports)
self.client.remove_all_streams(self.my_ports) # remove all streams
while not terminated.value:
traffic_profile.execute(self)
client_started.value = 1
time.sleep(DURATION)
self.client.stop(self.my_ports)
time.sleep(WAIT_TIME)
last_res = self.client.get_stats(self.my_ports)
samples = {}
for vpci_idx in range(len(self._vpci_ascending)):
name = \
self._get_logical_if_name(self._vpci_ascending[vpci_idx])
# fixme: VNFDs KPIs values needs to be mapped to TRex structure
if not isinstance(last_res, dict):
terminated.value = 1
last_res = {}
samples[name] = \
{"rx_throughput_fps":
float(last_res.get(vpci_idx, {}).get("rx_pps", 0.0)),
"tx_throughput_fps":
float(last_res.get(vpci_idx, {}).get("tx_pps", 0.0)),
"rx_throughput_mbps":
float(last_res.get(vpci_idx, {}).get("rx_bps", 0.0)),
"tx_throughput_mbps":
float(last_res.get(vpci_idx, {}).get("tx_bps", 0.0)),
"in_packets":
last_res.get(vpci_idx, {}).get("ipackets", 0),
"out_packets":
last_res.get(vpci_idx, {}).get("opackets", 0)}
samples = \
traffic_profile.get_drop_percentage(self, samples,
tolerance[0], tolerance[1])
queue.put(samples)
self.client.stop(self.my_ports)
self.client.disconnect()
queue.put(samples)
def collect_kpi(self):
if not self._queue.empty():
result = self._queue.get()
self._result.update(result)
LOGGING.debug("trex collect Kpis %s", self._result)
return self._result
def terminate(self):
        self._terminated.value = 1  # stop Trex client
self.connection.execute("sudo fuser -n tcp %s %s -k > /dev/null 2>&1" %
(TREX_SYNC_PORT, TREX_ASYNC_PORT))
if self._traffic_process:
self._traffic_process.terminate()
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum import Wallet, WalletStorage
from electrum.util import UserCancelled, InvalidPassword
from electrum.base_wizard import BaseWizard
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"BTCP addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
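            # QPainter.drawPie expects angles in 1/16th-of-a-degree units, hence
            # the 16* factor: each of the n cosigners gets an equal slice, drawn
            # green for the first m (required) signers and gray otherwise.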
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
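# Usage sketch: the dialog methods below are wrapped with @wizard_dialog and receive
# a run_next callback; whatever the wrapped method returns is forwarded to run_next,
# while GoBack/UserCancelled unwind to the previous page or close the wizard.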
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Bitcoin Private Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Bitcoin Private Electrum Wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except IOError:
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
dataengine_install_libs.py
|
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import os
import sys
import logging
import traceback
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
from fabric.api import *
import multiprocessing
def install_libs_on_slaves(slave, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave + 1)
data_engine['slave_ip'] = get_instance_private_ip_address(
data_engine['tag_name'], slave_name)
params = '--os_user {} --instance_ip {} --keyfile "{}" --libs "{}"'\
.format(data_engine['os_user'], data_engine['slave_ip'],
data_engine['keyfile'], data_engine['libs'])
try:
# Run script to install additional libs
local("~/scripts/{}.py {}".format('install_additional_libs', params))
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
if __name__ == "__main__":
instance_class = 'notebook'
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
logging.info('[INSTALLING ADDITIONAL LIBRARIES ON DATAENGINE]')
print('[INSTALLING ADDITIONAL LIBRARIES ON DATAENGINE]')
data_engine = dict()
try:
data_engine['os_user'] = os.environ['conf_os_user']
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
data_engine['cluster_name'] = os.environ['computational_id']
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['master_ip'] = get_instance_private_ip_address(
data_engine['tag_name'], data_engine['master_node_name'])
data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
data_engine['libs'] = os.environ['libs']
data_engine['instance_count'] = int(node_count(data_engine['cluster_name']))
except Exception as err:
append_result("Failed to get parameter.", str(err))
sys.exit(1)
params = '--os_user {} --instance_ip {} --keyfile "{}" --libs "{}"' \
.format(data_engine['os_user'], data_engine['master_ip'],
data_engine['keyfile'], data_engine['libs'])
try:
# Run script to install additional libs
local("~/scripts/{}.py {}".format('install_additional_libs', params))
except:
traceback.print_exc()
raise Exception
try:
jobs = []
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=install_libs_on_slaves, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to install additional libraries.", str(err))
sys.exit(1)
|
pyramidhelpers.py
|
import logging
from multiprocessing import Process
from wsgiref.simple_server import make_server
from hncddpylibs.pipeline import S3PipelineService
from pyramid.config import Configurator
log = logging.getLogger(__file__)
def setup_web_server(processor, port, bucket, service_name):
def index(request):
log.info('NEWJOB_RECEIVED: %s' % request.params)
guid = request.params['guid']
p = Process(target=processor, args=(guid, request.job_configurator))
p.start()
return {'status': 'started', 'guid': guid}
config = Configurator()
def get_job_configurator(request):
return S3PipelineService(bucket, service_name)
config.add_request_method(get_job_configurator, 'job_configurator', reify=True)
config.add_route('index', '/')
config.add_view(index, route_name='index', renderer='json')
app = config.make_wsgi_app()
server = make_server('0.0.0.0', port, app)
server.serve_forever()
return config
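# Usage sketch (the bucket and service names are illustrative):
#   setup_web_server(my_processor, port=8080, bucket='pipeline-bucket',
#                    service_name='resample')
# A GET/POST to '/' with a ?guid=... parameter starts my_processor(guid, job_configurator)
# in a separate Process and returns {'status': 'started', 'guid': guid} as JSON.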
|
ue_mac.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import threading
from typing import List
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ether_types, dhcp
from ryu.ofproto.inet import IPPROTO_TCP, IPPROTO_UDP
from lte.protos.pipelined_pb2 import SetupFlowsResult, UEMacFlowRequest
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.app.inout import INGRESS
from magma.pipelined.directoryd_client import update_record
from magma.pipelined.imsi import encode_imsi, decode_imsi
from magma.pipelined.openflow import flows
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.openflow.exceptions import MagmaOFError
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import IMSI_REG, load_passthrough
class UEMacAddressController(MagmaController):
"""
UE MAC Address Controller
This controller controls table 0 which is the first table every packet
touches. It matches on UE MAC address and sets IMSI metadata
"""
APP_NAME = "ue_mac"
APP_TYPE = ControllerType.SPECIAL
def __init__(self, *args, **kwargs):
super(UEMacAddressController, self).__init__(*args, **kwargs)
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_table = \
self._service_manager.get_table_num(INGRESS)
self.arpd_controller_fut = kwargs['app_futures']['arpd']
self.arp_contoller = None
self._datapath = None
self._dhcp_learn_scratch = \
self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
self._li_port = None
if 'li_local_iface' in kwargs['config']:
self._li_port = \
BridgeTools.get_ofport(kwargs['config']['li_local_iface'])
def initialize_on_connect(self, datapath):
self.delete_all_flows(datapath)
self._datapath = datapath
self._install_default_flows()
def cleanup_on_disconnect(self, datapath):
self.delete_all_flows(datapath)
def handle_restart(self, ue_requests: List[UEMacFlowRequest]
) -> SetupFlowsResult:
"""
        Re-install the default and per-UE MAC flows on a pipelined restart.
"""
        # TODO Potentially we could run diff logic, but I don't think there is
        # a benefit (we don't need stats here)
self.delete_all_flows(self._datapath)
self._install_default_flows()
for ue_req in ue_requests:
self.add_ue_mac_flow(ue_req.sid.id, ue_req.mac_addr)
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.handle_restart(ue_requests)
return SetupFlowsResult(result=SetupFlowsResult.SUCCESS)
def delete_all_flows(self, datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
flows.delete_all_flows_from_table(datapath, self._dhcp_learn_scratch)
def add_ue_mac_flow(self, sid, mac_addr):
self._add_dhcp_passthrough_flows(sid, mac_addr)
self._add_dns_passthrough_flows(sid, mac_addr)
uplink_match = MagmaMatch(eth_src=mac_addr)
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY)
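    # Illustrative call (IMSI and MAC are made up): add_ue_mac_flow('IMSI001010000000013',
    # '5e:cc:cc:b1:49:4b') installs DHCP/DNS passthrough flows plus uplink (eth_src)
    # and downlink (eth_dst) resubmit flows that tag packets with the encoded IMSI.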
def delete_ue_mac_flow(self, sid, mac_addr):
self._delete_dhcp_passthrough_flows(sid, mac_addr)
self._delete_dns_passthrough_flows(sid, mac_addr)
uplink_match = MagmaMatch(eth_src=mac_addr)
self._delete_resubmit_flow(sid, uplink_match)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._delete_resubmit_flow(sid, downlink_match)
def add_arp_response_flow(self, imsi, yiaddr, chaddr):
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.add_ue_arp_flows(self._datapath,
yiaddr, chaddr)
self.logger.info("Learned imsi %s, ip %s and mac %s",
imsi, yiaddr, chaddr)
# Associate IMSI to IPv4 addr in directory service
threading.Thread(target=update_record, args=(str(imsi),
yiaddr)).start()
else:
self.logger.error("ARPD controller not ready, ARP learn FAILED")
def _add_resubmit_flow(self, sid, match, action=None,
priority=flows.DEFAULT_PRIORITY,
next_table=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if next_table is None:
next_table = self.next_table
# Add IMSI metadata
actions.append(
parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
match, actions=actions,
priority=priority,
resubmit_table=next_table)
def _delete_resubmit_flow(self, sid, match, action=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
# Add IMSI metadata
actions.append(
parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
flows.delete_flow(self._datapath, self.tbl_num, match, actions=actions)
def _add_dns_passthrough_flows(self, sid, mac_addr):
parser = self._datapath.ofproto_parser
        # Set so the packet skips enforcement and is sent to egress
action = load_passthrough(parser)
# Install UDP flows for DNS
ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_dst=53,
eth_src=mac_addr)
self._add_resubmit_flow(sid, ulink_match_udp, action,
flows.PASSTHROUGH_PRIORITY)
dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=53,
eth_dst=mac_addr)
self._add_resubmit_flow(sid, dlink_match_udp, action,
flows.PASSTHROUGH_PRIORITY)
# Install TCP flows for DNS
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=53,
eth_src=mac_addr)
self._add_resubmit_flow(sid, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=53,
eth_dst=mac_addr)
self._add_resubmit_flow(sid, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY)
def _delete_dns_passthrough_flows(self, sid, mac_addr):
parser = self._datapath.ofproto_parser
# Set so packet skips enforcement controller
action = load_passthrough(parser)
# Install UDP flows for DNS
ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_dst=53,
eth_src=mac_addr)
self._delete_resubmit_flow(sid, ulink_match_udp, action)
dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=53,
eth_dst=mac_addr)
self._delete_resubmit_flow(sid, dlink_match_udp, action)
# Install TCP flows for DNS
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=53,
eth_src=mac_addr)
self._delete_resubmit_flow(sid, ulink_match_tcp, action)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=53,
eth_dst=mac_addr)
self._delete_resubmit_flow(sid, dlink_match_tcp, action)
def _add_dhcp_passthrough_flows(self, sid, mac_addr):
ofproto, parser = self._datapath.ofproto, self._datapath.ofproto_parser
# Set so packet skips enforcement controller
action = load_passthrough(parser)
uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=68,
udp_dst=67,
eth_src=mac_addr)
self._add_resubmit_flow(sid, uplink_match, action,
flows.PASSTHROUGH_PRIORITY)
downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=67,
udp_dst=68,
eth_dst=mac_addr)
        # Set so this triggers a packet-in and we can learn the IP to send the ARP response
self._add_resubmit_flow(sid, downlink_match, action,
flows.PASSTHROUGH_PRIORITY, next_table=self._dhcp_learn_scratch)
# Install default flow for dhcp learn scratch
imsi_match = MagmaMatch(imsi=encode_imsi(sid))
flows.add_output_flow(self._datapath, self._dhcp_learn_scratch,
match=imsi_match, actions=[],
priority=flows.PASSTHROUGH_PRIORITY,
output_port=ofproto.OFPP_CONTROLLER,
copy_table=self.next_table,
max_len=ofproto.OFPCML_NO_BUFFER)
def _delete_dhcp_passthrough_flows(self, sid, mac_addr):
parser = self._datapath.ofproto_parser
# Set so packet skips enforcement controller
action = load_passthrough(parser)
uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=68,
udp_dst=67,
eth_src=mac_addr)
self._delete_resubmit_flow(sid, uplink_match, action)
downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=67,
udp_dst=68,
eth_dst=mac_addr)
self._delete_resubmit_flow(sid, downlink_match, action)
imsi_match = MagmaMatch(imsi=encode_imsi(sid))
flows.delete_flow(self._datapath, self._dhcp_learn_scratch, imsi_match)
def _add_uplink_arp_allow_flow(self):
arp_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_ARP)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
arp_match, actions=[],
priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _learn_arp_entry(self, ev):
"""
Learn action to process PacketIn DHCP packets, dhcp ack packets will
be used to learn the ARP entry for the UE to install rules in the arp
table. The DHCP packets will then be sent thorugh the pipeline.
"""
msg = ev.msg
if self._dhcp_learn_scratch != msg.table_id:
# Intended for other application
return
try:
encoded_imsi = _get_encoded_imsi_from_packetin(msg)
# Decode the imsi to properly save in directoryd
imsi = decode_imsi(encoded_imsi)
except MagmaOFError as e:
# No packet direction, but intended for this table
self.logger.error("Error obtaining IMSI from pkt-in: %s", e)
return
pkt = packet.Packet(msg.data)
dhcp_header = pkt.get_protocols(dhcp.dhcp)[0]
# DHCP yiaddr is the client(UE) ip addr
# chaddr is the client mac address
self.add_arp_response_flow(imsi, dhcp_header.yiaddr, dhcp_header.chaddr)
def _install_default_flows(self):
"""
Install default flows
"""
        # Allows ARP packets from the uplink (no eth dst set) to go to the ARP table
self._add_uplink_arp_allow_flow()
if self._li_port:
match = MagmaMatch(in_port=self._li_port)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
match, actions=[], priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
        # TODO We might want a default drop-all rule with min priority, but
        # adding it breaks all unit tests for this controller (needs work)
def _get_encoded_imsi_from_packetin(msg):
"""
Retrieve encoded imsi from the Packet-In message, or raise an exception if
it doesn't exist.
"""
imsi = msg.match.get(IMSI_REG)
if imsi is None:
raise MagmaOFError('IMSI not found in OFPMatch')
return imsi
|
netapi.py
|
# encoding: utf-8
'''
The main entry point for salt-api
'''
# Import python libs
import logging
import multiprocessing
import signal
import os
# Import salt-api libs
import salt.loader
logger = logging.getLogger(__name__)
class NetapiClient(object):
'''
Start each netapi module that is configured to run
'''
def __init__(self, opts):
self.opts = opts
# pid -> {fun: foo, Process: object}
self.pid_map = {}
self.netapi = salt.loader.netapi(self.opts)
def add_process(self, fun):
'''
Start a netapi child process of "fun"
'''
p = multiprocessing.Process(target=self.netapi[fun])
p.start()
logger.info("Started '{0}' api module with pid {1}".format(fun, p.pid))
self.pid_map[p.pid] = {'fun': fun,
'Process': p}
def run(self):
'''
Load and start all available api modules
'''
for fun in self.netapi:
if fun.endswith('.start'):
self.add_process(fun)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
while True:
pid, exit_status = os.wait()
if pid not in self.pid_map:
logger.info(('Process of pid {0} died, not a known netapi'
' process, will not restart').format(pid))
continue
logger.info(('Process {0} ({1}) died with exit status {2},'
' restarting...').format(self.pid_map[pid]['fun'],
pid,
exit_status))
self.pid_map[pid]['Process'].join(1)
self.add_process(self.pid_map[pid]['fun'])
del self.pid_map[pid]
def kill_children(self, *args):
'''
Kill all of the children
'''
        # Iterate over a copy, since entries are removed during iteration.
        for pid, p_map in list(self.pid_map.items()):
            p_map['Process'].terminate()
            p_map['Process'].join()
            del self.pid_map[pid]
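# Usage sketch (editor's addition, not part of salt): NetapiClient is normally
# started by the salt-api daemon, which builds ``opts`` from the master
# configuration. The guarded block below is a minimal, hypothetical
# illustration only; the config path and the salt.config.api_config() call are
# assumptions, not something this module requires.
if __name__ == '__main__':
    import salt.config
    # Build an opts dict suitable for the API modules (path is an assumption).
    opts = salt.config.api_config('/etc/salt/master')
    NetapiClient(opts).run()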
|
sdk_worker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import Queue as queue
import threading
import traceback
import grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
class SdkHarness(object):
def __init__(self, control_address):
self._control_channel = grpc.insecure_channel(control_address)
self._data_channel_factory = data_plane.GrpcClientDataChannelFactory()
def run(self):
    control_stub = beam_fn_api_pb2.BeamFnControlStub(self._control_channel)
# TODO(robertwb): Wire up to new state api.
state_stub = None
self.worker = SdkWorker(state_stub, self._data_channel_factory)
responses = queue.Queue()
no_more_work = object()
def get_responses():
while True:
response = responses.get()
if response is no_more_work:
return
yield response
def process_requests():
      for work_request in control_stub.Control(get_responses()):
logging.info('Got work %s', work_request.instruction_id)
try:
response = self.worker.do_instruction(work_request)
except Exception: # pylint: disable=broad-except
logging.error(
'Error processing instruction %s',
work_request.instruction_id,
exc_info=True)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=work_request.instruction_id,
error=traceback.format_exc())
responses.put(response)
t = threading.Thread(target=process_requests)
t.start()
t.join()
# get_responses may be blocked on responses.get(), but we need to return
# control to its caller.
responses.put(no_more_work)
self._data_channel_factory.close()
logging.info('Done consuming work.')
class SdkWorker(object):
def __init__(self, state_handler, data_channel_factory):
self.fns = {}
self.state_handler = state_handler
self.data_channel_factory = data_channel_factory
def do_instruction(self, request):
request_type = request.WhichOneof('request')
if request_type:
# E.g. if register is set, this will construct
# InstructionResponse(register=self.register(request.register))
return beam_fn_api_pb2.InstructionResponse(**{
'instruction_id': request.instruction_id,
request_type: getattr(self, request_type)
(getattr(request, request_type), request.instruction_id)
})
else:
raise NotImplementedError
def register(self, request, unused_instruction_id=None):
for process_bundle_descriptor in request.process_bundle_descriptor:
self.fns[process_bundle_descriptor.id] = process_bundle_descriptor
return beam_fn_api_pb2.RegisterResponse()
def process_bundle(self, request, instruction_id):
bundle_processor.BundleProcessor(
self.fns[request.process_bundle_descriptor_reference],
self.state_handler,
self.data_channel_factory).process_bundle(instruction_id)
return beam_fn_api_pb2.ProcessBundleResponse()
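# Usage sketch (editor's addition, not part of the Beam sources): the harness
# is normally launched by a runner-provided boot entry point that passes the
# control service address. Assuming a Fn API control service is listening on
# the address below (an assumption for illustration), it can be run directly:
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  SdkHarness('localhost:50051').run()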
|
database.py
|
#!/usr/bin/env python3
"""Connectivity modules for various NoSQL databases."""
__author__ = 'Philipp Engel'
__copyright__ = 'Copyright (c) 2019, Hochschule Neubrandenburg'
__license__ = 'BSD-2-Clause'
import logging
import threading
import time
from typing import Any, Dict, Union
try:
import couchdb
except ImportError:
logging.getLogger().warning('Importing Python module "couchdb" failed')
try:
from tinydb import TinyDB
from tinydb.storages import MemoryStorage
except ImportError:
logging.getLogger().warning('Importing Python module "tinydb" failed')
from core.manager import Manager
from core.observation import Observation
from core.prototype import Prototype
class CouchDriver(Prototype):
"""
    CouchDriver provides connectivity for Apache CouchDB. Observations sent to
    a CouchDriver instance will be cached and then stored in the database
    defined in the configuration. TinyDB is used for caching (either
    file-based or in-memory).
Parameters:
server (str): FQDN or IP address of CouchDB server.
path (str): Additional CouchDB instance path or blank.
port (int): Port number of CouchDB server.
user (str): User name.
password (str): Password.
db (str): Database name.
tls (bool): Use TLS encryption (default: False).
cacheFile (str): Optional file name of local cache database
(e.g., `cache.json`). If not set, an in-memory database will be
used instead.
"""
def __init__(self, module_name: str, module_type: str, manager: Manager):
super().__init__(module_name, module_type, manager)
config = self.get_module_config(self._name)
self._couch = None # CouchDB driver.
self._db = None # CouchDB database.
self._thread = None # Thread doing the caching.
self._timeout = 30.0 # Time to wait on connection error.
cache_file = config.get('cacheFile')
# Initialise local cache database.
if not cache_file or cache_file.strip() == "":
# Create in-memory cache database.
self.logger.verbose('Creating in-memory cache database ...')
self._cache_db = TinyDB(storage=MemoryStorage)
else:
# Create file-based cache database.
try:
self.logger.verbose(f'Opening local cache database '
f'"{cache_file}" ...')
self._cache_db = TinyDB(cache_file)
            except Exception:
                raise ValueError(f'Cache database "{cache_file}" could '
                                 f'not be opened')
# Use either HTTPS or HTTP.
is_tls = config.get('tls', False)
self._scheme = 'https' if is_tls else 'http'
# Configuration of the CouchDB server.
self._server = config.get('server')
self._path = config.get('path', '')
self._port = config.get('port', 5984)
user = config.get('user')
password = config.get('password')
# Generate URI to CouchDB server, for example:
# https://<user>:<password>@iot.example.com:443/couchdb/
self._server_uri = (f'{self._scheme}://{user}:{password}@{self._server}'
f':{self._port}/{self._path}')
# Set name of database to open.
self._db_name = config.get('db')
def _cache_observation_data(self, obs: Observation) -> str:
"""Caches the given observation in local cache database.
Args:
obs: The observation object.
Returns:
Document id of cached data set.
"""
doc_id = self._cache_db.insert(obs.data)
return doc_id
def _connect(self) -> None:
"""Connects to CouchDB database server.
Raises:
Exception: On connection error.
"""
# Connect to CouchDB server.
self.logger.info(f'Connecting to CouchDB server "{self._scheme}://'
f'{self._server}:{self._port}/{self._path}" ...')
self._couch = couchdb.Server(self._server_uri)
# Open database.
if self._db_name not in self._couch:
self.logger.error(f'Database "{self._db_name}" not found on server '
f'"{self._server_uri}"')
self.logger.info(f'Opening CouchDB database "{self._db_name}" ...')
self._db = self._couch[self._db_name]
def _get_cached_observation_data(self) -> Union[Dict[str, Any], None]:
""""Returns a random JSON-serialised observation data set from the
local cache database.
Returns:
Observation data or None if cache is empty.
"""
if len(self._cache_db) > 0:
return self._cache_db.all()[0]
return None
def _insert_observation_data(self, obs_data: Dict[str, Any]) -> bool:
"""Inserts observation data into CouchDB database.
Args:
obs_data: The observation data.
Returns:
True on success, False on failure.
"""
try:
if self._couch is None:
self._connect()
self._db[obs_data.get('id')] = obs_data
self.logger.info(f'Saved observation "{obs_data.get("name")}" of '
f'target "{obs_data.get("target")}" from '
f'port "{obs_data.get("portName")}" to CouchDB '
f'database "{self._db_name}"')
except Exception as e:
self.logger.error(f'Observation "{obs_data.get("name")}" with '
f'target "{obs_data.get("target")}" from port '
f'"{obs_data.get("portName")}" could not be '
f'saved in CouchDB database "{self._db_name}": '
f'{str(e)}')
return False
return True
def _remove_observation_data(self, doc_id: int) -> None:
"""Removes a single observations from the local cache database.
Args:
doc_id: The document id.
"""
self._cache_db.remove(doc_ids=[doc_id])
self.logger.debug(f'Removed observation from cache (id {doc_id})')
def has_cached_observation_data(self) -> bool:
"""Returns whether or not a cached observation exists in the local
cache database.
Returns:
True if cached observation exists, False if not.
"""
        return len(self._cache_db) > 0
def process_observation(self, obs: Observation) -> Observation:
doc_id = self._cache_observation_data(obs)
if doc_id:
self.logger.debug(f'Cached observation "{obs.get("name")}" of '
f'target "{obs.get("target")}" (id {doc_id})')
else:
self.logger.error(f'Caching of observation "{obs.get("name")}" of '
f'target "{obs.get("target")}" failed')
return obs
def run(self) -> None:
"""Inserts cached observation data into CouchDB database."""
while self.is_running:
            # Poor man's event handling ...
if not self.has_cached_observation_data():
time.sleep(1.0)
continue
if len(self._cache_db) > 500:
self.logger.warning('Cache database is running full '
'({} cached observations)'
.format(len(self._cache_db)))
# Insert cached observation data into CouchDB database.
obs_data = self._get_cached_observation_data()
if not obs_data:
continue
self.logger.debug(f'Trying to insert observation '
f'"{obs_data.get("name")}" of target '
f'"{obs_data.get("target")}" '
f'(id {obs_data.doc_id}) into CouchDB '
f'database "{self._db_name}" ...')
# Remove the inserted observation data from local cache.
if self._insert_observation_data(obs_data):
self._remove_observation_data(obs_data.doc_id)
else:
time.sleep(self._timeout)
def start(self) -> None:
"""Starts the module."""
if self._is_running:
return
super().start()
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()
class TinyDriver(Prototype):
"""
TinyDriver stores observations in a TinyDB document store.
Parameters:
path (str): Path to the database file.
"""
def __init__(self, module_name: str, module_type: str, manager: Manager):
super().__init__(module_name, module_type, manager)
config = self.get_module_config(self._name)
self._path = config.get('path')
def process_observation(self, obs: Observation) -> Observation:
try:
self.logger.debug(f'Opening TinyDB document store '
f'"{self._path}" ...')
db = TinyDB(self._path)
doc_id = db.insert(obs.data)
self.logger.verbose(f'Saved observation "{obs.get("name")}" of '
f'target "{obs.get("target")}" in document '
f'store "{self._path}" (id {doc_id})')
db.close()
            self.logger.debug(f'Closed TinyDB document store '
                              f'"{self._path}"')
except Exception as e:
self.logger.critical(f'Could not access document store '
f'"{self._path}": {str(e)}')
return obs
|
kafkaConsumer_Firebase.py
|
import os
import json
from time import sleep
import signal
import threading
import uuid
from kafka import KafkaConsumer
from cryptography.fernet import Fernet
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
def read_from_kafka(topic_name, kafka_addr, f, db, closing_event):
try:
kafka_consumer = KafkaConsumer(bootstrap_servers=kafka_addr,
auto_offset_reset='latest',
api_version=(0, 10, 1),
security_protocol="SASL_SSL",
sasl_mechanism='SCRAM-SHA-256',
sasl_plain_username='5qkeygj1',
sasl_plain_password='QovYFGlHVR5VG3SrKvOEqxrDeH9dk21o')
kafka_consumer.subscribe([topic_name])
    except Exception:
kafka_consumer = None
else:
for msg in kafka_consumer:
if closing_event.is_set(): break
message = json.loads(f.decrypt(msg.value).decode('utf-8'))
# message is a dictionary
print(f"\nMessage: {message}")
# Post message on Firebase database
doc = str(message["server"]+"-"+message["node"]+"-"+message["serverTimestamp"]+"-"+ str(uuid.uuid4()))
#print(f"\nDoc: {doc}")
doc_ref = db.collection("OpcUaNodes").document(doc)
doc_ref.set({
'value': str(message["value"]),
'node': str(message["node"]),
'server': str(message["server"]),
'sourcetimestamp': str(message["sourceTimestamp"]),
'servertimestamp': str(message["serverTimestamp"]),
'status': str(message["status"])
})
#print(result)
if kafka_consumer is not None:
kafka_consumer.close()
def main():
def signal_handler(sig, frame):
print("\nCONSUMER STOPPED! (You pressed CTRL+C)")
closing_event.set()
#thread_opc.join()
os._exit(0)
signal.signal(signal.SIGINT, signal_handler)
with open("config_kafkaConsumer.json") as f:
try:
data = json.load(f)
except Exception as ex:
print(f"ERROR, not a valid JSON!\n{ex.__class__, ex.args}\nExit...")
os._exit(0)
# Use a service account
cred = credentials.Certificate('opcua-client-kafka-gateway-firebase-adminsdk-hn72v-4c7137a285.json')
firebase_admin.initialize_app(cred)
db = firestore.client()
try:
kafka_addr = data["KafkaServer"]
topic_name = data["topic"]
#firebase_collection = data["firebaseCollection"]
except KeyError:
print("ERROR in JSON configuration file! Exit...")
os._exit(0)
with open("secret.txt", "rb") as file:
key = file.read()
f = Fernet(key)
print("\nCONSUMER STARTED...Press CTRL+C to STOP.")
closing_event = threading.Event()
thread_opc = threading.Thread(target=read_from_kafka, args=(topic_name, kafka_addr, f, db, closing_event,))
thread_opc.start()
while(not closing_event.is_set()):
sleep(1)
if __name__ == '__main__':
main()
|
ThreadingTest.py
|
##########################################################################
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import time
import threading
import random
import os
import IECore
class ThreadingTest( unittest.TestCase ) :
def callSomeThings( self, things, args=(), kwArgs=(), threaded=False, iterations=1 ) :
for i in range( 0, iterations ) :
threads = []
for j in range( 0, len( things ) ) :
a = args[j] if args else ()
kwa = kwArgs[j] if kwArgs else {}
if threaded :
t = threading.Thread( target=things[j], args=a, kwargs=kwa )
t.start()
threads.append( t )
else :
things[j]( *a, **kwa )
for t in threads :
t.join()
def testThreadedOpGains( self ) :
## Checks that we actually get a speedup by running a bunch of slow
# C++ ops in parallel.
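        # (editor's note) A threaded speedup is only possible here because the
        # C++ ops are expected to release the GIL while they compute; pure
        # Python work would serialise on the interpreter lock.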
ops = []
kwArgs = []
for i in range( 0, 4 ) :
ops.append( IECore.ParticleMeshOp() )
kwArgs.append( {
"filename" : "test/IECore/data/pdcFiles/particleMesh.pdc",
"useRadiusAttribute" : False,
"radius" : 0.25,
"threshold" : 1,
"bound" : IECore.Box3f( IECore.V3f( -5, -7, -2 ), IECore.V3f( 3, 3, 3 ) ),
"resolution" : IECore.V3i( 80, 80, 80 ),
} )
tStart = time.time()
self.callSomeThings( ops, kwArgs=kwArgs, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( ops, kwArgs=kwArgs, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # may fail on single core machines or machines under varying load
def testThreadedReaders( self ) :
## Checks that we can read a bunch of files in parallel, even when one
# of the Readers is implemented in python. We're using the CachedReader
# here as it forces a call to Reader::create when the GIL isn't held yet.
args = [
( "test/IECore/data/exrFiles/ramp.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/tiff/toTrace.tif", ),
( "test/IECore/data/tiff/toTraceThinned.tif", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/tiff/toTraceThinned.tif", ),
]
sp = IECore.SearchPath( "./", ":" )
calls = [ lambda f : IECore.CachedReader( sp, 1024 * 1024 * 10 ).read( f ) ] * len( args )
self.callSomeThings( calls, args, threaded=True )
def testMixedCPPAndPython( self ) :
## Checks that we can mix a bunch of C++ and python ops concurrently
# without crashing
ops = []
kwArgs = []
for i in range( 0, 4 ) :
ops.append( IECore.ParticleMeshOp() )
kwArgs.append( {
"filename" : "test/IECore/data/pdcFiles/particleMesh.pdc",
"useRadiusAttribute" : False,
"radius" : 0.25,
"threshold" : 1,
"bound" : IECore.Box3f( IECore.V3f( -5, -7, -2 ), IECore.V3f( 3, 3, 3 ) ),
"resolution" : IECore.V3i( 80, 80, 80 ),
} )
ops.append( IECore.ClassLsOp() )
kwArgs.append( { "type" : "op" } )
self.callSomeThings( ops, kwArgs=kwArgs, threaded=True, iterations=5 )
def testReadingGains( self ) :
## Checks that we can use a bunch of readers in different threads and
# that we get a speedup of some sort doing that.
args = [
( "test/IECore/data/exrFiles/ramp.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/pdcFiles/particleMesh.pdc", ),
( "test/IECore/data/cobFiles/ball.cob", ),
( "test/IECore/data/jpg/21mm.jpg", ),
( "test/IECore/data/jpg/exif.jpg", ),
( "test/IECore/data/dpx/ramp.dpx", ),
]
calls = [ lambda f : IECore.Reader.create( f ).read() ] * len( args )
tStart = time.time()
self.callSomeThings( calls, args, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, args, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testWritingGains( self ) :
image = IECore.Reader.create( "test/IECore/data/jpg/21mm.jpg" ).read()
def write( o, f ) :
IECore.Writer.create( o, f ).write()
calls = []
for i in range( 0, 4 ) :
fileName = "test/IECore/test%d.jpg" % i
calls.append( IECore.curry( write, image, fileName ) )
tStart = time.time()
self.callSomeThings( calls, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testCachedReaderConcurrency( self ) :
args = [
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/cobFiles/intDataTen.cob", ),
( "test/IECore/data/cobFiles/intDataTen.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
]
cachedReader = IECore.CachedReader( IECore.SearchPath( "./", ":" ), 1024 * 1024 * 50 )
calls = [ lambda f : cachedReader.read( f ) ] * len( args )
for i in range( 0, 5 ) :
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=True )
def testCachedReaderGains( self ) :
args = [
( "test/IECore/data/jpg/21mm.jpg", ),
( "test/IECore/data/jpg/exif.jpg", ),
( "test/IECore/data/jpg/greyscaleCheckerBoard.jpg", ),
( "test/IECore/data/dpx/ramp.dpx", ),
] * 4
cachedReader = IECore.CachedReader( IECore.SearchPath( "./", ":" ), 1024 * 1024 * 50 )
calls = [ lambda f : cachedReader.read( f ) ] * len( args )
tStart = time.time()
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testPythonColorConverterWithThread( self ) :
def NewSRGBToLinear( inputColorSpace, outputColorSpace ) :
converter = IECore.SRGBToLinearOp()
return converter
IECore.ColorSpaceTransformOp.registerConversion(
"newSRGB", "linear", NewSRGBToLinear
)
runThread = True
def test():
while runThread :
pass
newThread = threading.Thread(target=test)
newThread.start()
reader = IECore.Reader.create( "test/IECore/data/cinFiles/uvMap.512x256.cin" )
reader['colorSpace'] = 'newSRGB'
reader.read()
runThread = False
newThread.join()
def testInterpolatedCacheGains( self ) :
numObjects = 100
numAttrs = 2
def createCache( fileName ) :
data = IECore.V3fVectorData( [ IECore.V3f( 1 ) ] * 50000 )
cache = IECore.AttributeCache( fileName, IECore.IndexedIO.OpenMode.Write )
for i in range( 0, numObjects ) :
for j in range( 0, numAttrs ) :
cache.write( "object%d" % i, "attr%d" % j, data )
createCache( "test/IECore/interpolatedCache.0250.fio" )
createCache( "test/IECore/interpolatedCache.0500.fio" )
cache = IECore.InterpolatedCache(
"test/IECore/interpolatedCache.####.fio",
IECore.InterpolatedCache.Interpolation.Linear,
)
calls = []
for i in range( 0, 200 ) :
calls.append(
IECore.curry(
cache.read,
1.5,
"object%d" % random.uniform( 0, numObjects ),
"attr%d" % random.uniform( 0, numAttrs )
)
)
tStart = time.time()
self.callSomeThings( calls, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def tearDown( self ) :
for f in [
"test/IECore/test0.jpg",
"test/IECore/test1.jpg",
"test/IECore/test2.jpg",
"test/IECore/test3.jpg",
"test/IECore/interpolatedCache.0250.fio",
"test/IECore/interpolatedCache.0500.fio",
] :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
unittest.main()
|
covaidapp.py
|
# import the necessary packages
from __future__ import print_function
from PIL import Image
from PIL import ImageTk
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import tensorflow as tf
import tkinter as tki
import threading
import datetime
import imutils
import cv2
import os
import argparse
class COVAIDApp:
# Parse all arguments
parser = argparse.ArgumentParser(
description='Thermal screening demo by Codevector Labs.')
parser.add_argument('-t', '--threshold_temperature', dest='threshold_temperature', default=100.5, type=float,
                        help='Threshold temperature in Fahrenheit (float)', required=False)
parser.add_argument('-b', '--binary_threshold', dest='binary_threshold', default=200, type=int,
help='Threshold pixel value for binary threshold (between 0-255)', required=False)
parser.add_argument('-c', '--conversion_factor', dest='conversion_factor', default=2.25, type=float,
help='Conversion factor to convert pixel value to temperature (float)', required=False)
parser.add_argument('-a', '--min_area', dest='min_area', default=2400, type=int,
                        help='Minimum area of the rectangle to consider for further processing (int)', required=False)
parser.add_argument('-i', '--input_video', dest='input_video', default=os.path.join("data", "input.mp4"), type=str,
help='Input video file path (string)', required=False)
parser.add_argument('-o', '--output_video', dest='output_video', default=os.path.join("output", "output.avi"), type=str,
help='Output video file path (string)', required=False)
parser.add_argument('-f', '--fps', dest='fps', default=15, type=int,
help='FPS of output video (int)', required=False)
args = parser.parse_args().__dict__
# Global Variables
threshold_temperature = 90.0
currentUnit = "f"
def convert_to_temperature(self, pixel_avg):
"""
        Converts pixel value (mean) to temperature (Fahrenheit) depending upon the camera hardware
"""
return pixel_avg / self.args['conversion_factor']
def process_temperature_frame(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
heatmap_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
heatmap = cv2.applyColorMap(heatmap_gray, cv2.COLORMAP_HOT)
image_with_rectangles = np.copy(heatmap)
# Binary threshold
_, binary_thresh = cv2.threshold(
heatmap_gray, self.args['binary_threshold'], 255, cv2.THRESH_BINARY)
# Image opening: Erosion followed by dilation
kernel = np.ones((5, 5), np.uint8)
image_erosion = cv2.erode(binary_thresh, kernel, iterations=1)
image_opening = cv2.dilate(image_erosion, kernel, iterations=1)
# Get contours from the image obtained by opening operation
contours, _ = cv2.findContours(image_opening, 1, 2)
temperatureArray = [0.0]
for contour in contours:
# rectangle over each contour
x, y, w, h = cv2.boundingRect(contour)
# Pass if the area of rectangle is not large enough
if (w) * (h) < self.args['min_area']:
continue
# Mask is boolean type of matrix.
mask = np.zeros_like(heatmap_gray)
cv2.drawContours(mask, contour, -1, 255, -1)
# Mean of only those pixels which are in blocks and not the whole rectangle selected
mean = self.convert_to_temperature(cv2.mean(heatmap_gray, mask=mask)[0])
            # Colors for rectangles and text
temperature = round(mean, 2)
if temperature:
temperatureArray.append(temperature)
return max(temperatureArray)
def process_temperature_face_frame(self, frame):
# grab the dimensions of the frame and then construct a blob
# from it
image_with_rectangles = np.copy(frame)
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=True)
# pass the blob through the network and obtain the face detections
self.faceNet.setInput(blob)
detections = self.faceNet.forward()
print(detections.shape)
print(self.threshold_temperature)
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
maxTemperature = self.process_temperature_frame(frame)
color = (255, 0, 0)
# Draw rectangles for visualisation
image_with_rectangles = cv2.rectangle(
image_with_rectangles, (startX, startY), (endX, endY), color, 2)
# Write temperature for each rectangle
cv2.putText(image_with_rectangles, "{} F".format(maxTemperature), (startX, startY),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2, cv2.LINE_AA)
return image_with_rectangles
def detect_and_predict_mask(self, frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=True)
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
# print(detections.shape)
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
faceTemperatureList = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
maxTemperature = self.process_temperature_frame(face)
faceTemperatureList.append(maxTemperature)
# face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = tf.keras.applications.mobilenet.preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
# only make a predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
# return a 2-tuple of the face locations and their corresponding
# locations
return (locs, preds, faceTemperatureList)
def __init__(self, vs):
# store the video stream object and output path, then initialize
# the most recently read frame, thread for reading frames, and
# the thread stop event
self.vs = vs
self.frame = None
self.thread = None
self.stopEvent = None
prototxtPath = r"face_detector/deploy.prototxt"
weightsPath = r"face_detector/res10_300x300_ssd_iter_140000.caffemodel"
self.faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
self.maskNet = load_model("face_mask_model.h5")
# initialize the root window and image panel
self.root = tki.Tk()
self.isFahrenheit = tki.IntVar()
self.panel = None
bottomframe = tki.Frame(self.root)
bottomframe.pack(side="bottom")
# create a button, that when pressed, will take the current
# frame and save it to file
# label = tki.Label(self, text ="Temp Treshold")
# label.pack(side="bottom", fill="both", expand="yes", padx=10,
# pady=10)
middleframe = tki.Frame(bottomframe)
middleframe.pack(side="top")
temperatureframe = tki.Frame(middleframe)
temperatureframe.pack(side="top")
label = tki.Label(temperatureframe, text="Temperature Threshold")
label.pack(side="left", fill="both", expand="yes", padx=10,
pady=10)
self.textBox = tki.Entry(temperatureframe)
self.textBox.pack(side="right", fill="both", expand="yes", padx=10,
pady=10)
unitsframe = tki.Frame(middleframe)
unitsframe.pack(side="bottom")
label = tki.Label(unitsframe, text="Units")
label.pack(side="left", fill="both", expand="yes", padx=10,
pady=10)
        R1 = tki.Radiobutton(unitsframe, text="Celsius", variable=self.isFahrenheit, value=1, command=self.sel)
R1.pack(side="left", fill="both", expand="yes", padx=10,
pady=10)
        R2 = tki.Radiobutton(unitsframe, text="Fahrenheit", variable=self.isFahrenheit, value=0, command=self.sel)
R2.pack(side="left", fill="both", expand="yes", padx=10,
pady=10)
legendFrame = tki.Frame(bottomframe)
legendFrame.pack(side="bottom")
btn = tki.Button(legendFrame, text="Submit",
command=self.takeSnapshot)
btn.pack(side="top", fill="both", expand="yes", padx=10,
pady=10)
redLabel = tki.Label(legendFrame, text="A Red coloured box indicates your temperature is higher than the Threshold", anchor='w')
redLabel.pack(side="top", fill="both", expand="yes", padx=10,
pady=10)
yellowLabel = tki.Label(legendFrame, text="A Yellow coloured box indicates your temperature is within Threshold, but your mask is not detected", anchor='w')
yellowLabel.pack(side="top", fill="both", expand="yes", padx=10,
pady=10)
greenLabel = tki.Label(legendFrame, text="A Green coloured box indicates you satisfy the given safety protocol", anchor='w')
greenLabel.pack(side="top", fill="both", expand="yes", padx=10,
pady=10)
# start a thread that constantly pools the video sensor for
# the most recently read frame
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
# set a callback to handle when the window is closed
self.root.wm_title("COV-AID")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
def sel(self):
print(self.isFahrenheit.get())
if self.currentUnit == "f" and self.isFahrenheit.get() == 1:
self.currentUnit = "c"
self.threshold_temperature = 5 * (self.threshold_temperature - 32) / 9
elif self.currentUnit == "c" and self.isFahrenheit.get() == 0:
self.currentUnit = "f"
self.threshold_temperature = (9 * self.threshold_temperature + 160) / 5
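        # Conversion formulas used above (editor's note): C = 5 * (F - 32) / 9
        # and F = (9 * C + 160) / 5, which is the same as F = 9 * C / 5 + 32.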
def videoLoop(self):
# DISCLAIMER:
# I'm not a GUI developer, nor do I even pretend to be. This
# try/except statement is a pretty ugly hack to get around
# a RunTime error that Tkinter throws due to threading
try:
# keep looping over frames until we are instructed to stop
while not self.stopEvent.is_set():
self.frame = self.vs.read()
self.frame = imutils.resize(self.frame, width=1300)
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds, faceTemperatureList) = self.detect_and_predict_mask(self.frame, self.faceNet, self.maskNet)
# loop over the detected face locations and their corresponding
# locations
for (box, pred, temperature) in zip(locs, preds, faceTemperatureList):
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
isHighTemperature = False
isNoMask = False
if mask > withoutMask:
maskLabel = "Mask"
else:
maskLabel = "No Mask"
isNoMask = True
# include the probability in the label
maskLabel = "{}: {:.2f}%".format(maskLabel, max(mask, withoutMask) * 100)
temperatureLabel = ""
# print(self.threshold_temperature)
if self.isFahrenheit.get() == 1:
temperature = 5 * (temperature - 32) / 9
if temperature < self.threshold_temperature:
temperatureLabel = "Temperature: " + str(temperature) + ("F" if self.isFahrenheit.get() == 0 else "C")
else:
temperatureLabel = "High temperature: " + str(temperature) + ("F" if self.isFahrenheit.get() == 0 else "C")
isHighTemperature = True
temperatureColor = (0, 93, 196)
boxColor = (0, 255, 0)
if (isHighTemperature and isNoMask) or (isHighTemperature):
boxColor = (0, 0, 255)
elif isNoMask:
boxColor = (0, 255, 255)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(self.frame, maskLabel, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, boxColor, 2)
cv2.putText(self.frame, temperatureLabel, (startX, endY + 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, temperatureColor, 2)
cv2.rectangle(self.frame, (startX, startY), (endX, endY), boxColor, 2)
# if the panel is not None, we need to initialize it
# OpenCV represents images in BGR order; however PIL
# represents images in RGB order, so we need to swap
# the channels, then convert to PIL and ImageTk format
image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
# otherwise, simply update the panel
else:
self.panel.configure(image=image)
self.panel.image = image
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def takeSnapshot(self):
# grab the current timestamp and use it to construct the
# output path
self.threshold_temperature = float(self.textBox.get())
def onClose(self):
# set the stop event, cleanup the camera, and allow the rest of
# the quit process to continue
print("[INFO] closing...")
cv2.destroyAllWindows()
self.stopEvent.set()
self.vs.stop()
self.root.quit()
|
dataloader.py
|
import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from matching import candidate_reselect as matching
from SPPE.src.utils.eval import getPrediction, getMultiPeakPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class Image_loader(data.Dataset):
def __init__(self, im_names, format='yolo'):
super(Image_loader, self).__init__()
self.img_dir = opt.inputpath
self.imglist = im_names
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
self.format = format
def getitem_ssd(self, index):
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im = Image.open(im_name)
inp = load_image(im_name)
if im.mode == 'L':
im = im.convert('RGB')
ow = oh = 512
im = im.resize((ow, oh))
im = self.transform(im)
return im, inp, im_name
def getitem_yolo(self, index):
inp_dim = int(opt.inp_dim)
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im, orig_img, im_dim = prep_image(im_name, inp_dim)
#im_dim = torch.FloatTensor([im_dim]).repeat(1, 2)
inp = load_image(im_name)
return im, inp, orig_img, im_name, im_dim
def __getitem__(self, index):
if self.format == 'ssd':
return self.getitem_ssd(index)
elif self.format == 'yolo':
return self.getitem_yolo(index)
else:
raise NotImplementedError
def __len__(self):
return len(self.imglist)
class ImageLoader:
def __init__(self, im_names, batchSize=1, format='yolo', queueSize=50):
self.img_dir = opt.inputpath
self.imglist = im_names
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
self.format = format
self.batchSize = batchSize
self.datalen = len(self.imglist)
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store data
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if self.format == 'ssd':
if opt.sp:
p = Thread(target=self.getitem_ssd, args=())
else:
p = mp.Process(target=self.getitem_ssd, args=())
elif self.format == 'yolo':
if opt.sp:
p = Thread(target=self.getitem_yolo, args=())
else:
p = mp.Process(target=self.getitem_yolo, args=())
else:
raise NotImplementedError
p.daemon = True
p.start()
return self
def getitem_ssd(self):
length = len(self.imglist)
for index in range(length):
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im = Image.open(im_name)
inp = load_image(im_name)
if im.mode == 'L':
im = im.convert('RGB')
ow = oh = 512
im = im.resize((ow, oh))
im = self.transform(im)
while self.Q.full():
time.sleep(2)
self.Q.put((im, inp, im_name))
def getitem_yolo(self):
for i in range(self.num_batches):
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
inp_dim = int(opt.inp_dim)
im_name_k = self.imglist[k].rstrip('\n').rstrip('\r')
im_name_k = os.path.join(self.img_dir, im_name_k)
img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(im_name_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list_ = im_dim_list
while self.Q.full():
time.sleep(2)
self.Q.put((img, orig_img, im_name, im_dim_list))
def getitem(self):
return self.Q.get()
def length(self):
return len(self.imglist)
def len(self):
return self.Q.qsize()
class VideoLoader:
def __init__(self, path, batchSize=1, queueSize=50):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.path = path
stream = cv2.VideoCapture(path)
assert stream.isOpened(), 'Cannot capture source'
self.fourcc=int(stream.get(cv2.CAP_PROP_FOURCC))
self.fps=stream.get(cv2.CAP_PROP_FPS)
self.frameSize=(int(stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.stopped = False
self.batchSize = batchSize
self.datalen = int(stream.get(cv2.CAP_PROP_FRAME_COUNT))
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def length(self):
return self.datalen
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
stream = cv2.VideoCapture(self.path)
assert stream.isOpened(), 'Cannot capture source'
for i in range(self.num_batches):
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
inp_dim = int(opt.inp_dim)
(grabbed, frame) = stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.Q.put((None, None, None, None))
                    print('===========================> This video has '+str(k)+' frames in total.')
sys.stdout.flush()
return
# process and add the frame to the queue
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(str(k)+'.jpg')
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list_ = im_dim_list
while self.Q.full():
time.sleep(2)
self.Q.put((img, orig_img, im_name, im_dim_list))
def videoinfo(self):
# indicate the video info
return (self.fourcc,self.fps,self.frameSize)
def getitem(self):
# return next frame in the queue
return self.Q.get()
def len(self):
return self.Q.qsize()
class DetectionLoader:
def __init__(self, dataloder, batchSize=1, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stopped = False
self.dataloder = dataloder
self.batchSize = batchSize
self.datalen = self.dataloder.length()
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
# keep looping the whole dataset
for i in range(self.num_batches):
img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
if img is None:
self.Q.put((None, None, None, None, None, None, None))
return
with torch.no_grad():
# Human Detection
img = img.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(orig_img)):
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
dets = dets.cpu()
im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
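                # (editor's note) The detector ran on a letterboxed square input of
                # size det_inp_dim; the offsets below remove that padding and the
                # division by scaling_factor maps the boxes back to original image
                # coordinates.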
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
for k in range(len(orig_img)):
boxes_k = boxes[dets[:,0]==k]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
pt1 = torch.zeros(boxes_k.size(0), 2)
pt2 = torch.zeros(boxes_k.size(0), 2)
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class DetectionProcessor:
def __init__(self, detectionLoader, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.detectionLoader = detectionLoader
self.stopped = False
self.datalen = self.detectionLoader.datalen
# initialize the queue used to store data
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = pQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
# keep looping the whole dataset
for i in range(self.datalen):
with torch.no_grad():
(orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
if orig_img is None:
self.Q.put((None, None, None, None, None, None, None))
return
if boxes is None or boxes.nelement() == 0:
while self.Q.full():
time.sleep(0.2)
self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
continue
inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
while self.Q.full():
time.sleep(0.2)
self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class VideoDetectionLoader:
def __init__(self, path, batchSize=4, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stream = cv2.VideoCapture(path)
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
self.batchSize = batchSize
self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
def length(self):
return self.datalen
def len(self):
return self.Q.qsize()
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping the whole video
for i in range(self.num_batches):
img = []
inp = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
inp_k = im_to_torch(orig_img_k)
img.append(img_k)
inp.append(inp_k)
orig_img.append(orig_img_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
ht = inp[0].size(1)
wd = inp[0].size(2)
# Human Detection
img = Variable(torch.cat(img)).cuda()
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list = im_dim_list.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(inp)):
while self.Q.full():
time.sleep(0.2)
self.Q.put((inp[k], orig_img[k], None, None))
continue
im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5].cpu()
scores = dets[:, 5:6].cpu()
for k in range(len(inp)):
while self.Q.full():
time.sleep(0.2)
self.Q.put((inp[k], orig_img[k], boxes[dets[:,0]==k], scores[dets[:,0]==k]))
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def read(self):
# return next frame in the queue
return self.Q.get()
def more(self):
# return True if there are still frames in the queue
return self.Q.qsize() > 0
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class WebcamLoader:
def __init__(self, webcam, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoCapture(int(webcam))
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
# initialize the queue used to store frames read from
# the video file
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
# otherwise, ensure the queue has room in it
if not self.Q.full():
# read the next frame from the file
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img, orig_img, dim = prep_frame(frame, inp_dim)
inp = im_to_torch(orig_img)
im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
self.Q.put((img, orig_img, inp, im_dim_list))
else:
with self.Q.mutex:
self.Q.queue.clear()
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue size
return self.Q.qsize()
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class DataWriter:
def __init__(self, save_video=False,
savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640,480),
queueSize=1024):
if save_video:
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
assert self.stream.isOpened(), 'Cannot open video for writing'
self.save_video = save_video
self.stopped = False
self.final_result = []
self.flag = 0
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
if self.save_video:
self.stream.release()
return
# otherwise, ensure the queue is not empty
if not self.Q.empty():
(boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
orig_img = np.array(orig_img, dtype=np.uint8)
if boxes is None:
if opt.save_img or opt.save_video or opt.vis:
img = orig_img
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
if opt.matching:
preds = getMultiPeakPrediction(
hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
result = matching(boxes, scores.numpy(), preds)
else:
preds_hm, preds_img, preds_scores = getPrediction(
hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
result = pose_nms(
boxes, scores, preds_img, preds_scores)
result = {
'imgname': im_name,
'result': result
}
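                    # Keypoints follow the COCO ordering, so indices 5/6 are the
                    # left/right shoulders and 11/12 are the left/right hips.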
person = []
for idx in range(0, len(result['result'])):
person.append( [np.round(result['result'][idx].get('keypoints')[5],0).numpy().astype('int') ,
np.round(result['result'][idx].get('keypoints')[6],0).numpy().astype('int') ,
np.round(result['result'][idx].get('keypoints')[11],0).numpy().astype('int') ,
np.round(result['result'][idx].get('keypoints')[12],0).numpy().astype('int') ] )
person = np.abs(person)
for i in person:
i[:,0] = np.where(i[:,0] >= orig_img.shape[1], orig_img.shape[1]-1, i[:,0])
i[:,1] = np.where(i[:,1] >= orig_img.shape[0], orig_img.shape[0]-1, i[:,1])
print(person)
TXT = open("coord.txt", "a", encoding="utf8")
for idx in range(0, len(result['result'])):
print("LeftShoulder = ", np.round(result['result'][idx].get('keypoints')[5],0).numpy().astype('int'))
print("RightShoulder = ", np.round(result['result'][idx].get('keypoints')[6],0).numpy().astype('int'))
print("LeftHip = ", np.round(result['result'][idx].get('keypoints')[7],0).numpy().astype('int'))
print("RightHip = ", np.round(result['result'][idx].get('keypoints')[8],0).numpy().astype('int'))
print("\n")
ls=np.round(result['result'][idx].get('keypoints')[5],0).numpy().astype('int')
rs=np.round(result['result'][idx].get('keypoints')[6],0).numpy().astype('int')
lh=np.round(result['result'][idx].get('keypoints')[11],0).numpy().astype('int')
rh=np.round(result['result'][idx].get('keypoints')[12],0).numpy().astype('int')
                        for coord in (ls, rs, lh, rh):
                            TXT.write(str(coord[0]) + "\n")
                            TXT.write(str(coord[1]) + "\n")
TXT.close()
"""
person = []
temp = []
for idx in range(0, len(result['result'])):
person.append( [np.round(result['result'][idx].get('keypoints')[5],0).numpy().astype('int') ,
np.round(result['result'][idx].get('keypoints')[6],0).numpy().astype('int') ,
np.round(result['result'][idx].get('keypoints')[11],0).numpy().astype('int') ,
np.round(result['result'][idx].get('keypoints')[12],0).numpy().astype('int') ] )
person = np.abs(person)
for idx in range(0, len(result['result'])):
if person[idx][0][0] >= orig_img.size() :
person[idx][0][0] = 853
if orig_img[person[idx][0][1]][person[idx][0][0]][2] < 140 or orig_img[person[idx][0][1]][person[idx][0][0]][1] > 90 or orig_img[person[idx][0][1]][person[idx][0][0]][0] > 90:
temp.append(idx)
temp.reverse()
for i in temp:
del result['result'][i]
"""
self.final_result.append(result)
if opt.save_img or opt.save_video or opt.vis:
img = vis_frame(orig_img, result)
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
time.sleep(0.1)
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
return not self.Q.empty()
def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
# save next frame in the queue
self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
time.sleep(0.2)
def results(self):
# return final result
return self.final_result
def len(self):
# return queue len
return self.Q.qsize()
class Mscoco(data.Dataset):
def __init__(self, train=True, sigma=1,
scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
self.img_folder = '../data/coco/images' # root image folders
self.is_train = train # training set or test set
self.inputResH = opt.inputResH
self.inputResW = opt.inputResW
self.outputResH = opt.outputResH
self.outputResW = opt.outputResW
self.sigma = sigma
self.scale_factor = scale_factor
self.rot_factor = rot_factor
self.label_type = label_type
self.nJoints_coco = 17
self.nJoints_mpii = 16
self.nJoints = 33
self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17)
self.flipRef = ((2, 3), (4, 5), (6, 7),
(8, 9), (10, 11), (12, 13),
(14, 15), (16, 17))
def __getitem__(self, index):
pass
def __len__(self):
pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
'''
    Crop humans from the original image according to detection results
'''
imght = img.size(1)
imgwidth = img.size(2)
tmp_img = img
tmp_img[0].add_(-0.406)
tmp_img[1].add_(-0.457)
tmp_img[2].add_(-0.480)
for i, box in enumerate(boxes):
upLeft = torch.Tensor(
(float(box[0]), float(box[1])))
bottomRight = torch.Tensor(
(float(box[2]), float(box[3])))
ht = bottomRight[1] - upLeft[1]
width = bottomRight[0] - upLeft[0]
scaleRate = 0.3
upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
bottomRight[0] = max(
min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
bottomRight[1] = max(
min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
try:
inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH, opt.inputResW)
except IndexError:
print(tmp_img.shape)
print(upLeft)
print(bottomRight)
print('===')
pt1[i] = upLeft
pt2[i] = bottomRight
return inps, pt1, pt2
|
plant_guard.py
|
import RPi.GPIO as GPIO
import time
from threading import Lock, Thread
from pathlib import Path
lock = Lock()
keyboardListening = False
keyboardDir = 0
movementTerminated = False
rikt = []
PUMP_PIN = 5
PUMP_REV_PIN = 6
def init(exitOnLoadFailure):
GPIO.setmode(GPIO.BCM)
initPump()
initStepper(exitOnLoadFailure)
def cleanup():
GPIO.cleanup()
def initPump():
GPIO.setup(PUMP_PIN, GPIO.OUT)
GPIO.output(PUMP_PIN, GPIO.LOW)
GPIO.setup(PUMP_REV_PIN, GPIO.OUT)
GPIO.output(PUMP_REV_PIN, GPIO.LOW)
pass
def startPump():
print("pump: 1")
GPIO.output(PUMP_PIN, GPIO.HIGH)
def stopPump():
print("pump: 0")
GPIO.output(PUMP_PIN, GPIO.LOW)
STEPPER_PINS = [24, 23, 22, 27]
STEPS = [[1,0,0,0],[1,1,0,0],[0,1,0,0],[0,1,1,0],[0,0,1,0],[0,0,1,1],[0,0,0,1],[1,0,0,1]]
currentStep = 0
currentAngle = 0
fullRot = 512*8
stepperSleepDur = 0.005
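# Simple complement checksum over the two saved integers; readSavedData uses it
# to detect a corrupted or partially written save file.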
def computeChecksum():
return 0xFFFFFFFF - (currentStep + currentAngle)
def saveFilePath(backup):
return str(Path.home())+'/plant-guard/save'+('_backup.txt' if backup else '.txt')
def readSavedData(backup):
global currentStep
global currentAngle
try:
with open(saveFilePath(backup)) as file:
currentStep = int(file.readline())
currentAngle = int(file.readline())
checksum = int(file.readline())
if checksum != computeChecksum():
                raise ValueError("saved state checksum mismatch")
except:
return False
return True
def writeSavedData(backup):
with open(saveFilePath(backup), 'w') as file:
file.writelines([str(currentStep)+'\n', str(currentAngle)+'\n', str(computeChecksum())+'\n'])
def initStepper(exitOnLoadFailure):
if not readSavedData(False) and not readSavedData(True):
if exitOnLoadFailure:
print("ERROR: No saved state file found")
exit(-1)
else:
print("WARNING: No saved state file found. This is ok if running for the first time")
for p in STEPPER_PINS:
GPIO.setup(p, GPIO.OUT)
updateStepper()
def updateStepper():
# print(STEPS[currentStep])
writeSavedData(False)
writeSavedData(True)
for (p, v) in zip(STEPPER_PINS, STEPS[currentStep]):
GPIO.output(p, GPIO.HIGH if v == 1 else GPIO.LOW)
def degAngle(deg):
return round(deg*512.0/45.0)
def rotateSteps(stepsCnt):
global currentStep
global currentAngle
with lock:
for _ in range(abs(stepsCnt)):
if stepsCnt > 0:
currentStep = (currentStep+1)%len(STEPS)
currentAngle = (currentAngle+1)%fullRot
else:
currentStep = (currentStep - 1 + len(STEPS))%len(STEPS)
currentAngle = (currentAngle - 1 + fullRot)%fullRot
updateStepper()
time.sleep(stepperSleepDur)
if movementTerminated:
break
def rotateTo(angle):
global currentAngle
while angle >= fullRot:
angle -= fullRot
while angle < 0:
angle += fullRot
with lock:
stepsCnt = angle - currentAngle
while stepsCnt > fullRot/2: # 'if' should be good enough...
stepsCnt -= fullRot
while stepsCnt < -fullRot/2: # 'if' should be good enough...
stepsCnt += fullRot
#don't rotate through 360 degrees because of hose and (future) cables
if currentAngle <= fullRot/2 and angle > fullRot/2 and stepsCnt > 0:
stepsCnt -= fullRot
elif currentAngle > fullRot/2 and angle <= fullRot/2 and stepsCnt < 0:
stepsCnt += fullRot
rotateSteps(stepsCnt)
def patrol(angle1, angle2):
if angle1 == angle2:
return
while not movementTerminated:
rotateTo(angle2)
if not movementTerminated:
rotateTo(angle1)
def water(angle1, angle2, dur):
global movementTerminated
rotateTo(angle1)
startPump()
movementTerminated = False
t = Thread(target=patrol, args=(angle1, angle2))
t.start()
time.sleep(dur)
movementTerminated = True
t.join()
movementTerminated = False
stopPump()
time.sleep(1.0)
def setKeyboardListening(kl):
global keyboardListening
global rikt
if keyboardListening != kl:
keyboardListening = kl
if kl:
rikt = Thread(target=rotateIfKey)
rikt.start()
else:
rikt.join()
def setKeyboardDir(kd):
global keyboardDir
keyboardDir = kd
def getKeyboardDir():
return keyboardDir
def getCurrentAngle():
return currentAngle
def rotateIfKey():
global currentStep
global currentAngle
while keyboardListening:
if keyboardDir != 0:
with lock:
if keyboardDir > 0:
currentStep = (currentStep+1)%len(STEPS)
currentAngle = (currentAngle+1)%fullRot
else:
currentStep = (currentStep - 1 + len(STEPS))%len(STEPS)
currentAngle = (currentAngle - 1 + fullRot)%fullRot
updateStepper()
time.sleep(stepperSleepDur)
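
# Example usage -- a minimal sketch, assuming the pump and stepper are wired to
# the pins defined above; the angles and the 5 s watering duration below are
# arbitrary values chosen only for illustration.
if __name__ == '__main__':
    init(exitOnLoadFailure=False)
    try:
        rotateTo(degAngle(45))
        water(degAngle(45), degAngle(90), 5.0)
    finally:
        cleanup()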
|
virustotal.py
|
#!/usr/bin/env python3
__author__ = "Gawen Arab"
__copyright__ = "Copyright 2012, Gawen Arab"
__credits__ = ["Gawen Arab"]
__license__ = "MIT"
__version__ = "1.0.3-python3"
__maintainer__ = "Gawen Arab"
__email__ = "g@wenarab.com"
__status__ = "Production"
# Snippet from http://code.activestate.com/recipes/146306/
from http import client as httpClient
import mimetypes
from urllib.parse import urlparse
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.request import Request
import urllib3
import hashlib
import json
import time
import re
import logging
import threading
logger = logging.getLogger("virustotal")
FILE_SIZE_LIMIT = 30 * 1024 * 1024 # 30MB
class postfile:
@staticmethod
def post_multipart(host, selector, fields, files):
"""
Post fields and files to an http host as multipart/form-data.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return the server's response page.
"""
content_type, body = postfile.encode_multipart_formdata(fields, files)
h = httpClient.HTTPSConnection(host)
h.putrequest('POST', selector)
h.putheader('Content-Type', content_type)
h.putheader('Content-Length', str(len(body)))
h.endheaders()
h.send(body)
response = h.getresponse()
errcode = response.status
errmsg = response.reason
headers = response.getheaders()
return response.read()
@staticmethod
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
boundary = b'----------ThIs_Is_tHe_bouNdaRY_$'
body = ((
b''.join(b"--%b\r\n"
b"Content-Disposition: form-data; name=\"%b\"\r\n"
b"\r\n"
b"%b\r\n" % (boundary, field.encode('utf8'), value.encode('utf8'))
for field, value in fields)
) + b''.join(b"--%b\r\n"
b"Content-Disposition: form-data; name=\"%b\"; filename=\"%b\"\r\n"
b"Content-Type: %b\r\n"
b"\r\n"
b"%b\r\n" % (boundary, key.encode('utf8'),
filename.encode('utf8'),
bytes(postfile.get_content_type(filename),'utf8'),
value)
for key, filename, value in files) +
b"--%b--\r\n" % boundary)
content_type = b'multipart/form-data; boundary=' + boundary
return content_type, body
@staticmethod
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
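# Example of the argument shapes post_multipart expects (a sketch only; the
# host, selector and values are illustrative, not real endpoints):
#
#     fields = [("apikey", "0123abcd")]
#     files = [("file", "sample.bin", open("sample.bin", "rb").read())]
#     body = postfile.post_multipart("example.com", "/upload", fields, files)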
class VirusTotal(object):
_SCAN_ID_RE = re.compile(r"^[a-fA-F0-9]{64}-[0-9]{10}$")
class EntityTooLarge(Exception):
pass
class ApiError(Exception):
pass
def __init__(self, api_key, limit_per_min = None):
limit_per_min = limit_per_min if limit_per_min is not None else 4
super(VirusTotal, self).__init__()
self.api_key = api_key
self.limit_per_min = limit_per_min
self.limits = []
self.limit_lock = threading.Lock()
def __repr__(self):
return "<VirusTotal proxy>"
def _limit_call_handler(self):
with self.limit_lock:
if self.limit_per_min <= 0:
return
now = time.time()
self.limits = [l for l in self.limits if l > now]
self.limits.append(now + 60)
if len(self.limits) >= self.limit_per_min:
wait = self.limits[0] - now
logger.info("Wait for %.2fs because of quotat limit." % (self.limits[0] - now, ))
time.sleep(self.limits[0] - now)
@classmethod
def _fileobj_to_fcontent(cls, anything, filename = None):
# anything can be:
# - A MD5, SHA1, SHA256
# - A scan id
# - A filepath or URL
# - A file object
if isinstance(anything, str):
# Is MD5, SHA1, SHA256?
if all(i in "1234567890abcdef" for i in anything.lower()) and len(anything) in [32, 40, 64]:
return ["resource", anything, filename]
if cls._SCAN_ID_RE.match(anything):
return ["resource", anything, filename]
# Is URL ?
if urlparse(anything).scheme:
fh = urlopen(anything)
else:
# it's file
fh = open(anything, "rb")
with fh as f:
return cls._fileobj_to_fcontent(f)
assert hasattr(anything, "read")
content = anything.read()
if hasattr(anything, "name") and isinstance(anything.name, str):
filename = anything.name
return ["file", filename, content]
def get(self, anything, filename = None):
logger.info("Get report of %r" % (anything, ))
o = self._fileobj_to_fcontent(anything, filename)
if o[0] == "file":
o = (
"resource",
hashlib.sha256(o[2]).hexdigest(),
o[2],
)
data = urlencode({
"apikey": self.api_key,
"resource": o[1],
}).encode('utf8')
self._limit_call_handler()
req = urlopen("http://www.virustotal.com/vtapi/v2/file/report", data).read()
report = Report(req, self)
return report
def scan(self, anything, filename = None, reanalyze = None):
reanalyze = reanalyze if reanalyze is not None else False
if not reanalyze:
# Check if already exists
report = self.get(anything, filename)
if report is not None:
return report
logger.info("Analyze %r" % (anything, ))
o = self._fileobj_to_fcontent(anything, filename)
assert o[0] == "file"
if o[1] is None:
o[1] = "file"
if len(o[2]) > FILE_SIZE_LIMIT:
raise self.EntityTooLarge()
self._limit_call_handler()
ret_json = postfile.post_multipart(
host = "www.virustotal.com",
selector = "https://www.virustotal.com/vtapi/v2/file/scan",
fields = {
"apikey": self.api_key,
}.items(),
files = [
o,
],
)
report = Report(ret_json, self)
if report:
report.update()
return report
class Report(object):
def __new__(cls, r, parent):
if isinstance(r, bytes):
try:
r = json.loads(r)
except ValueError:
raise VirusTotal.ApiError()
assert isinstance(r, dict)
if r["response_code"] == 0:
return None
self = super(Report, cls).__new__(cls)
self.parent = parent
self.update(r)
return self
def update(self, data = None):
data = data if data is not None else self.parent.get(self.scan_id)._report
self._report = data
def __getattr__(self, attr):
# Aliases
item = {
"id": "resource",
"status": "verbose_msg",
}.get(attr, attr)
try:
return self._report[item]
except KeyError:
raise AttributeError(attr)
def __repr__(self):
return "<VirusTotal report %s (%s)>" % (
self.id,
self.status,
)
@property
def state(self):
return {
-2: "analyzing",
1: "ok",
0: "ko",
}.get(self.response_code, "unknown")
@property
def done(self):
return self.state == "ok"
def __iter__(self):
for antivirus, report in self.scans.items():
yield (
(antivirus, report["version"], report["update"]),
report["result"],
)
def join(self, timeout = None, interval = None):
interval = interval if interval is not None else 60
if timeout is not None:
timeout = time.time() + timeout
self.update()
while self.state != "ok" and (timeout is None or time.time() < timeout):
time.sleep(interval)
self.update()
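# Typical library usage -- a sketch only; the API key and file path below are
# placeholders:
#
#     vt = VirusTotal("YOUR-API-KEY")
#     report = vt.scan("suspicious.exe")
#     report.join()                # poll until the analysis is finished
#     print(report.positives, "/", report.total)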
def main():
import sys
import optparse
import threading
from queue import Queue
import glob
from pathlib import Path
import os
parser = optparse.OptionParser(usage = """%prog [-k API_KEY] (scan|get) RESOURCE ...
'scan' asks virustotal to scan the file, even if a report
is available. The resource must be a file.
'get' asks virustotal if a report is available for the given
resource.
A resource can be:
- a hash (md5, sha1, sha256)
- a scan ID
- a filepath or URL""")
parser.add_option("-k", "--key", dest = "api_key", default = None, help = "Set VirusTotal API key.")
parser.add_option("-l", "--limit", dest = "limit_per_min", default = "4", help = "Set limit per minute API call. VirusTotal specifies no more 4 API calls must be done per minute. You can change this value, but VirusTotal maybe ignores some calls and may make this script bug.")
parser.add_option("-v", "--verbose", dest = "verbose", action = "store_true", default = False, help = "Verbose.")
(options, arguments) = parser.parse_args()
logging.getLogger().setLevel(logging.DEBUG if options.verbose else logging.WARNING)
logging.basicConfig()
# This is my API key. Please use it only for examples, not for any production stuff
# You can get an API key signing-up on VirusTotal. It takes 2min.
API_KEY = "XXXXXXX"
api_key = options.api_key or API_KEY
if len(sys.argv) < 3:
parser.print_usage()
return -1
action = arguments.pop(0)
if action.lower() not in ("scan", "get", ):
print("ERROR: unknown action")
return -1
resources = []
for argument in arguments:
for resource in glob.glob(argument):
resources.append(resource)
    v = VirusTotal(api_key, limit_per_min = int(options.limit_per_min))
q = Queue()
def analyze(resource):
try:
if action.lower() == "scan":
report = v.scan(resource, reanalyze = True)
print("%s: Scan started: %s" % (resource, report, ))
report.join()
q.put((resource, report))
print("%s: Scan finished: %s" % (resource, report, ))
print(resource)
elif action.lower() == "get":
report = v.get(resource)
q.put((resource, report))
except VirusTotal.ApiError:
print("VirusTotal returned a non correct response. It may be because the script does too many requests at the minute. See the parameter -l")
threads = []
for resource in resources:
if os.path.isdir(resource):
for file_ in Path(resource).rglob('*'):
if os.path.isfile(file_):
t = threading.Thread(target = analyze, args = (str(file_), ))
threads.append(t)
t.daemon = True
t.start()
else:
t = threading.Thread(target = analyze, args = (resource, ))
threads.append(t)
t.daemon = True
t.start()
for thread in threads:
while thread.is_alive():
try:
thread.join(0.1)
except KeyboardInterrupt:
return
total_positives = 0
while not q.empty():
resource, report = q.get()
print("=== %s ===" % (resource, ))
if report is None:
print("No report is available.")
            continue
print("Report:", report.positives, "/", report.total)
print("- Resource's UID:", report.id)
print("- Scan's UID:", report.scan_id)
print("- Permalink:", report.permalink)
print("- Resource's SHA1:", report.sha1)
print("- Resource's SHA256:", report.sha256)
print("- Resource's MD5:", report.md5)
print()
for antivirus, virus in report:
print("- %s (%s, %s):\t%s" % (antivirus[0], antivirus[1], antivirus[2], virus, ))
print()
total_positives += int(report.positives)
return total_positives
if __name__ == "__main__":
main()
|
uDNS.py
|
# coding=utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import time
import threading
import traceback
import socketserver
import argparse
import codecs
import json
from dnslib import *
TTL = 60 * 5 # completely arbitrary TTL value
round_robin = False
default_records = list()
records = dict()
class DomainName(str):
def __getattr__(self, item):
return DomainName(item + '.' + self)
class BaseRequestHandler(socketserver.BaseRequestHandler):
def get_data(self):
raise NotImplementedError
def send_data(self, data):
raise NotImplementedError
def handle(self):
now = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
print("\n\n%s request %s (%s %s):" % (self.__class__.__name__[:3], now, self.client_address[0],
self.client_address[1]))
try:
data = self.get_data()
self.send_data(dns_response(data))
except Exception:
traceback.print_exc(file=sys.stderr)
class TCPRequestHandler(BaseRequestHandler):
def get_data(self):
data = self.request.recv(8192).strip()
sz = int(codecs.encode(data[:2], 'hex'), 16)
if sz < len(data) - 2:
raise Exception("Wrong size of TCP packet")
elif sz > len(data) - 2:
raise Exception("Too big TCP packet")
return data[2:]
def send_data(self, data):
sz = codecs.decode(hex(len(data))[2:].zfill(4), 'hex')
return self.request.sendall(sz + data)
class UDPRequestHandler(BaseRequestHandler):
def get_data(self):
return self.request[0].strip()
def send_data(self, data):
return self.request[1].sendto(data, self.client_address)
def build_domain_mappings(path):
with open(path) as f:
zone_file = json.load(f)
for domain in zone_file['mappings']:
for d in iter(domain.keys()):
# this loop only runs once, kind of a hack to access the only key in the dict
domain_name = DomainName(d)
print("Domain name:", domain_name)
records[domain_name] = [A(x) for x in domain[domain_name]]
print(records[domain_name])
default_records.extend([A(d) for d in zone_file['otherwise']])
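# Sketch of the zone-file layout this parser expects (the domains and addresses
# are illustrative only):
#
#     {
#         "mappings": [
#             {"example.com": ["1.2.3.4", "5.6.7.8"]},
#             {"foo.example.org": ["10.0.0.1"]}
#         ],
#         "otherwise": ["9.9.9.9"]
#     }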
def add_authoritative_records(reply, domain):
    # ns1 and ns2 are hardcoded in, change if necessary
reply.add_auth(RR(rname=domain, rtype=QTYPE.NS, rclass=1, ttl=TTL, rdata=NS(domain.ns1)))
reply.add_auth(RR(rname=domain, rtype=QTYPE.NS, rclass=1, ttl=TTL, rdata=NS(domain.ns2)))
def dns_response(data):
''' dns_response takes in the raw bytes from the socket and does all the logic behind what
RRs get returned as the response '''
global default_records, records, TTL, round_robin
request = DNSRecord.parse(data)
print(request)
reply = DNSRecord(DNSHeader(id=request.header.id, qr=1, aa=1, ra=1), q=request.q)
qname = request.q.qname
qn = str(qname)
qtype = request.q.qtype
qt = QTYPE[qtype]
found_specific = False
# first look for a specific mapping
for domain, rrs in records.items():
if domain == qn or qn.endswith('.' + domain):
# we are the authoritative name server for this domain and all subdomains
for rdata in rrs:
# only include requested record types (ie. A, MX, etc)
rqt = rdata.__class__.__name__
if qt in ['*', rqt]:
found_specific = True
reply.add_answer(RR(rname=qname, rtype=getattr(QTYPE, str(rqt)), rclass=1, ttl=TTL, rdata=rdata))
# rotate the A entries if round robin is on
if round_robin:
a_records = [x for x in rrs if type(x) == A]
records[domain] = a_records[1:] + a_records[:1] # rotate list
break
# else if a specific mapping is not found, return default A-records
if not found_specific:
for a in default_records:
reply.add_answer(RR(rname=qname, rtype=QTYPE.A, rclass=1, ttl=TTL, rdata=a))
if round_robin:
default_records = default_records[1:] + default_records[:1]
print("---- Reply: ----\n", reply)
return reply.pack()
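# For a quick offline check of dns_response() (a sketch; dnslib provides
# DNSRecord.question for building raw queries):
#
#     raw_query = DNSRecord.question("example.com").pack()
#     raw_reply = dns_response(raw_query)
#     print(DNSRecord.parse(raw_reply))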
if __name__ == '__main__':
# handle cmd line args
parser = argparse.ArgumentParser()
parser.add_argument("ip_addr", type=str, help="Interface", default="127.0.0.1")
parser.add_argument("port", type=int, help="port uDNS should listen on")
parser.add_argument("zone_file", help="path to zone file")
parser.add_argument("--rr", action='store_true',
help='round robin load balances if multiple IP addresses are present for 1 domain')
args = parser.parse_args()
if args.rr:
round_robin = True
build_domain_mappings(args.zone_file)
servers = [
socketserver.ThreadingUDPServer((args.ip_addr, args.port), UDPRequestHandler),
socketserver.ThreadingTCPServer((args.ip_addr, args.port), TCPRequestHandler),
]
print("Starting DNS...")
for s in servers:
thread = threading.Thread(target=s.serve_forever) # that thread will start one more thread for each request
thread.daemon = True # exit the server thread when the main thread terminates
thread.start()
try:
while 1:
time.sleep(1)
sys.stderr.flush()
sys.stdout.flush()
except KeyboardInterrupt:
pass
finally:
for s in servers:
s.shutdown()
|
helpers.py
|
"""Helper functions/classes for unit tests."""
from __future__ import division
import copy
from contextlib import contextmanager
from queue import Queue
import socket
from threading import Thread
from time import sleep
from typing import Dict, Generator, List, Pattern, Union
from flashfocus.client import ClientMonitor
from flashfocus.compat import (
DisplayHandler,
get_focused_window,
get_focused_workspace,
list_mapped_windows,
Window,
)
from flashfocus.errors import WMError
from flashfocus.server import FlashServer
from test.compat import change_focus, clear_event_queue, create_blank_window, switch_workspace
Producer = Union[ClientMonitor, DisplayHandler]
def quick_conf() -> Dict:
return dict(
default_opacity=1,
flash_opacity=0.8,
time=100,
ntimepoints=4,
simple=False,
rules=None,
flash_on_focus=True,
flash_lone_windows="always",
flash_fullscreen=True,
)
def default_flash_param() -> Dict:
return {
"config": {"default": None, "type": [str], "location": "cli"},
"verbosity": {"default": "INFO", "type": str, "location": "cli"},
"default_opacity": {"default": 1, "type": [float], "location": "any"},
"flash_opacity": {"default": 0.8, "type": [float], "location": "any"},
"time": {"default": 100, "type": [float], "location": "any"},
"ntimepoints": {"default": 4, "type": [int], "location": "any"},
"simple": {"default": False, "type": [bool], "location": "any"},
"flash_on_focus": {"default": True, "type": [bool], "location": "any"},
"flash_lone_windows": {"default": "always", "type": [str], "location": "any"},
"flash_fullscreen": {"default": True, "type": [bool], "location": "any"},
"rules": {"default": None, "type": [list, type(None)], "location": "config_file"},
"window_id": {"default": "window1", "type": [Pattern], "location": "rule"},
"window_class": {"default": "Window1", "type": [Pattern], "location": "rule"},
}
class WindowSession:
"""A session of blank windows for testing."""
def __init__(self, num_windows: int = 2) -> None:
wm_names = ["window" + str(i) for i in range(1, num_windows + 1)]
wm_classes = zip(wm_names, [name.capitalize() for name in wm_names])
clear_desktops()
self.windows = [
create_blank_window(wm_name, wm_class)
for wm_name, wm_class in zip(wm_names, wm_classes)
]
# Wait for all of the windows to be mapped
for window in self.windows:
while window not in list_mapped_windows():
pass
change_focus(self.windows[0])
# Wait for the focus to actually change
while get_focused_window() != self.windows[0]:
pass
def destroy(self) -> None:
"""Tear down the window session."""
for window in self.windows:
try:
window.destroy()
except Exception:
pass
class WindowWatcher(Thread):
"""Watch a window for changes in opacity."""
def __init__(self, window: Window):
super(WindowWatcher, self).__init__()
self.window: Window = window
self.opacity_events: List[float] = [window.opacity]
self.keep_going: bool = True
self.done: bool = False
def run(self) -> None:
"""Record opacity changes until stop signal received."""
while self.keep_going:
opacity = self.window.opacity
if opacity != self.opacity_events[-1]:
self.opacity_events.append(opacity)
self.done = True
def stop(self) -> None:
# Give the x server a little time to catch up with requests
sleep(0.2)
self.keep_going = False
while not self.done:
pass
self.opacity_events = [1 if event is None else event for event in self.opacity_events]
def count_flashes(self):
num_flashes = 0
for i, event in enumerate(self.opacity_events):
if 0 < i < len(self.opacity_events) - 1:
if event < self.opacity_events[i - 1] and event < self.opacity_events[i + 1]:
num_flashes += 1
return num_flashes
class StubServer:
"""A server socket which receives a hunk of data and stores it in a list.
Used to test that clients are making correct requests.
"""
def __init__(self, socket: socket.socket):
self.socket = socket
self.data: List[bytes] = []
def await_data(self):
"""Wait for a single piece of data from a client and store it."""
self.data.append(self.socket.recv(1))
def queue_to_list(queue: Queue) -> List:
"""Convert a Queue to a list."""
result = []
while queue.qsize() != 0:
result.append(queue.get())
return result
@contextmanager
def server_running(server: FlashServer) -> Generator:
clear_event_queue()
p = Thread(target=server.event_loop)
p.start()
while not server.ready:
pass
yield
while not server.events.empty():
pass
server.shutdown(disconnect_from_wm=False)
@contextmanager
def watching_windows(windows: List[Window]) -> Generator:
watchers = [WindowWatcher(window) for window in windows]
for watcher in watchers:
watcher.start()
yield watchers
for watcher in watchers:
watcher.stop()
def clear_desktops():
for workspace in range(5):
clear_workspace(workspace)
switch_workspace(0)
while not get_focused_workspace() == 0:
pass
@contextmanager
def new_watched_window() -> Generator:
"""Open a new window and watch it."""
window_session = WindowSession(1)
watcher = WindowWatcher(window_session.windows[0])
watcher.start()
sleep(0.1)
yield window_session.windows[0], watcher
watcher.stop()
window_session.destroy()
@contextmanager
def producer_running(producer: Producer) -> Generator:
producer.start()
# TODO - replace these sleep calls
sleep(0.01)
yield
sleep(0.01)
producer.stop()
def fill_in_rule(partial_rule: Dict) -> Dict:
"""Fill in default param for a rule given a partial rule definition."""
default_rules = {
key: val["default"]
for key, val in default_flash_param().items()
if val["location"] == "any"
}
for key, value in default_rules.items():
if key not in partial_rule.keys():
partial_rule[key] = copy.deepcopy(value)
return partial_rule
def rekey(dic: Dict, new_vals: Dict) -> Dict:
dic_copy = copy.deepcopy(dic)
for key, val in new_vals.items():
dic_copy[key] = val
return dic_copy
def clear_workspace(workspace: int) -> None:
for window in list_mapped_windows(workspace):
try:
window.destroy()
except WMError:
pass
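# Typical test usage -- a sketch only; the FlashServer instance is assumed to be
# built by the test's fixtures and is not constructed here:
#
#     session = WindowSession(2)
#     with server_running(server):
#         with watching_windows(session.windows) as watchers:
#             change_focus(session.windows[1])
#     assert watchers[1].count_flashes() >= 1
#     session.destroy()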
|
image_queue.py
|
from threading import Thread, Event
from contextlib import contextmanager
from queue import Queue
import numpy as np
import time
def _start(queue, stop_event):
"""
Thread target function. Starts the ImageQueue._populate function which runs
indefinitely until stop_event is set.
Args:
queue: A reference to the ImageQueue object onto which the threads
apply.
        stop_event: An event that can be set in the main thread to stop
population of the ImageQueue
"""
while not stop_event.is_set():
queue._populate()
class ImageQueue(object):
"""
Queue object handling loading ImagePair data from disk, preprocessing those
images and adding them to a queue from which mpunet.sequence objects
can pull them for training
The queue will store a maximum number of loaded ImagePairs at a given time.
When the queue is not full, one or more threads will perform the following
sequence of actions in parallel to populate the queue:
1) Select a random image from the ImagePairLoader object
2) An entry function is called on the ImagePair that will perform some
preprocessing operations
3) The image is added to the queue
    Whenever 1) is performed, with a probability managed by the attribute
    'self.load_new_prob' a sample may instead re-add an image that is already
    loaded, processed and present in the queue. This path is much faster, and it
    is taken with a higher probability when the queue is being emptied faster
    than the population threads can keep up.
ImagePairs are pulled from the queue using the context manager method 'get':
...
with image_queue.get() as image:
# Do something with 'image', an ImagePair object
f(image)
...
When the with statement exits, one of two things happens in ImageQueue.get:
1) If the ImagePair no longer exists in the queue, the ImageQueue
invokes an exit function on the ImagePair. This function will
usually free memory.
2) If the ImagePair is still in queue - which may happen when the same
image is re-added to queue to prevent queue exhaustion - the exit
function is NOT invoked.
TODO: Race conditions may occur in updating/referencing the dictionary
storing the number of times images are currently loaded in queue,
during executing of entry and exit functions and during calculation
of load_new_prob.
Update to a more robust queuing approach.
Move to a TensorFlow queue based system perhaps?
"""
def __init__(self, max_queue_size, image_pair_loader, entry_func=None,
entry_func_kw=None, exit_func=None, exit_func_kw=None):
"""
Args:
max_queue_size: Int, the maximum number of ImagePair objects
to store in the queue at a given time
image_pair_loader: The ImagePairLoader object from which images are
fetched.
entry_func: String giving name of method to call on the
ImagePair object at queue entry time.
entry_func_kw: Dict, keyword arguments to supply to entry_func
exit_func: String giving name of method to call on the
ImagePair object at queue exit time.
exit_func_kw: Dict, keyword arguments to supply to exit_func
"""
# Reference Queue and ImagePairLoader objects
self.queue = Queue(maxsize=max_queue_size)
self.image_pair_loader = image_pair_loader
# Initialize probability of loading a new (not currently in queue)
# image to 1.0 (queue is empty at first anyway)
self.load_new_prob = 1.0
# Call the entry func when an image is added to the queue and the exit
# func when the image leaves the queue
self.entry_func = (entry_func, entry_func_kw or {})
self.exit_func = (exit_func, exit_func_kw or {})
# Store reference to all running threads
self.threads = []
        # Total number of items currently in the queue
self.items_in_queue = 0
self._last = 0
self.no_new_counter = 0
        # Number of times each image currently appears in the queue
self.num_times_in_queue = {image: 0 for image in self.image_pair_loader}
@property
def load_new_prob(self):
return self._load_new_prob
@load_new_prob.setter
def load_new_prob(self, value):
self._load_new_prob = np.clip(value, 0.05, 1.0)
def set_entry_func(self, func_str, func_kw=None):
self.entry_func = (func_str, func_kw or {})
def set_exit_func(self, func_str, func_kw=None):
self.exit_func = (func_str, func_kw or {})
def wait_N(self, N):
"""
        Sleep until N images have been added to the queue
Args:
N: Int, number of images to wait for
"""
cur = self.items_in_queue
while self.items_in_queue < cur + N-1:
time.sleep(1)
@contextmanager
def get(self):
"""
Context manager method pulling an image from the queue and yielding it
At yield return time the exit_func is called upon the image unless it
has another reference later in the queue
yields:
an ImagePair from the queue
"""
if self.items_in_queue < 0.1 * self.queue.maxsize:
# If queue is almost empty, halt the main thread a bit
self.wait_N(N=3)
# Get the image from the queue
image = self.queue.get()
# Check if too high new_prob
if self._last:
diff = self.items_in_queue - self._last
if diff > 0 or self.items_in_queue >= self.queue.maxsize-1:
# If queue is increasing in size, increase load new prob
self.load_new_prob *= 1.05
elif diff < 0:
# If queue is decreasing in size, decrease load new prob
self.load_new_prob *= 0.95
else:
self._last = self.items_in_queue
# Yield back
yield image
# Update reference attributes
self.items_in_queue -= 1
self.num_times_in_queue[image] -= 1
# Call exit function on the object
if self.num_times_in_queue[image] == 0:
# Unload if last in the queue
getattr(image, self.exit_func[0])(**self.exit_func[1])
image.load_state = None
def start(self, n_threads=3):
"""
Start populating the queue in n_threads
Args:
n_threads: Number of threads to spin up
"""
for _ in range(n_threads):
stop_event = Event()
thread = Thread(target=_start, args=(self, stop_event))
thread.start()
self.threads.append((thread, stop_event))
def stop(self):
"""
Stop populating the queue by invoking the stop event on all threads and
wait for them to terminate.
"""
print("Stopping %i threads" % len(self.threads))
for _, event in self.threads:
# Make sure no threads keep working after next addition to the Q
event.set()
for i, (t, _) in enumerate(self.threads):
# Wait for the threads to stop
print(" %i/%i" % (i+1, len(self.threads)), end="\r", flush=True)
t.join()
print("")
@property
def unique_in_queue(self):
"""
Returns:
Int, the current number of unique images in the queue
"""
return sum([bool(m) for m in self.num_times_in_queue.values()])
def await_full(self):
"""
Halt main thread until queue object is populated to its max capacity
"""
while self.items_in_queue < self.queue.maxsize:
print(" Data queue being populated %i/%i" % (self.items_in_queue,
self.queue.maxsize),
end='\r', flush=True)
time.sleep(1)
def _populate(self):
"""
Puts a random image into the queue. The ImagePair is either taken from
the ImagePairLoader in an un-loaded state or from the already loaded,
processed images stored in the current queue.
        This method should be continuously invoked from one or more threads
to maintain a populated queue.
"""
        # With probability load_new_prob (or whenever few unique images are in
        # the queue), load a new image; otherwise re-add one already loaded
load_new = np.random.rand() < self.load_new_prob or \
(self.unique_in_queue < 0.2 * self.queue.maxsize)
# Pick random image
found = False
while not found:
image = self.image_pair_loader.images[np.random.randint(len(self.image_pair_loader))]
already_loaded = bool(self.num_times_in_queue[image])
found = load_new != already_loaded
# Increment the image counter
self.num_times_in_queue[image] += 1
# If the image is not currently loaded, invoke the entry function
if getattr(image, "load_state", None) != self.entry_func[0]:
            # Set load_state so that future calls don't try to load and
# preprocess again
image.load_state = self.entry_func[0]
# Call entry function
getattr(image, self.entry_func[0])(**self.entry_func[1])
# Add it to the queue, block indefinitely until spot is free
self.queue.put(image, block=True, timeout=None)
# Increment in-queue counter
self.items_in_queue += 1
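# Typical usage -- a sketch only; the ImagePairLoader construction and the
# 'load_data'/'unload_data' method names below are placeholders, not part of
# this module:
#
#     queue = ImageQueue(max_queue_size=16, image_pair_loader=loader,
#                        entry_func="load_data", exit_func="unload_data")
#     queue.start(n_threads=3)
#     queue.await_full()
#     with queue.get() as image:
#         ...  # feed `image` to a training sequence
#     queue.stop()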
|
backend_jbe.py
|
"""Construct a backend by using the Stanford Portable Library JAR.
This module handles the initialization of and connection to the backend JAR,
as well as all interprocess communications.
This specific backend should not be directly referenced by any client applications.
"""
from __future__ import print_function
__PLATFORM_INCLUDED = True
import subprocess
import time
import pathlib
import collections
import re
import sys
import os
import shlex
import threading
import queue
from campy.graphics import gevents
from campy.graphics import gtypes
from campy.util import strlib
from campy.system import error
from campy.private.backends.jbe.platformat import *
from campy.private.backends.jbe.platformatter import pformat
# Whether to print all pipe communications to stdout
DEBUG_PIPE = True
def boolalpha(b):
return "true" if b else "false"
class JavaBackend:
SOURCE_TABLE = None
TIMER_TABLE = None
WINDOW_TABLE = None
BACKEND = None
EVENT_QUEUE = None
Q = None
def __init__(self):
if(JavaBackend.BACKEND == None):
print("Initializing...")
from campy.private.backends.jbe.jbepipe import JavaBackendPipe
JavaBackend.BACKEND = JavaBackendPipe()
# self.startupMain()
# JavaBackend.Q = queue.Queue()
# def append_output_to_queue(out, queue):
# for line in iter(out.readline, b''):
# queue.put(line)
# out.close()
# t = threading.Thread(target=append_output_to_queue, args=(JavaBackend.BACKEND.stdout, JavaBackend.Q))
# t.daemon = True # thread dies with the program
# t.start()
JavaBackend.EVENT_QUEUE = collections.deque() # TODO: make me threadsafe
JavaBackend.SOURCE_TABLE = {}
JavaBackend.TIMER_TABLE = {}
JavaBackend.WINDOW_TABLE = {}
### Section: GWindow
def gwindow_add_to_region(self, gw, gobj, region):
command = pformat(GWindow_addToRegion, id=id(gw), gobj_id=id(gobj), region=region)
self.put_pipe(command)
def gwindow_constructor(self, gw, width, height, top_compound, visible=True):
JavaBackend.WINDOW_TABLE[id(gw)] = gw
command = pformat(GWindow_constructor, id=id(gw), width=width, height=height, top_compound=id(top_compound), visible=visible)
self.put_pipe(command)
self.get_status()
def gwindow_delete(self, gw):
del JavaBackend.WINDOW_TABLE[id(gw)]
command = pformat(GWindow_delete, id=id(gw))
self.put_pipe(command)
def gwindow_close(self, gw):
command = pformat(GWindow_close, id=id(gw))
self.put_pipe(command)
def gwindow_request_focus(self, gw):
command = pformat(GWindow_requestFocus, id=id(gw))
self.put_pipe(command)
def gwindow_set_exit_on_close(self, gw, exit_on_close):
command = pformat(GWindow_setExitOnClose, id=id(gw), value=exit_on_close)
self.put_pipe(command)
def gwindow_clear(self, gw, exit_on_close):
command = pformat(GWindow_clear, id=id(gw))
self.put_pipe(command)
def gwindow_clear_canvas(self, gw, exit_on_close):
command = pformat(GWindow_clearCanvas, id=id(gw))
self.put_pipe(command)
def gwindow_repaint(self, gw):
command = pformat(GWindow_repaint, id=id(gw))
self.put_pipe(command)
def gwindow_set_visible(self, flag, gobj = None, gw = None):
if(gw != None):
command = pformat(GWindow_setVisible, id=id(gw), flag=flag)
self.put_pipe(command)
elif(gobj != None):
command = pformat(GObject_setVisible, id=id(gobj), flag=flag)
self.put_pipe(command)
def gwindow_set_window_title(self, gw, title):
command = pformat(GWindow_setTitle, id=id(gw), title=strlib.quote_string(title))
self.put_pipe(command)
def gwindow_get_screen_width(self):
command = pformat(GWindow_getScreenWidth)
self.put_pipe(command)
return float(self.get_result())
def gwindow_get_screen_height(self):
command = pformat(GWindow_getScreenHeight)
self.put_pipe(command)
return float(self.get_result())
def gwindow_exit_graphics(self):
command = pformat(GWindow_exitGraphics)
self.put_pipe(command)
def gwindow_draw(self, gw, gobj):
command = pformat(GWindow_draw, id=id(gw), obj_id=id(gobj))
self.put_pipe(command)
def gwindow_set_region_alignment(self, gw, region, align):
command = pformat(GWindow_setRegionAlignmnet, id=id(gw), region=region, align=align)
self.put_pipe(command)
def gwindow_remove_from_region(self, gw, gobj, region):
command = pformat(GWindow_removeFromRegion, id=id(gw), obj_id=id(gobj), region=region)
self.put_pipe(command)
### SECTION: GObject
def gobject_set_location(self, gobj, x, y):
command = GObject_setLocation.format(id=id(gobj), x=x, y=y)
self.put_pipe(command)
def gobject_set_filled(self, gobj, flag):
command = pformat(GObject_setFilled, id=id(gobj), flag=flag)
self.put_pipe(command)
def gobject_remove(self, gobj):
command = pformat(GObject_remove, id=id(gobj))
self.put_pipe(command)
def gobject_set_color(self, gobj, color):
command = pformat(GObject_setColor, id=id(gobj), color=color)
self.put_pipe(command)
def gobject_set_fill_color(self, gobj, color):
command = pformat(GObject_setFillColor, id=id(gobj), color=color)
self.put_pipe(command)
def gobject_send_forward(self, gobj):
command = pformat(GObject_sendForward, id=id(gobj))
self.put_pipe(command)
def gobject_send_to_front(self, gobj):
command = pformat(GObject_sendToFront, id=id(gobj))
self.put_pipe(command)
def gobject_send_backward(self, gobj):
command = pformat(GObject_sendBackward, id=id(gobj))
self.put_pipe(command)
def gobject_send_to_back(self, gobj):
command = pformat(GObject_sendToBack, id=id(gobj))
self.put_pipe(command)
def gobject_set_size(self, gobj, width, height):
command = pformat(GObject_setSize, id=id(gobj), width=width, height=height)
self.put_pipe(command)
def gobject_get_bounds(self, gobj):
command = pformat(GObject_getBounds, id=id(gobj))
self.put_pipe(command)
result = self.get_result()
        if (not result.startswith("GRectangle(")): raise Exception(result)
return self.scanRectangle(result)
def gobject_set_line_width(self, gobj, line_width):
command = pformat(GObject_setLineWidth, id=id(gobj), line_width = line_width)
self.put_pipe(command)
def gobject_contains(self, gobj, x, y):
command = pformat(GObject_contains, id=id(gobj), x=x, y=y)
self.put_pipe(command)
return (self.get_result() == "true")
def gobject_scale(self, gobj, sx, sy):
command = pformat(GObject_scale, id=id(gobj), sx=sx, sy=sy)
self.put_pipe(command)
def gobject_rotate(self, gobj, theta):
command = pformat(GObject_rotate, id=id(gobj), theta=theta)
self.put_pipe(command)
### END SECTION: GObject
### SECTION: GRect
def grect_constructor(self, gobj, width, height):
command = pformat(GRect_constructor, id=id(gobj), width=width, height=height)
self.put_pipe(command)
### END SECTION: GRect
### SECTION: GRoundRect
def groundrect_constructor(self, gobj, width, height, corner):
command = pformat(GRoundRect_constructor, id=id(gobj), width=width, height=height, corner=corner)
self.put_pipe(command)
### END SECTION: GRoundRect
### SECTION: GCompound
def gcompound_constructor(self, gobj):
command = pformat(GCompound_constructor, id=id(gobj))
self.put_pipe(command)
def gcompound_add(self, compound, gobj):
command = pformat(GCompound_add, compound_id=id(compound), gobj_id=id(gobj))
self.put_pipe(command)
self.get_status()
### END SECTION: GCompound
### SECTION: G3DRect
def g3drect_constructor(self, gobj, width, height, raised):
command = pformat(G3DRect_constructor, id=id(gobj), width=width, height=height, raised=raised)
self.put_pipe(command)
def g3drect_set_raised(self, gobj, raised):
command = pformat(G3DRect_setRaised, id=id(gobj), raised=raised)
self.put_pipe(command)
### END SECTION: G3DRect
### SECTION: GOval
def goval_constructor(self, gobj, width, height):
command = pformat(GOval_constructor, id=id(gobj), width=width, height=height)
self.put_pipe(command)
### END SECTION: GOval
### SECTION: GArc
def garc_constructor(self, gobj, width, height, start, sweep):
command = pformat(GArc_constructor, id=id(gobj), width=width, height=height, start=start, sweep=sweep)
self.put_pipe(command)
def garc_set_start_angle(self, gobj, angle):
command = pformat(GArc_setStartAngle, id=id(gobj), angle=angle)
self.put_pipe(command)
def garc_set_sweep_angle(self, gobj, angle):
command = pformat(GArc_setSweepAngle, id=id(gobj), angle=angle)
self.put_pipe(command)
# TODO WTF is this method
def garc_set_frame_rectangle(self, gobj, x, y, width, height):
command = pformat(GArc_setFrameRectangle, id=id(gobj), x=x, y=y, width=width, height=height)
self.put_pipe(command)
### END SECTION: GArc
### SECTION: GLine
def gline_constructor(self, gobj, x1, y1, x2, y2):
command = pformat(GLine_constructor, id=id(gobj), x1=x1, y1=y1, x2=x2, y2=y2)
self.put_pipe(command)
def gline_set_start_point(self, gobj, x, y):
command = pformat(GLine_setStartPoint, id=id(gobj), x=x, y=y)
self.put_pipe(command)
def gline_set_end_point(self, gobj, x, y):
command = pformat(GLine_setEndPoint, id=id(gobj), x=x, y=y)
self.put_pipe(command)
### END SECTION: GLine
### SECTION: GImage
def gimage_constructor(self, gobj, filename):
if(filename[0] != "/" and filename[1:3] != ":\\"):
filename = os.getcwd() + os.sep + filename
for i in range(len(filename)):
if(filename[i] == "\\"):
filename = filename[:i] + "/" + filename[i+1:]
command = pformat(GImage_constructor, id=id(gobj), filename=filename)
self.put_pipe(command)
result = self.get_result()
if (not result.startswith("GDimension(")): raise Exception(result)
return self.scanDimension(result)
### END SECTION: GImage
### SECTION: GLabel
def glabel_constructor(self, gobj, label):
command = pformat(GLabel_constructor, id=id(gobj), label=label)
self.put_pipe(command)
def glabel_set_font(self, gobj, font):
command = pformat(GLabel_setFont, id=id(gobj), font=font)
self.put_pipe(command)
def glabel_set_label(self, gobj, str):
command = pformat(GLabel_setLabel, id=id(gobj), label=strlib.quote_string(str))
self.put_pipe(command);
def glabel_get_font_ascent(self, gobj):
command = pformat(GLabel_getFontAscent, id=id(gobj))
self.put_pipe(command)
return float(self.get_result())
def glabel_get_font_descent(self, gobj):
command = pformat(GLabel_getFontDescent, id=id(gobj))
self.put_pipe(command)
return float(self.get_result())
def glabel_get_size(self, gobj):
command = pformat(GLabel_getSize, id=id(gobj))
self.put_pipe(command)
# SO BROKEN
return self.scanDimension(self.get_result())
### END SECTION: GLabel
### SECTION: GPolygon
def gpolygon_constructor(self, gobj):
command = pformat(GPolygon_constructor, id=id(gobj))
self.put_pipe(command)
def gpolygon_add_vertex(self, gobj, x, y):
command = pformat(GPolygon_addVertex, id=id(gobj), x=x, y=y)
self.put_pipe(command)
### END SECTION: GPolygon
### Section: GTimer
def gtimer_constructor(self, timer, millis):
JavaBackend.TIMER_TABLE[id(timer)] = timer # TODO: why?
command = pformat(GTimer_constructor, id=id(timer), millis=millis)
self.put_pipe(command)
def gtimer_delete(self, timer):
del JavaBackend.TIMER_TABLE[id(timer)] # TODO: why?
command = pformat(GTimer_delete, id=id(timer))
self.put_pipe(command)
def gtimer_start(self, timer):
command = pformat(GTimer_start, id=id(timer))
self.put_pipe(command)
def gtimer_pause(self, millis):
command = pformat(GTimer_pause, millis=millis)
self.put_pipe(command)
self.get_status() # TODO: wtf
def gtimer_stop(self, timer):
command = pformat(GTimer_stop, id=id(timer))
self.put_pipe(command)
### End Section: GTimer
### Section: GBufferedImage
def gbufferedimage_constructor(self, gobj, x, y, width, height):
JavaBackend.SOURCE_TABLE[id(gobj)] = gobj
command = pformat(GBufferedImage_constructor, id=id(gobj), x=int(x), y=int(y), width=int(width), height=int(height))
self.put_pipe(command)
def gbufferedimage_fill(self, gobj, rgb):
command = pformat(GBufferedImage_fill, id=id(gobj), rgb=rgb)
self.put_pipe(command)
def gbufferedimage_fill_region(self, gobj, x, y, width, height, rgb):
command = pformat(GBufferedImage_fillRegion, id=id(gobj), x=int(x), y=int(y), width=int(width), height=int(height), rgb=rgb)
self.put_pipe(command)
def gbufferedimage_load(self, gobj, filename):
command = pformat(GBufferedImage_load, id=id(gobj), filename=filename)
self.put_pipe(command)
return self.get_result()
def gbufferedimage_resize(self, gobj, width, height, retain):
command = pformat(GBufferedImage_resize, id=id(gobj), width=int(width), height=int(height), retain=retain)
self.put_pipe(command)
def gbufferedimage_save(self, gobj, filename):
command = pformat(GBufferedImage_save, id=id(gobj), filename=filename)
self.put_pipe(command)
        self.get_status()  # block until the save completes
def gbufferedimage_set_rgb(self, gobj, x, y, rgb):
command = pformat(GBufferedImage_setRGB, id=id(gobj), x=int(x), y=int(y), rgb=rgb)
self.put_pipe(command)
def gbufferedimage_update_all_pixels(self, gobj, base64):
command = pformat(GBufferedImage_updateAllPixels, id=id(gobj), base64=base64)
self.put_pipe(command)
### End Section: GBufferedImage
### Section: Sound
def create_sound(self, sound, filename):
# if(filename[0] != "/" and filename[1:3] != ":\\"):
# filename = os.getcwd() + os.sep + filename
# for i in range(len(filename)):
# if(filename[i] == "\\"):
# filename = filename[:i] + "/" + filename[i+1:]
command = pformat(Sound_create, id=id(sound), filename=filename)
self.put_pipe(command)
# print(self.get_result())
def delete_sound(self, sound):
command = pformat(Sound_delete, id=id(sound))
self.put_pipe(command)
def play_sound(self, sound):
command = pformat(Sound_play, id=id(sound))
self.put_pipe(command)
### END SECTION: Sound
### SECTION: JBEConsole
def clearConsole(self):
command = pformat(JBEConsole_clear)
self.put_pipe(command)
def setConsoleFont(self, font):
command = pformat(JBEConsole_setFont, font=font)
self.put_pipe(command)
def setConsoleSize(self, width, height):
command = pformat(JBEConsole_setSize, width=width, height=height)
self.put_pipe(command)
### END SECTION: JBEConsole
### SECTION: GInteractor
def setActionCommand(self, gobj, cmd):
command = pformat(GInteractor_setActionCommand, id=id(gobj), cmd=cmd)
self.put_pipe(command)
def getSize(self, gobj):
command = pformat(GInteractor_getSize, id=id(gobj))
self.put_pipe(command)
return self.scanDimension(self.get_result())
def gbutton_constructor(self, gobj, label):
JavaBackend.SOURCE_TABLE[id(gobj)] = gobj
command = pformat(GButton_constructor, id=id(gobj), label=label)
self.put_pipe(command)
def gcheckbox_constructor(self, gobj, label):
JavaBackend.SOURCE_TABLE[id(gobj)] = gobj
command = pformat(GCheckBox_constructor, id=id(gobj), label=label)
self.put_pipe(command)
def gcheckbox_is_selected(self, gobj):
command = pformat(GCheckBox_isSelected, id=id(gobj))
self.put_pipe(command)
result = self.get_result().strip()
return result == "true"
def gcheckbox_set_selected(self, gobj, state):
command = pformat(GCheckBox_setSelected, id=id(gobj), state=state)
self.put_pipe(command)
def gslider_constructor(self, gobj, min, max, value):
JavaBackend.SOURCE_TABLE[id(gobj)] = gobj
command = pformat(GSlider_constructor, id=id(gobj), min=min, max=max, value=value)
self.put_pipe(command)
def gslider_get_value(self, gobj):
command = pformat(GSlider_getValue, id=id(gobj))
self.put_pipe(command)
return int(self.get_result())
def gslider_set_value(self, gobj, value):
command = pformat(GSlider_setValue, id=id(gobj), value=value)
self.put_pipe(command)
def createGTextField(self, gobj, num_chars):
JavaBackend.SOURCE_TABLE[id(gobj)] = gobj
command = pformat(GTextField_constructor, id=id(gobj), num_chars=num_chars)
self.put_pipe(command)
def getText(self, gobj):
command = pformat(GTextField_getText, id=id(gobj))
self.put_pipe(command)
return self.get_result()
    def setText(self, gobj, text):
        command = pformat(GTextField_setText, id=id(gobj), text=strlib.quote_string(text))
        self.put_pipe(command)
def createGChooser(self, gobj):
JavaBackend.SOURCE_TABLE[id(gobj)] = gobj
command = pformat(GChooser_constructor, id=id(gobj))
self.put_pipe(command)
def addItem(self, gobj, item):
command = pformat(GChooser_addItem, id=id(gobj), item=strlib.quote_string(item))
self.put_pipe(command)
def getSelectedItem(self, gobj):
command = pformat(GChooser_getSelectedItem, id=id(gobj))
self.put_pipe(command)
return self.get_result()
def setSelectedItem(self, gobj, item):
command = pformat(GChooser_setSelectedItem, id=id(gobj), item=strlib.quote_string(item))
self.put_pipe(command)
### END SECTION: GInteractor
def file_open_file_dialog(self, title, mode, path):
# TODO: BUGFIX for trailing slashes
command = pformat(File_openFileDialog, title=title, mode=mode, path=path)
self.put_pipe(command)
return self.get_result()
def gfilechooser_show_open_dialog(self, current_dir, file_filter):
command = pformat(GFileChooser_showOpenDialog,
current_dir=current_dir,
file_filter=file_filter
)
self.put_pipe(command)
return self.get_result()
    def gfilechooser_show_save_dialog(self, current_dir, file_filter):
        # NOTE: this reuses the showOpenDialog back-end command; a dedicated
        # save-dialog command may be intended here.
        command = pformat(GFileChooser_showOpenDialog,
current_dir=current_dir,
file_filter=file_filter
)
self.put_pipe(command)
return self.get_result()
def goptionpane_show_confirm_dialog(self, message, title, confirm_type):
command = pformat(GOptionPane_showConfirmDialog,
message=message,
title=title,
type=confirm_type.value
)
self.put_pipe(command)
result = self.get_result()
return int(result)
def goptionpane_show_input_dialog(self, message, title):
command = pformat(GOptionPane_showInputDialog,
message=message,
title=title
)
self.put_pipe(command)
result = self.get_result()
return strlib.url_decode(result)
def goptionpane_show_message_dialog(self, message, title, message_type):
command = pformat(GOptionPane_showMessageDialog,
message=message,
title=title,
type=message_type.value
)
self.put_pipe(command)
self.get_result() # Wait for dialog to close
def goptionpane_show_option_dialog(self, message, title, options, initially_selected):
command = pformat(GOptionPane_showOptionDialog,
message=message,
title=title,
options=', '.join(map(strlib.quote_string, map(strlib.url_encode, map(str, options)))),
initial=initially_selected
)
self.put_pipe(command)
result = self.get_result()
return int(result)
def goptionpane_show_text_file_dialog(self, message, title, rows, cols):
command = pformat(GOptionPane_showTextFileDialog,
message=strlib.quote_string(strlib.url_encode(message)),
title=strlib.quote_string(strlib.url_encode(title)),
rows=rows,
cols=cols
)
self.put_pipe(command)
self.get_result() # Wait for dialog to close
def note_play(self, note, repeat):
note = str(note) + boolalpha(repeat)
command = pformat(Note_play,
note=note
)
self.put_pipe(command)
self.get_result() # Wait for playing to be done
##############################
# Section: Event Interaction
# ----------------------------
# This section implements interaction with the JBE console to process Events
def getNextEvent(self, mask):
if not JavaBackend.EVENT_QUEUE:
command = pformat(GEvent_getNextEvent, mask=mask.value)
self.put_pipe(command)
self.get_result(consume_acks=True, stop_on_event=True) # Will add to EVENT_QUEUE?
if not JavaBackend.EVENT_QUEUE:
# TODO: hotfix for lecture 9.1
return gevents.GEvent()
return None
return JavaBackend.EVENT_QUEUE.popleft()
def waitForEvent(self, mask):
while not JavaBackend.EVENT_QUEUE:
command = pformat(GEvent_waitForEvent, mask=mask.value)
self.put_pipe(command)
self.get_result()
return JavaBackend.EVENT_QUEUE.popleft()
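    # A minimal sketch of how a front-end loop might consume these event helpers
    # (hypothetical caller code, not part of this backend; `backend` is an instance
    # of this class and `mask` is an event-mask enum value):
    #
    #     while True:
    #         event = backend.waitForEvent(mask)
    #         if event.event_type == gevents.EventType.MOUSE_CLICKED:
    #             handle_click(event)   # hypothetical handler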
def parseEvent(self, line):
try:
# TODO(sredmond): This is a broken way to parse tokens.
# Breaks when an event parameter has a space in it.
tokens = re.findall(r"[-\w\.\(]+", line)
if(tokens[0] == "mousePressed("):
return self.parseMouseEvent(tokens[1:], gevents.EventType.MOUSE_PRESSED)
elif(tokens[0] == "mouseReleased("):
return self.parseMouseEvent(tokens[1:], gevents.EventType.MOUSE_RELEASED)
elif(tokens[0] == "mouseClicked("):
return self.parseMouseEvent(tokens[1:], gevents.EventType.MOUSE_CLICKED)
elif(tokens[0] == "mouseMoved("):
return self.parseMouseEvent(tokens[1:], gevents.EventType.MOUSE_MOVED)
elif(tokens[0] == "mouseDragged("):
return self.parseMouseEvent(tokens[1:], gevents.EventType.MOUSE_DRAGGED)
elif(tokens[0] == "keyPressed("):
return self.parseKeyEvent(tokens[1:], gevents.EventType.KEY_PRESSED)
elif(tokens[0] == "keyReleased("):
                return self.parseKeyEvent(tokens[1:], gevents.EventType.KEY_RELEASED)
elif(tokens[0] == "keyTyped("):
return self.parseKeyEvent(tokens[1:], gevents.EventType.KEY_TYPED)
elif(tokens[0] == "actionPerformed("):
return self.parseActionEvent(tokens[1:], gevents.EventType.ACTION_PERFORMED)
elif(tokens[0] == "timerTicked("):
return self.parseTimerEvent(tokens[1:], gevents.EventType.TIMER_TICKED)
elif(tokens[0] == "windowClosed("):
return self.parseWindowEvent(tokens[1:], gevents.EventType.WINDOW_CLOSED)
elif(tokens[0] == "windowResized("):
return self.parseWindowEvent(tokens[1:], gevents.EventType.RESIZED)
elif(tokens[0] == "lastWindowClosed("):
print("Exited normally")
sys.exit(0)
            else:
                # Unrecognized event type; ignore it for now.
                pass
return gevents.GEvent()
except Exception as inst:
print("EXCEPTION")
print("type:")
print(type(inst))
print("exception data:")
print(inst)
print("line:")
print(line)
raise
return gevents.GEvent()
def parseMouseEvent(self, tokens, type):
from campy.graphics import gwindow
id = int(tokens[0])
tokens = tokens[1:]
time = float(tokens[0])
tokens = tokens[1:]
modifiers = int(tokens[0])
tokens = tokens[1:]
x = float(tokens[0])
tokens = tokens[1:]
y = float(tokens[0])
tokens = tokens[1:]
        # print(JavaBackend.WINDOW_TABLE)  # debug
e = gevents.GMouseEvent(type, \
JavaBackend.WINDOW_TABLE[id], \
x, \
y)
# Manually set the internals of the GEvent.
e._time = time
e._modifiers = modifiers
return e
def parseKeyEvent(self, tokens, type):
from campy.graphics import gwindow
id = int(tokens[0])
tokens = tokens[1:]
time = float(tokens[0])
tokens = tokens[1:]
modifiers = int(tokens[0])
tokens = tokens[1:]
keyChar = int(tokens[0])
tokens = tokens[1:]
keyCode = int(tokens[0])
tokens = tokens[1:]
e = gevents.GKeyEvent(type, \
gwindow.GWindow(gwd = JavaBackend.WINDOW_TABLE[id]), \
keyChar, \
keyCode)
# Manually set the internals of the GEvent.
e._time = time
e._modifiers = modifiers
return e
def parseTimerEvent(self, tokens, type):
id = int(tokens[0])
tokens = tokens[1:]
time = float(tokens[0])
tokens = tokens[1:]
e = gevents.GTimerEvent(type, JavaBackend.TIMER_TABLE[id])
# Manually set the internals of the GEvent.
e._time = time
return e
def parseWindowEvent(self, tokens, type):
from campy.graphics import gwindow
id = tokens[0]
tokens = tokens[1:]
time = float(tokens[0])
tokens = tokens[1:]
e = gevents.GWindowEvent(type, gwindow.GWindow(JavaBackend.WINDOW_TABLE[id]))
# Manually set the internals of the GEvent.
e._time = time
return e
def parseActionEvent(self, tokens, type):
id = int(tokens[0])
tokens = tokens[1:]
action = tokens[0]
tokens = tokens[1:]
time = float(tokens[0])
tokens = tokens[1:]
e = gevents.GActionEvent(type, JavaBackend.SOURCE_TABLE[id], action)
# Manually set the internals of the GEvent.
e._time = time
return e
    def scanDimension(self, result):
        tokens = re.findall(r"[-:\w\.]+", result)
        # skip "GDimension"
tokens = tokens[1:]
width = float(tokens[0])
tokens = tokens[1:]
height = float(tokens[0])
return gtypes.GDimension(width, height)
    def scanRectangle(self, result):
        tokens = re.findall(r"[-:\w\.]+", result)
        # skip "GRectangle"
tokens = tokens[1:]
x = float(tokens[0])
tokens = tokens[1:]
y = float(tokens[0])
tokens = tokens[1:]
width = float(tokens[0])
tokens = tokens[1:]
height = float(tokens[0])
return gtypes.GRectangle(x, y, width, height)
##############################
# Section: Console Interaction
# ----------------------------
# This section implements interaction with the JBE console for the Console class
def get_line_console(self):
self.put_pipe(pformat(JBEConsole_getLine))
result = self.get_result(consume_acks=True, caller='get_line_console')
self.echo_console(result + '\n') # TODO: wrong for multiple inputs on one line?
return result
def put_console(self, line, stderr=False):
# BUGFIX: strings that end with '\' don't print because of back-end error;
# kludge fix by appending an "invisible" space after it
if line.endswith('\\'):
line += ' '
self.put_pipe(pformat(JBEConsole_print,
line=line,
stderr=stderr
))
self.echo_console(line, stderr)
def echo_console(self, line, stderr=False):
if True: # getConsoleEcho()
needs_flush = '\n' in line
if needs_flush:
sys.stdout.flush()
(sys.stdout if not stderr else sys.stderr).write(line)
if needs_flush:
sys.stdout.flush()
sys.stderr.flush()
def end_line_console(self, stderr):
self.put_pipe(pformat(JBEConsole_println))
self.echo_console('\n', stderr)
################################
# Section: Backend Communication
# ------------------------------
# The following section implements utility functions to communicate with the
# Java backend process.
def put_pipe(self, command):
self.BACKEND.write(command)
def get_pipe(self):
return self.BACKEND.read()
def get_status(self):
return self.BACKEND.get_status()
    def get_result(self, *args, **kwargs):
        # Some call sites pass flags such as consume_acks/stop_on_event/caller;
        # forward them to the backend implementation.
        return self.BACKEND.get_result(*args, **kwargs)
# def put_pipe(self, command):
# # print(command)
# cmd = command + '\n'
# # out = JavaBackend.BACKEND.communicate(input=command+"\n", timeout=1)[0]
# if DEBUG_PIPE:
# print(cmd)
# JavaBackend.BACKEND.stdin.write(cmd)
# JavaBackend.BACKEND.stdin.flush()
# def get_pipe(self):
# return JavaBackend.Q.get()
# # return JavaBackend.BACKEND.stdout.readline()
# def get_status(self):
# result = self.get_result()
# if result != 'ok':
# error(result)
# # TODO: check for whitespace returned at start or finish
# def get_result(self, consume_acks=True, stop_on_event=False, caller=''):
# while True:
# if DEBUG_PIPE:
# print('getResult(): calling getPipe()...', file=sys.stderr, flush=True)
# line = self.get_pipe()
# if DEBUG_PIPE:
# print(line)
# is_result = line.startswith('result:')
# is_result_long = line.startswith('result_long:')
# is_event = line.startswith('event:')
# is_ack = line.startswith('result:___jbe___ack___')
# has_acm_exception = 'acm.util.ErrorException' in line
# has_exception = 'xception' in line
# has_error = 'Unexpected error' in line
# if is_result_long:
# # Read a long result (sent across multiple lines)
# result = ''
# next_line = self.get_pipe()
# while next_line != 'result_long:end':
# if not line.startswith('result:___jbe___ack___'):
# result += line
# if DEBUG_PIPE:
# print('getResult(): appended line (length so far: {})'.format(len(result)), file=sys.stderr, flush=True)
# next_line = self.get_pipe()
# if DEBUG_PIPE:
# print('getResult(): returning long strings "{}...{}" (length {})'.format(result[:10], result[-10:], len(result)), file=sys.stderr, flush=True)
# return result
# elif ((is_result or is_event) and has_acm_exception) or (not is_result and not is_event and (has_exception or has_error)):
# # Read an error message from the back-end
# if is_result:
# line = line[7:] # Prune 'result:'
# elif is_event:
# line = line[6:] # Prune 'event:'
# result = 'ERROR emitted from Stanford Java back-end process\n{}'.format(line)
# error(result) # TODO: import error
# elif is_result:
# # Read a regular result
# if not is_ack or not consume_acks:
# result = line[7:] # Prune 'result:'
# if DEBUG_PIPE:
# print('getResult(): returning regular result (length {}) {}'.format(len(result), repr(result)), file=sys.stderr, flush=True)
# return result.strip()
# else:
# # Just an acknowledgement of some previous event: not a real result.
# if DEBUG_PIPE:
# print('getResult(): saw ACK (length {}) "{}"'.format(len(line), line), file=sys.stderr, flush=True)
# elif is_event:
# # Read a Java-originated event; enqueue it to process here.
# event = self.parseEvent(line[6:].strip())
# JavaBackend.EVENT_QUEUE.append(event)
# if stop_on_event or (event.event_class == gevents.EventClassType.WINDOW_EVENT and event.event_type == gevents.EventType.CONSOLE_CLOSED and caller == 'get_line_console'):
# return ''
# else:
# if '\tat ' in line or ' at ' in line:
# # a line from a back-end Java exception stack trace;
# # shouldn't really be happening, but back end isn't perfect.
# # echo it here to STDERR so Python user can see it to help diagnose the issue
# print(line, file=sys.stderr, flush=True)
# def startupMain(self):
# spl_location = pathlib.Path(__file__).parent / 'spl.jar'
# # TODO = actually communicate with the jar
# args = shlex.split('java -jar {}'.format(spl_location))
# import sys
# backend = subprocess.Popen(args, \
# shell=False, \
# stdin=subprocess.PIPE, \
# stdout=subprocess.PIPE, \
# stderr=sys.stdout, \
# universal_newlines=True)
# JavaBackend.BACKEND = backend
|
interface.py
|
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.runner_config import RunnerConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
'''
Initialize the Runner() instance
This function will properly initialize both run() and run_async()
functions in the same way and return a value instance of Runner.
See parameters given to :py:func:`ansible_runner.interface.run`
'''
# If running via the transmit-worker-process method, we must only extract things as read-only
# inside of one of these commands. That could be either transmit or worker.
if not kwargs.get('cli_execenv_cmd') and (kwargs.get('streamer') not in ('worker', 'process')):
dump_artifacts(kwargs)
if kwargs.get('streamer'):
# undo any full paths that were dumped by dump_artifacts above in the streamer case
private_data_dir = kwargs['private_data_dir']
project_dir = os.path.join(private_data_dir, 'project')
playbook_path = kwargs.get('playbook') or ''
if os.path.isabs(playbook_path) and playbook_path.startswith(project_dir):
kwargs['playbook'] = os.path.relpath(playbook_path, project_dir)
inventory_path = kwargs.get('inventory') or ''
if os.path.isabs(inventory_path) and inventory_path.startswith(private_data_dir):
kwargs['inventory'] = os.path.relpath(inventory_path, private_data_dir)
roles_path = kwargs.get('envvars', {}).get('ANSIBLE_ROLES_PATH') or ''
if os.path.isabs(roles_path) and roles_path.startswith(private_data_dir):
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.relpath(roles_path, private_data_dir)
debug = kwargs.pop('debug', None)
logfile = kwargs.pop('logfile', None)
if not kwargs.pop("ignore_logging", True):
output.configure()
if debug in (True, False):
output.set_debug('enable' if debug is True else 'disable')
if logfile:
output.set_logfile(logfile)
if kwargs.get("process_isolation", False):
pi_executable = kwargs.get("process_isolation_executable", "podman")
if not check_isolation_executable_installed(pi_executable):
print(f'Unable to find process isolation executable: {pi_executable}')
sys.exit(1)
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
streamer = kwargs.pop('streamer', None)
if streamer:
if streamer == 'transmit':
stream_transmitter = Transmitter(**kwargs)
return stream_transmitter
if streamer == 'worker':
stream_worker = Worker(**kwargs)
return stream_worker
if streamer == 'process':
stream_processor = Processor(event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback,
**kwargs)
return stream_processor
kwargs.pop('_input', None)
kwargs.pop('_output', None)
rc = RunnerConfig(**kwargs)
rc.prepare()
return Runner(rc,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
def run(**kwargs):
'''
Run an Ansible Runner task in the foreground and return a Runner object when complete.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param playbook: The playbook (either supplied here as a list or string... or as a path relative to
``private_data_dir/project``) that will be invoked by runner when executing Ansible.
:param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
:param module_args: The module arguments that will be supplied to ad-hoc mode.
:param host_pattern: The host pattern to match when running in ad-hoc mode.
:param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
a specific host or list of hosts. This can take the form of
- Path to the inventory file in the ``private_data_dir``
- Native python dict supporting the YAML/json inventory structure
- A text INI formatted string
- A list of inventory sources, or an empty list to disable passing inventory
:param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
read from ``env/extravars`` in ``private_data_dir``.
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
:param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
:param forks: Control Ansible parallel concurrency
:param verbosity: Control how verbose the output of ansible-playbook is
:param quiet: Disable all output
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
:param _input: An optional file or file-like object for use as input in a streaming pipeline
:param _output: An optional file or file-like object for use as output in a streaming pipeline
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
:param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
:param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
:param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param resource_profiling: Enable collection of resource utilization data during playbook execution.
:param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
:param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
:param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
:param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
:param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
    :param directory_isolation_base_path: An optional path used as the base path for creating a temp directory; the project contents will be
                                          copied to this location, which will then be used as the working directory during playbook execution.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
:param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
:param cli_execenv_cmd: Tells Ansible Runner to emulate the CLI of Ansible by prepping an Execution Environment and then passing the user provided cmdline
:type private_data_dir: str
:type ident: str
:type json_mode: bool
:type playbook: str or filename or list
:type inventory: str or dict or list
:type envvars: dict
:type extravars: dict
:type passwords: dict
:type settings: dict
:type ssh_key: str
:type artifact_dir: str
:type project_dir: str
:type rotate_artifacts: int
:type cmdline: str
:type limit: str
:type forks: int
:type quiet: bool
:type verbosity: int
:type streamer: str
:type _input: file
:type _output: file
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:type process_isolation: bool
:type process_isolation_executable: str
:type process_isolation_path: str
:type process_isolation_hide_paths: str or list
:type process_isolation_show_paths: str or list
:type process_isolation_ro_paths: str or list
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type resource_profiling: bool
:type resource_profiling_base_cgroup: str
:type resource_profiling_cpu_poll_interval: float
:type resource_profiling_memory_poll_interval: float
:type resource_profiling_pid_poll_interval: float
:type resource_profiling_results_dir: str
:type directory_isolation_base_path: str
:type fact_cache: str
:type fact_cache_type: str
:type omit_event_data: bool
:type only_failed_event_data: bool
:type cli_execenv_cmd: str
:returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing `rc` if run remotely
'''
r = init_runner(**kwargs)
r.run()
return r
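# A minimal usage sketch for run() (the private data dir and playbook name are
# hypothetical; see the parameter list documented above):
#
#     r = run(private_data_dir='/tmp/demo', playbook='test.yml')
#     print(r.status, r.rc)        # e.g. 'successful', 0
#     for event in r.events:
#         print(event['event'])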
def run_async(**kwargs):
'''
Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.run`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
'''
r = init_runner(**kwargs)
runner_thread = threading.Thread(target=r.run)
runner_thread.start()
return runner_thread, r
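# A minimal usage sketch for run_async() (hypothetical paths; join the returned
# thread, or poll r.status, before reading the final results):
#
#     thread, r = run_async(private_data_dir='/tmp/demo', playbook='test.yml')
#     thread.join()
#     print(r.status)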
|
server.py
|
'''https://www.youtube.com/watch?v=3QiPPX-KeSc'''
import socket
import threading
SERVER = socket.gethostbyname(socket.gethostname())
# automatically gets the server IP addr equi to SERVER = "192.168.20.7"
# print("Server ip address is: ", SERVER)
# print("Server name is: ", socket.gethostname())
# To connect from outside your network >> SERVER = "public ip address"
HEADER = 64
PORT = 5050
ADDR = (SERVER, PORT)
FORMAT = "utf-8"
DISCONNECT_MESSAGE = "DISCONNECT!"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # (IPV4, TCP)
server.bind(ADDR)
def handle_client(conn, addr):
# this will run concurrently for each client
print(f"[NEW CONNECTION] {addr} connected!")
connected = True
while connected:
        msg_length = conn.recv(HEADER).decode(FORMAT)  # blocking call
        if msg_length:
            msg_length = int(msg_length)
            msg = conn.recv(msg_length).decode(FORMAT)
            if msg == DISCONNECT_MESSAGE:
                connected = False
            print(f"[{addr}] {msg}")
            conn.send("Message received".encode(FORMAT))
        else:
            # An empty read means the client closed the connection.
            connected = False
conn.close()
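# Wire-protocol sketch: every message is preceded by a HEADER-byte field holding the
# message length as a decimal string. A matching client (not part of this file) could
# send a message roughly like this:
#
#     msg = "hello".encode(FORMAT)
#     length = str(len(msg)).encode(FORMAT)
#     length += b' ' * (HEADER - len(length))   # pad to exactly HEADER bytes
#     client.send(length)
#     client.send(msg)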
def start():
#start listening for connection
print(f"[LISTENING] Server is listening on {SERVER}")
server.listen()
while True:
conn, addr = server.accept() # blocking code
thread = threading.Thread(target=handle_client, args=(conn,addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
# amount of clients connected, subtract 1
# which represents the start thread
print("[STARTING] server is starting...")
start()
|
pairs2vocab.py
|
import argparse
import os
import ioutils
from multiprocessing import Process, Queue
from Queue import Empty
from representations.matrix_serializer import save_count_vocabulary
import six
import sys
def worker(proc_num, queue, out_dir, in_dir):
while True:
try:
year = queue.get(block=False)
except Empty:
break
print proc_num, "pairs2vocab for year", year
words_path = out_dir + str(year) + "-w.vocab"
contexts_path = out_dir + str(year) + "-c.vocab"
words = {} #center word vocabulary
contexts = {} #context vocabulary
print proc_num, "Processing pairs for year", year
with open(in_dir + str(year) + ".txt") as f:
pairs_num = 0
for line in f:
pairs_num += 1
if pairs_num % 1000**2 == 0:
print str(int(pairs_num/1000**2)) + "M pairs processed."
pair = line.strip().split()
if pair[0] not in words :
words[pair[0]] = 1
else:
words[pair[0]] += 1
if pair[1] not in contexts :
contexts[pair[1]] = 1
else:
contexts[pair[1]] += 1
words = sorted(six.iteritems(words), key=lambda item: item[1], reverse=True)
contexts = sorted(six.iteritems(contexts), key=lambda item: item[1], reverse=True)
save_count_vocabulary(words_path, words)
save_count_vocabulary(contexts_path, contexts)
print ("words size: " + str(len(words)))
print ("contexts size: " + str(len(contexts)))
print ("number of pairs: " + str(pairs_num))
print ("pairs2vocab finished")
def run_parallel(num_procs, out_dir, in_dir, years):
queue = Queue()
for year in years:
queue.put(year)
procs = [Process(target=worker, args=[i, queue, out_dir, in_dir]) for i in range(num_procs)]
for p in procs:
p.start()
for p in procs:
p.join()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Builds word and context vocabularies from word-context pair files.")
parser.add_argument("out_dir", help="output directory for vocab ")
parser.add_argument("in_dir", help="directory for ngrams pairs")
parser.add_argument("--workers", type=int, default=10)
parser.add_argument("--start-year", type=int, help="start year (inclusive)", default=1800)
parser.add_argument("--end-year", type=int, help="end year (inclusive)", default=2000)
parser.add_argument("--year-inc", type=int, help="end year (inclusive)", default=1)
args = parser.parse_args()
years = range(args.start_year, args.end_year + 1, args.year_inc)
ioutils.mkdir(args.out_dir)
run_parallel(args.workers, args.out_dir + "/", args.in_dir + "/", years)
|
script.py
|
from __future__ import absolute_import
import os, traceback, threading, shlex
from . import controller
class ScriptError(Exception):
pass
class ScriptContext:
def __init__(self, master):
self._master = master
def log(self, message, level="info"):
"""
Logs an event.
By default, only events with level "error" get displayed. This can be controlled with the "-v" switch.
How log messages are handled depends on the front-end. mitmdump will print them to stdout,
mitmproxy sends output to the eventlog for display ("e" keyboard shortcut).
"""
self._master.add_event(message, level)
def kill_flow(self, f):
"""
Kills a flow immediately. No further data will be sent to the client or the server.
"""
f.kill(self._master)
def duplicate_flow(self, f):
"""
Returns a duplicate of the specified flow. The flow is also
injected into the current state, and is ready for editing, replay,
etc.
"""
self._master.pause_scripts = True
f = self._master.duplicate_flow(f)
self._master.pause_scripts = False
return f
def replay_request(self, f):
"""
Replay the request on the current flow. The response will be added
to the flow object.
"""
return self._master.replay_request(f, block=True, run_scripthooks=False)
@property
def app_registry(self):
return self._master.apps
class Script:
"""
The instantiator should do something along this vein:
s = Script(argv, master)
s.load()
"""
def __init__(self, command, master):
self.command = command
self.argv = self.parse_command(command)
self.ctx = ScriptContext(master)
self.ns = None
self.load()
@classmethod
def parse_command(klass, command):
if not command or not command.strip():
raise ScriptError("Empty script command.")
if os.name == "nt": # Windows: escape all backslashes in the path.
backslashes = shlex.split(command, posix=False)[0].count("\\")
command = command.replace("\\", "\\\\", backslashes)
args = shlex.split(command)
args[0] = os.path.expanduser(args[0])
if not os.path.exists(args[0]):
raise ScriptError(("Script file not found: %s.\r\n"
"If you script path contains spaces, "
"make sure to wrap it in additional quotes, e.g. -s \"'./foo bar/baz.py' --args\".") % args[0])
elif not os.path.isfile(args[0]):
raise ScriptError("Not a file: %s" % args[0])
return args
def load(self):
"""
Loads a module.
Raises ScriptError on failure, with argument equal to an error
message that may be a formatted traceback.
"""
ns = {}
try:
execfile(self.argv[0], ns, ns)
except Exception, v:
raise ScriptError(traceback.format_exc(v))
self.ns = ns
r = self.run("start", self.argv)
if not r[0] and r[1]:
raise ScriptError(r[1][1])
def unload(self):
return self.run("done")
def run(self, name, *args, **kwargs):
"""
Runs a plugin method.
Returns:
(True, retval) on success.
(False, None) on nonexistent method.
(False, (exc, traceback string)) if there was an exception.
"""
f = self.ns.get(name)
if f:
try:
return (True, f(self.ctx, *args, **kwargs))
except Exception, v:
return (False, (v, traceback.format_exc(v)))
else:
return (False, None)
class ReplyProxy(object):
def __init__(self, original_reply):
self._ignore_calls = 1
self.lock = threading.Lock()
self.original_reply = original_reply
def __call__(self, *args, **kwargs):
with self.lock:
if self._ignore_calls > 0:
self._ignore_calls -= 1
return
self.original_reply(*args, **kwargs)
def __getattr__ (self, k):
return getattr(self.original_reply, k)
def _handle_concurrent_reply(fn, o, *args, **kwargs):
# Make first call to o.reply a no op
reply_proxy = ReplyProxy(o.reply)
o.reply = reply_proxy
def run():
fn(*args, **kwargs)
reply_proxy() # If the script did not call .reply(), we have to do it now.
ScriptThread(target=run).start()
class ScriptThread(threading.Thread):
name = "ScriptThread"
def concurrent(fn):
if fn.func_name in ("request", "response", "error", "clientconnect", "serverconnect", "clientdisconnect"):
def _concurrent(ctx, obj):
_handle_concurrent_reply(fn, obj, ctx, obj)
return _concurrent
raise NotImplementedError("Concurrent decorator not supported for this method.")
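# A sketch of how a user script might apply the decorator (hypothetical handler body;
# the flow object comes from the hosting mitmproxy master, and reply() is invoked
# automatically by _handle_concurrent_reply once the handler returns):
#
#     @concurrent
#     def request(ctx, flow):
#         do_slow_lookup(flow)   # hypothetical long-running work, runs on a ScriptThread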
|
main_server.py
|
import socketserver
import socket
import threading
import tkinter
import zmq
import base64
import cv2
class MyHandler(socketserver.BaseRequestHandler):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
users = {}
    # Print this machine's LAN IP address (falls back to a UDP connect trick, then to "no IP found").
    print((([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0])
def send_all_message(self, msg):
for sock, _ in self.users.values():
sock.send(msg.encode())
def handle(self):
print(self.client_address)
while True:
self.request.send("채팅 닉네임을 입력하세요".encode())
nickname = self.request.recv(1024).decode()
if nickname in self.users:
self.request.send("이미 등록된 닉네임 입니다.\n".encode())
else:
self.users[nickname] = (self.request, self.client_address)
print("현재 {} 명 참여중".format(len(self.users)))
self.send_all_message("[{}] 님이 입장 했습니다.".format(nickname))
break
while True:
msg = self.request.recv(1024)
if msg.decode() == "/bye":
self.request.close()
break
self.send_all_message("[{}] {}".format(nickname, msg.decode()))
if nickname in self.users:
del self.users[nickname]
self.send_all_message("[{}]님이 퇴장하였습니다.".format(nickname))
print("현재 {} 명 참여중".format(len(self.users)))
class ChatServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class ChatClient:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IP = ""
PORT = 0
input_string = ""
input_msg = ""
chat_list = None
window = None
def recv_message(self,sock):
while True:
msg = sock.recv(1024)
self.chat_list.insert(tkinter.END, msg.decode())
self.chat_list.see(tkinter.END)
def connect(self, event=None):
connect_string = self.input_string.get()
addr = connect_string.split(":")
self.IP = addr[0]
self.PORT = int(addr[1])
self.w_connect.destroy()
def send_message(self, event=None):
msg = self.input_msg.get()
self.sock.send(msg.encode())
self.input_msg.set("")
if msg == "/bye":
self.sock.close()
self.window.quit()
def connect_gui(self):
        # Connection window.
        w_connect = tkinter.Tk()
        self.w_connect = w_connect  # keep a reference so connect() can destroy this window
        w_connect.title("접속대상")
tkinter.Label(w_connect, text="접속대상").grid(row = 0, column = 0)
        self.input_string = tkinter.StringVar(value="127.0.0.1:10000")
        input_addr = tkinter.Entry(w_connect, textvariable=self.input_string, width=20)
input_addr.grid(row=0, column=1, padx=5, pady=5)
c_button = tkinter.Button(w_connect, text="접속하기",command=self.connect)
c_button.grid(row=0, column= 2, padx=5, pady=5)
width = 280
height = 45
        screen_width = w_connect.winfo_screenwidth()    # screen resolution
        screen_height = w_connect.winfo_screenheight()
        x = int((screen_width / 2) - (width / 2))       # center the window on the screen
        y = int((screen_height / 2) - (height / 2))
        w_connect.geometry('{}x{}+{}+{}'.format(width, height, x, y))  # place the window at the computed position
w_connect.mainloop()
self.run()
def run(self):
        # Chat window.
self.window = tkinter.Tk()
self.window.title("클라이언트")
cg_frame = tkinter.Frame(self.window)
scroll = tkinter.Scrollbar(cg_frame)
scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
self.chat_list = tkinter.Listbox(cg_frame, height=15, width=50, yscrollcommand= scroll.set)
self.chat_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH, padx=5, pady=5)
cg_frame.pack()
self.input_msg = tkinter.StringVar()
inputbox = tkinter.Entry(self.window, textvariable=self.input_msg)
inputbox.bind("<Return>", self.send_message)
inputbox.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=tkinter.YES, padx=5, pady=5)
send_button = tkinter.Button(self.window, text="전송",command=self.send_message)
send_button.pack(side=tkinter.RIGHT, fill=tkinter.X, padx=5, pady=5)
        # Create the socket and start the receiving thread.
self.sock.connect((self.IP, self.PORT))
th = threading.Thread(target=self.recv_message, args=(self.sock,))
th.daemon = True
th.start()
self.window.mainloop()
class Streamer:
context = zmq.Context()
footage_socket = context.socket(zmq.PUB)
camera = cv2.VideoCapture(0)
def __init__(self):
        self.footage_socket.bind('tcp://*:5555')  # publish frames over TCP on port 5555
def video(self):
while True:
try:
                grabbed, frame = self.camera.read()  # grab the current camera frame
                frame = cv2.resize(frame, (640, 480))  # resize the frame
                encoded, buffer = cv2.imencode('.jpg', frame)  # encode the frame as JPEG
                jpg_as_text = base64.b64encode(buffer)  # base64-encode the JPEG bytes
                self.footage_socket.send(jpg_as_text)  # publish the encoded frame
            except KeyboardInterrupt:  # stop streaming on KeyboardInterrupt
self.camera.release()
cv2.destroyAllWindows()
break
def run(self):
videoThread=threading.Thread(target=self.video)
videoThread.daemon=True
videoThread.start()
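# Receiving-side sketch for the stream published above (a hypothetical viewer, not part
# of this server; assumes the same port and encoding, and additionally requires numpy):
#
#     context = zmq.Context()
#     sub = context.socket(zmq.SUB)
#     sub.connect('tcp://localhost:5555')
#     sub.setsockopt_string(zmq.SUBSCRIBE, '')
#     while True:
#         jpg = base64.b64decode(sub.recv())
#         frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
#         cv2.imshow('stream', frame)
#         cv2.waitKey(1)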
streamer = Streamer()
streamer.run()
Chat_server = ChatServer(("", 10000), MyHandler)
Chat_server.serve_forever()
Chat_server.shutdown()
Chat_server.server_close()
|
read_only_vertex_bench.py
|
#! /usr/bin/env python
#
# ===============================================================
# Description: Read-only multi-client benchmark which only
# reads 1 vertex per query.
#
# Created: 2014-03-21 13:39:06
#
# Author: Ayush Dubey, dubey@cs.cornell.edu
#
# Copyright (C) 2013-2014, Cornell University, see the LICENSE
# file for licensing agreement
# ===============================================================
#
import random
import sys
import time
import threading
import weaver.client as client
num_started = 0
num_finished = 0
cv = threading.Condition()
num_requests = 10000
num_nodes = 81306 # snap twitter-combined
# node handles are range(0, num_nodes)
num_clients = 64
def exec_reads(reqs, cl, exec_time, idx):
global num_started
global cv
global num_clients
global num_finished
with cv:
while num_started < num_clients:
cv.wait()
rp = client.ReadNodePropsParams()
start = time.time()
cnt = 0
for r in reqs:
cnt += 1
prog_args = [(r, rp)]
response = cl.read_node_props(prog_args)
#if cnt % 1000 == 0:
# print 'done ' + str(cnt) + ' by client ' + str(idx)
end = time.time()
with cv:
num_finished += 1
cv.notify_all()
exec_time[idx] = end - start
clients = []
for i in range(num_clients):
clients.append(client.Client('128.84.167.101', 2002))
# randomly write node props
# with p = 0.50 nodes have 0 props
# with p = 0.25 nodes have 1 props
# with p = 0.25 nodes have 2 props
if len(sys.argv) > 1:
write_nodes = (num_nodes / 1000) * 1000
tx_id = 0
c = clients[0]
tx_sz = 1000
for n in range(write_nodes):
if n % tx_sz == 0:
tx_id = c.begin_tx()
coin_toss = random.random()
if coin_toss > 0.50:
c.set_node_property(str(n), 'color', 'blue')
if coin_toss > 0.75:
c.set_node_property(str(n), 'type', 'photo')
if n % tx_sz == (tx_sz-1):
c.end_tx()
print 'initial write thread processed ' + str(n+1) + ' nodes'
reqs = []
for i in range(num_clients):
cl_reqs = []
for numr in range(num_requests):
cl_reqs.append(str(random.randint(0, num_nodes-1)))
reqs.append(cl_reqs)
exec_time = [0] * num_clients
threads = []
#print "starting requests"
for i in range(num_clients):
thr = threading.Thread(target=exec_reads, args=(reqs[i], clients[i], exec_time, i))
thr.start()
threads.append(thr)
start_time = time.time()
with cv:
num_started = num_clients
cv.notify_all()
while num_finished < num_clients:
cv.wait()
end_time = time.time()
total_time = end_time-start_time
for thr in threads:
thr.join()
#print 'Total time = ' + str(total_time)
#throughput = (num_requests * num_clients) / total_time
#print 'Throughput = ' + str(throughput)
print num_requests*num_clients,total_time
|
command.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import enum
import json
import logging
import os
import re
import resource
import signal
import subprocess
import threading
from abc import ABC, abstractmethod
from typing import Iterable, List, Optional
from .. import (
find_local_root,
find_log_directory,
find_project_root,
is_capable_terminal,
json_rpc,
log,
readable_directory,
)
from ..analysis_directory import AnalysisDirectory, resolve_analysis_directory
from ..configuration import Configuration
from ..exceptions import EnvironmentException
from ..filesystem import remove_if_exists, translate_path
from ..process import register_non_unique_process
from ..socket_connection import SocketConnection, SocketException
TEXT: str = "text"
JSON: str = "json"
LOG: logging.Logger = logging.getLogger(__name__)
class ClientException(Exception):
pass
class State(enum.IntEnum):
DEAD = 0
RUNNING = 1
class ExitCode(enum.IntEnum):
SUCCESS = 0
FOUND_ERRORS = 1
FAILURE = 2
BUCK_ERROR = 3
# If the process exited due to a signal, this will be the negative signal number.
SIGSEGV = -signal.SIGSEGV
class IncrementalStyle(enum.Enum):
SHALLOW = "shallow"
FINE_GRAINED = "fine_grained"
def __str__(self) -> str:
return self.value
class ProfileOutput(enum.Enum):
TRACE_EVENT: str = "trace_event"
COLD_START_PHASES: str = "cold_start_phases"
INCREMENTAL_UPDATES: str = "incremental_updates"
def __str__(self) -> str:
return self.value
class Result:
def __init__(self, code: int, output: str) -> None:
self.code: int = code
self.output: str = output
def check(self) -> None:
if self.code != ExitCode.SUCCESS:
description = ":\n{}".format(self.output) if self.output else ""
if self.code == ExitCode.SIGSEGV:
description += (
"\nThis is a Pyre bug. Please re-run Pyre with --debug "
"and provide the output to the developers."
)
raise ClientException(
"Client exited with error code {}{}".format(self.code, description)
)
def typeshed_search_path(typeshed_root: str) -> List[str]:
search_path = []
typeshed_subdirectories = ["stdlib", "third_party"]
for typeshed_subdirectory_name in typeshed_subdirectories:
typeshed_subdirectory = os.path.join(typeshed_root, typeshed_subdirectory_name)
if (
not os.path.isdir(typeshed_subdirectory)
or typeshed_subdirectory_name == "tests"
or typeshed_subdirectory_name[0] == "."
):
continue
# Always prefer newer version over older version
version_names = sorted(os.listdir(typeshed_subdirectory), reverse=True)
for version_name in version_names:
# Anything under 2/ or 2.x is unusable for Pyre
if version_name.startswith("2") and version_name != "2and3":
continue
search_path.append(os.path.join(typeshed_subdirectory, version_name))
return search_path
def _convert_json_response_to_result(response: json_rpc.Response) -> Result:
if response.error:
error_code = ExitCode.FAILURE
else:
error_code = ExitCode.SUCCESS
return Result(output=json.dumps(response.result), code=error_code)
def executable_file(file_path: str) -> str:
if not os.path.isfile(file_path):
raise EnvironmentException("%s is not a valid file" % file_path)
if not os.access(file_path, os.X_OK):
raise EnvironmentException("%s is not an executable file" % file_path)
return file_path
class CommandParser(ABC):
NAME = ""
_exit_code: ExitCode = ExitCode.SUCCESS
def __init__(self, arguments: argparse.Namespace, original_directory: str) -> None:
self._arguments = arguments
self._local_configuration: Optional[str] = arguments.local_configuration
self._version: bool = arguments.version
self._debug: bool = arguments.debug
self._sequential: bool = arguments.sequential
self._strict: bool = arguments.strict
self._additional_checks: List[str] = arguments.additional_check
self._show_error_traces: bool = arguments.show_error_traces
self._output: str = arguments.output
self._verbose: bool = arguments.verbose
self._enable_profiling: bool = arguments.enable_profiling
self._enable_memory_profiling: bool = arguments.enable_memory_profiling
self._noninteractive: bool = arguments.noninteractive
self._hide_parse_errors: bool = arguments.hide_parse_errors
self._logging_sections: str = arguments.logging_sections
self._log_identifier: str = arguments.log_identifier
self._log_directory: str = arguments.log_directory
self._logger: str = arguments.logger
self._formatter: List[str] = arguments.formatter
self._targets: List[str] = arguments.targets
self._build: bool = arguments.build
self._use_buck_builder: bool = arguments.use_buck_builder
self._use_legacy_builder: bool = arguments.use_legacy_builder
self._buck_builder_debug: bool = arguments.buck_builder_debug
self._source_directories: List[str] = arguments.source_directories
self._filter_directory: List[str] = arguments.filter_directory
self._use_global_shared_analysis_directory: bool = arguments.use_global_shared_analysis_directory
self._no_saved_state: bool = arguments.no_saved_state
self._search_path: List[str] = arguments.search_path
self._preserve_pythonpath: bool = arguments.preserve_pythonpath
self._binary: str = arguments.binary
self._buck_builder_binary: Optional[str] = arguments.buck_builder_binary
self._buck_builder_target: Optional[str] = arguments.buck_builder_target
self._exclude: List[str] = arguments.exclude
self._typeshed: str = arguments.typeshed
self._save_initial_state_to: Optional[str] = arguments.save_initial_state_to
self._load_initial_state_from: Optional[str] = arguments.load_initial_state_from
self._changed_files_path: Optional[str] = arguments.changed_files_path
self._saved_state_project: Optional[str] = arguments.saved_state_project
self._use_json_sockets: bool = arguments.use_json_sockets
# Derived arguments
self._capable_terminal: bool = is_capable_terminal()
self._original_directory: str = original_directory
self._current_directory: str = find_project_root(self._original_directory)
self._local_configuration = self._local_configuration or find_local_root(
self._original_directory
)
self._log_directory: str = find_log_directory(
self._log_directory, self._current_directory, self._local_configuration
)
logger = self._logger
if logger:
self._logger = translate_path(self._original_directory, logger)
if self._debug or not self._capable_terminal:
self._noninteractive = True
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"-l", "--local-configuration", type=str, help="Use a local configuration"
)
parser.add_argument(
"--version",
action="store_true",
help="Print the client and binary versions of Pyre.",
)
parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--sequential", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--strict", action="store_true", help=argparse.SUPPRESS)
parser.add_argument(
"--additional-check", action="append", help=argparse.SUPPRESS
)
parser.add_argument(
"--show-error-traces",
action="store_true",
help="Display errors trace information",
)
# Logging.
parser.add_argument(
"--output", choices=[TEXT, JSON], default=TEXT, help="How to format output"
)
parser.add_argument(
"--verbose", action="store_true", help="Enable verbose logging"
)
parser.add_argument(
"--enable-profiling", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"--enable-memory-profiling", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"-n",
"--noninteractive",
action="store_true",
help="Disable interactive logging",
)
parser.add_argument(
"--hide-parse-errors",
action="store_true",
help="Hide detailed information about parse errors",
)
parser.add_argument(
"--logging-sections", help=argparse.SUPPRESS # Enable sectional logging.
)
parser.add_argument(
"--log-identifier",
default="",
help=argparse.SUPPRESS, # Add given identifier to logged samples.
)
parser.add_argument(
"--log-directory",
help=argparse.SUPPRESS, # Override default location for logs
)
parser.add_argument(
"--logger", help=argparse.SUPPRESS # Specify custom logging binary.
)
parser.add_argument("--formatter", help=argparse.SUPPRESS)
# Link tree determination.
buck_arguments = parser.add_argument_group("buck")
buck_arguments.add_argument(
"--target", action="append", dest="targets", help="The buck target to check"
)
buck_arguments.add_argument(
"--build",
action="store_true",
help="Freshly build all the necessary artifacts.",
)
buck_arguments.add_argument(
"--use-buck-builder",
action="store_true",
help="Use Pyre's experimental builder for Buck projects.",
)
buck_arguments.add_argument(
"--use-legacy-builder",
action="store_true",
help="Use Pyre's legacy builder for Buck projects.",
)
buck_arguments.add_argument(
"--buck-builder-debug", action="store_true", help=argparse.SUPPRESS
)
source_directories = parser.add_argument_group("source-directories")
source_directories.add_argument(
"--source-directory",
action="append",
dest="source_directories",
help="The source directory to check",
type=os.path.abspath,
)
source_directories.add_argument(
"--filter-directory", help=argparse.SUPPRESS # override filter directory
)
parser.add_argument(
"--use-global-shared-analysis-directory",
action="store_true",
help=argparse.SUPPRESS,
)
parser.add_argument(
"--no-saved-state",
action="store_true",
help="Don't attempt to load Pyre from a saved state.",
)
# Handling of search path
parser.add_argument(
"--search-path",
action="append",
default=[],
type=readable_directory,
help="Add an additional directory of modules and stubs to include"
" in the type environment",
)
parser.add_argument(
"--preserve-pythonpath",
action="store_true",
default=False,
help="Preserve the value of the PYTHONPATH environment variable and "
"inherit the current python environment's search path",
)
parser.add_argument(
"--binary",
default=None,
type=executable_file,
help="Location of the pyre binary",
)
parser.add_argument(
"--buck-builder-binary",
default=None,
help="Location of the buck builder binary",
)
parser.add_argument(
"--buck-builder-target", default=None, help=argparse.SUPPRESS
)
parser.add_argument(
"--exclude",
action="append",
default=[],
help="Exclude files and directories matching this regexp from parsing",
)
# Typeshed stubs location
parser.add_argument(
"--typeshed",
default=None,
type=readable_directory,
help="Location of the typeshed stubs",
)
parser.add_argument(
"--save-initial-state-to",
default=None,
help="Path to serialize pyre's initial state to.",
)
parser.add_argument(
"--load-initial-state-from", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--changed-files-path", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--saved-state-project", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--features", default=None, type=str, help=argparse.SUPPRESS
)
# Temporary flag to help migrate to json sockets for incremental and query
# commands.
parser.add_argument(
"--use-json-sockets",
action="store_true",
default=False,
help=argparse.SUPPRESS,
)
@classmethod
@abstractmethod
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
pass
@abstractmethod
def _run(self) -> None:
""" Abstract method expected to be overridden by subclasses. """
pass
def run(self) -> "CommandParser":
self._run()
return self
def cleanup(self) -> None:
pass
def exit_code(self) -> ExitCode:
return self._exit_code
@property
def configuration(self) -> Optional[Configuration]:
return None
@property
def current_directory(self) -> Optional[str]:
return self._current_directory
@property
def local_configuration(self) -> Optional[str]:
return self._local_configuration
@property
def log_directory(self) -> str:
return self._log_directory
@property
def noninteractive(self) -> bool:
return self._noninteractive
class Command(CommandParser, ABC):
_buffer: List[str] = []
_call_client_terminated: bool = False
_local_root: str = ""
def __init__(
self,
arguments: argparse.Namespace,
original_directory: str,
configuration: Optional[Configuration] = None,
analysis_directory: Optional[AnalysisDirectory] = None,
) -> None:
super(Command, self).__init__(arguments, original_directory)
local_configuration = self._local_configuration
if local_configuration:
self._local_root = (
local_configuration
if os.path.isdir(local_configuration)
else os.path.dirname(local_configuration)
)
else:
self._local_root = self._original_directory
self._configuration: Configuration = (
configuration or self.generate_configuration()
)
self._strict: bool = arguments.strict or self._configuration.strict
self._logger: str = arguments.logger or (configuration and configuration.logger)
self._ignore_all_errors_paths: Iterable[str] = (
self._configuration.ignore_all_errors
)
self._number_of_workers: int = self._configuration.number_of_workers
self._version_hash: str = self._configuration.version_hash
self._formatter: Optional[str] = self._configuration.formatter
self._taint_models_path: List[str] = [
translate_path(self._original_directory, path)
for path in self._configuration.taint_models_path
]
self._analysis_directory: AnalysisDirectory = (
analysis_directory or self.generate_analysis_directory()
)
self._features: Optional[str] = arguments.features
@classmethod
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
pass
def generate_configuration(self) -> Configuration:
return Configuration(
local_configuration=self._local_configuration,
search_path=self._search_path,
binary=self._binary,
typeshed=self._typeshed,
preserve_pythonpath=self._preserve_pythonpath,
excludes=self._exclude,
logger=self._logger,
formatter=self._formatter,
log_directory=self._log_directory,
)
def generate_analysis_directory(self) -> AnalysisDirectory:
configuration = self._configuration
if not configuration:
return AnalysisDirectory(".")
else:
return resolve_analysis_directory(
self._arguments,
configuration,
self._original_directory,
self._current_directory,
)
def run(self) -> "Command":
configuration = self._configuration
if configuration and configuration.disabled:
LOG.log(log.SUCCESS, "Pyre will not run due to being explicitly disabled")
else:
self._run()
return self
def _run(self) -> None:
pass
def cleanup(self) -> None:
self._analysis_directory.cleanup()
def _flags(self) -> List[str]:
flags = []
if self._debug:
flags.extend(["-debug"])
if self._sequential:
flags.extend(["-sequential"])
if self._strict:
flags.extend(["-strict"])
if self._additional_checks:
flags.append("-additional-checks")
flags.append(",".join(self._additional_checks))
if self._show_error_traces:
flags.append("-show-error-traces")
if self._verbose:
flags.append("-verbose")
if not self._hide_parse_errors:
if self._logging_sections:
self._logging_sections = self._logging_sections + ",parser"
else:
self._logging_sections = "parser"
if not self._capable_terminal:
# Disable progress reporting for non-capable terminals.
# This helps in reducing clutter.
if self._logging_sections:
self._logging_sections = self._logging_sections + ",-progress"
else:
self._logging_sections = "-progress"
if self._logging_sections:
flags.extend(["-logging-sections", self._logging_sections])
if self._enable_profiling:
flags.extend(["-profiling-output", self.profiling_log_path()])
if self._enable_memory_profiling:
flags.extend(["-memory-profiling-output", self.profiling_log_path()])
if self._enable_profiling or self._enable_memory_profiling:
# Clear the profiling log first since the pyre binary treats it as append-only
remove_if_exists(self.profiling_log_path())
if self._current_directory:
flags.extend(["-project-root", self._current_directory])
if self._log_identifier:
flags.extend(["-log-identifier", self._log_identifier])
if self._logger:
flags.extend(["-logger", self._logger])
if self._log_directory:
flags.extend(["-log-directory", self._log_directory])
return flags
# temporarily always return empty list to unblock client release
def _feature_flags(self) -> List[str]:
features = self._features
if features:
lsp_features = ["click_to_fix", "hover", "go_to_definition"]
filtered = {
key: value
for key, value in json.loads(features).items()
if key in lsp_features
}
return ["-features", json.dumps(filtered)]
return []
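# Illustrative only (values are made up): with --features set to
#   '{"click_to_fix": true, "hover": false, "unrelated_flag": 1}'
# the non-LSP key is dropped and the binary receives
#   ["-features", '{"click_to_fix": true, "hover": false}']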
def _read_stdout(self, stdout: Iterable[bytes]) -> None:
self._buffer = []
for line in stdout:
self._buffer.append(line.decode())
def _read_stderr(self, stream: Iterable[bytes]) -> None:
buffer = None
log_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} (\w+) (.*)")
try:
for line in stream:
if self._call_client_terminated:
return
line = line.decode().rstrip()
match = log_pattern.match(line)
if match:
if buffer:
buffer.flush()
buffer = log.Buffer(
section=match.groups()[0], data=[match.groups()[1]]
)
elif buffer:
buffer.append(line)
if buffer:
buffer.flush()
except Exception:
pass
def _call_client(self, command: str, capture_output: bool = True) -> Result:
if not os.path.isdir(self._analysis_directory.get_root()):
raise EnvironmentException(
"`{}` is not a link tree.".format(self._analysis_directory.get_root())
)
client_command = [self._configuration.binary, command]
client_command.extend(self._flags())
client_command.append(self._analysis_directory.get_root())
def limit_memory_usage() -> None:
try:
limit = 30 * 1024 * 1024 * 1024 # 30 GB
resource.setrlimit(resource.RLIMIT_DATA, (limit, limit))
except OSError:
# Run the process with unlimited memory if the underlying syscall fails.
pass
LOG.debug("Running `%s`", " ".join(client_command))
with subprocess.Popen(
client_command,
stdout=subprocess.PIPE if capture_output else None,
stderr=subprocess.PIPE,
preexec_fn=limit_memory_usage,
) as process:
# Read stdout output
if capture_output:
stdout_reader = threading.Thread(
target=self._read_stdout, args=(process.stdout,)
)
stdout_reader.daemon = True
stdout_reader.start()
# Read the error output and print it.
self._call_client_terminated = False
stderr_reader = threading.Thread(
target=self._read_stderr, args=(process.stderr,)
)
stderr_reader.daemon = True
stderr_reader.start()
with register_non_unique_process(
process.pid, self.NAME, self.log_directory
):
# Wait for the process to finish and clean up.
process.wait()
# In the exceptional case, make sure that we print the error messages.
if process.returncode != 0:
stderr_reader.join()
self._call_client_terminated = True
if capture_output:
# pyre-fixme: stdout_reader is not always declared!
stdout_reader.join()
output = ""
if capture_output:
output = "".join(self._buffer)
return Result(code=process.returncode, output=output)
def _relative_path(self, path: str) -> str:
return os.path.relpath(path, self._original_directory)
def _state(self) -> State:
pid_path = os.path.join(self._log_directory, "server/server.pid")
try:
with open(pid_path) as file:
pid = int(file.read())
os.kill(pid, 0) # throws if process is not running
return State.RUNNING
except Exception:
return State.DEAD
# will open a socket, send a request, read the response and close the socket.
def _send_and_handle_socket_request(
self, request: json_rpc.Request, version_hash: str
) -> None:
try:
with SocketConnection(self._log_directory) as socket_connection:
socket_connection.perform_handshake(version_hash)
socket_connection.send(request)
response = socket_connection.read()
result = _convert_json_response_to_result(response)
result.check()
self._socket_result_handler(result)
except (
SocketException,
ResourceWarning,
ClientException,
json_rpc.JSONRPCException,
) as exception:
LOG.error("Error while waiting for server: %s", str(exception))
LOG.error("Run `pyre restart` in order to restart the server.")
self._exit_code = ExitCode.FAILURE
# Will be overwritten in subclasses to specialize how json socket
# responses are handled.
def _socket_result_handler(self, result: Result) -> None:
log.stdout.write(result.output)
def profiling_log_path(self) -> str:
return os.path.join(self._log_directory, "profiling.log")
@property
def analysis_directory(self) -> AnalysisDirectory:
return self._analysis_directory
@property
def configuration(self) -> Optional[Configuration]:
return self._configuration
|
example_respmod_500_error.py
|
# -*- coding: utf8 -*-
import time
import threading
from icapserver import *
set_logger('debug')
class ExampleICAPHandler(BaseICAPRequestHandler):
def example_OPTIONS(self):
self.set_icap_response(200)
self.set_icap_header('Methods', 'RESPMOD, REQMOD')
self.set_icap_header('Service', 'ICAP Server' + ' ' + self._server_version)
self.set_icap_header('Options-TTL', '3600')
self.set_icap_header('Preview', '0')
self.send_headers(False)
def example_REQMOD(self):
self.no_adaptation_required()
def example_RESPMOD(self):
self.send_error(500, 'Something went wrong :(')
class ExampleICAPServer():
def __init__(self, addr='', port=13440):
self.addr = addr
self.port = port
def start(self):
self.server = ICAPServer((self.addr, self.port), ExampleICAPHandler)
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
return True
def stop(self):
self.server.shutdown()
self.server.server_close()
self.thread.join(2)
return True
try:
server = ExampleICAPServer()
server.start()
print('Use Control-C to exit')
while True:
time.sleep(1)
except KeyboardInterrupt:
server.stop()
print "Finished"
|
actr.py
|
import json
import threading
import socket
import time
import os
import sys
current_connection = None
class request():
def __init__(self,id):
self.id = id
self.lock = threading.Lock()
self.cv = threading.Condition(self.lock)
self.complete = False
def notify_result(self):
self.cv.acquire()
self.complete = True
self.cv.notify()
self.cv.release()
locals = threading.local()
class actr():
def __init__(self,host,port):
self.interface = interface(host, port)
if self.interface.connected :
self.interface.echo_output()
def evaluate (self, *params):
try:
m = locals.model_name
except AttributeError:
m = False
p = list(params)
p.insert(1,m)
r = self.interface.send ("evaluate", *p)
if r[0] == False:
for e in r[1:]:
print (e)
return False
else:
return r[1:]
def evaluate_single(self,*params):
r = self.evaluate(*params)
if r:
return r[0]
else:
return False
def add_command(self,name,function,documentation="No documentation provided.",single=True,actr_name=None):
if name not in self.interface.commands.keys():
self.interface.add_command(name,function)
elif self.interface.commands[name] == function:
print("Command ",name," already exists for function ",function)
else:
print("Command ",name," already exists and is now being replaced by ",function)
self.interface.add_command(name,function)
existing = self.interface.send("check",name)
if existing[0] == True:
if existing[1] == None:
result = self.interface.send("add",name,name,documentation,single,actr_name)
if result[0]:
return result[1]
else:
return False
elif existing[2] == None:
print("Cannot add command ",name, " because it has already been added by a different owner.")
return False
else:
return True
else:
print("Invalid command name ",name," cannot be added.")
return False
def monitor_command(self,original,monitor):
r = self.interface.send("monitor",original,monitor)
if r[0] == False:
for e in r[1:]:
print (e)
return False
else:
return r[1:]
def remove_command_monitor(self,original,monitor):
r = self.interface.send("remove-monitor",original,monitor)
if r[0] == False:
for e in r[1:]:
print (e)
return False
else:
return r[1:]
def remove_command(self,name):
if name not in self.interface.commands.keys():
r = self.interface.send('remove',name)
if r[0] == False:
for e in r[1:]:
print (e)
return False
else:
return True
else:
del self.interface.commands[name]
r = self.interface.send("remove",name)
if r[0] == False:
for e in r[1:]:
print (e)
return False
else:
return True
def start (host=None,port=None):
global current_connection
if current_connection == None:
portfile = os.path.join(os.path.expanduser("~"),"act-r-port-num.txt")
hostfile = os.path.join(os.path.expanduser("~"),"act-r-address.txt")
if port == None and os.path.isfile(portfile):
with open(portfile, 'r') as f:
try:
port = int(f.readline())
except:
print("Problem reading ACT-R port number from",portfile,". Using default or 2650.")
port = 2650
elif port == None:
print("ACT-R port number file",portfile,"not found. Using default or 2650.")
port = 2650
if host == None and os.path.isfile(hostfile):
with open(hostfile, 'r') as f:
try:
host = f.readline()
except:
print("Problem reading ACT-R host from",hostfile,". Using default of 127.0.0.1.")
host = "127.0.0.1"
elif host == None:
print("ACT-R host file",hostfile,"not found. Using default of 127.0.0.1.")
host = "127.0.0.1"
try:
a = actr(host=host,port=port)
except:
print("Failed to connect to ACT-R with exception",sys.exc_info())
else:
if a.interface.connected :
a.interface.send("set-name","ACT-R Tutorial Python interface")
current_connection = a
return current_connection
else:
print("ACT-R connection NOT established, but no exception detected or already handled.")
else:
print("ACT-R is already connected.")
return current_connection
def connection ():
if current_connection == None:
s = start()
if s :
print("ACT-R connection has been started.")
return s
else:
print("Could not start ACT-R connection.")
else:
return current_connection
def stop():
global current_connection
if current_connection == None:
print("No current ACT-R connection to stop.")
else:
print("Closing down ACT-R connection.")
current_connection.interface.connected = False
current_connection.interface.sock.close()
current_connection = None
class interface():
def __init__(self,host,port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect((host, port))
except:
self.connected = False
print("Error trying to connect to ACT-R at",host,":",port,"with exception",sys.exc_info())
else:
self.connected = True
self.cmd_id = 1
self.actions = {}
self.stream_lock = threading.Lock()
self.buffer = []
self.commands = {}
self.data_collector = threading.Thread(target=self.collect_data)
self.data_collector.daemon = True
self.data_collector.start()
self.id_lock = threading.Lock()
self.echo_count = 0
self.echo = False
self.show_output = True
def send(self,method,*params):
d = {}
r = request(self.cmd_id)
self.actions[self.cmd_id] = r
d['method'] = method
self.id_lock.acquire()
d['id'] = self.cmd_id
self.cmd_id += 1
self.id_lock.release()
d['params'] = params
message = json.dumps(d) + chr(4)
r.lock.acquire()
self.stream_lock.acquire()
self.sock.sendall(message.encode('utf-8'))
self.stream_lock.release()
while not r.complete:
r.cv.wait()
return [r.success] + r.results
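# Wire-format note (derived from send/collect_data/process_message in this class):
# every message is a single JSON object terminated by an EOT character, chr(4).
# Outgoing requests carry 'method', 'id' and 'params'; incoming messages with a
# 'result' key complete the matching pending request, while incoming 'evaluate'
# messages are dispatched to functions registered via add_command.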
def add_command(self,name,function):
self.commands[name] = function
def collect_data(self):
buffer= ''
c = True
while c:
try:
data = self.sock.recv(4096)
buffer += data.decode('utf-8')
while not chr(4) in buffer:
data = self.sock.recv(4096)
buffer += data.decode('utf-8')
while chr(4) in buffer:
pos = buffer.find(chr(4))
message = buffer[0:pos]
pos += 1
buffer = buffer[pos:]
self.process_message(json.loads(message))
except:
if self.connected:
print("ACT-R connection error connection no longer available.")
c = False
def process_message (self,d):
if 'result' in d.keys():
id =d['id']
r = self.actions[id]
if d['error'] is None:
r.success = True
r.results = d['result']
else:
r.success = False
errors=d['error']
r.results = [errors['message']]
self.actions.pop(id,None)
r.notify_result()
else:
if d['method'] == "evaluate" and d['params'][0] in self.commands.keys():
thread = threading.Thread(target=self.run_command,args=[self.commands[d['params'][0]],d['params'][0],d['params'][1],d['id'],d['params'][2:]])
thread.daemon = True
thread.start()
else:
f={}
f['id'] = d['id']
f['result'] = None
e={}
e['message'] = "Invalid method name" + d['params'][0]
f['error'] = e
message = json.dumps(f) + chr(4)
self.stream_lock.acquire()
self.sock.sendall(message.encode('utf-8'))
self.stream_lock.release()
def run_command (self,command,command_name,model,id,params):
locals.model_name = model
try:
if command:
if params == None:
result = command()
else:
result = command(*params)
else:
result = True
except:
error = True
problem = sys.exc_info()
else:
error = None
f={}
f['id'] = id
if error:
f['result'] = None
f['error'] = {'message': "Error %s while evaluating a command in Python for command: %s, model: %s, parameters: %s"%(problem,command_name,model,params)}
elif ((result is False) or (result is None)):
f['result']= [None]
f['error']= None
else:
if isinstance(result,tuple):
f['result']= result
else:
f['result']= [result]
f['error']= None
message = json.dumps(f) + chr(4)
self.stream_lock.acquire()
self.sock.sendall(message.encode('utf-8'))
self.stream_lock.release()
def output_monitor(self,string):
if self.show_output:
print(string.rstrip())
return True
def echo_output(self):
if not(self.echo):
if 'echo' not in self.commands.keys():
self.add_command("echo",self.output_monitor)
ready = False
while not(ready):
existing = self.send("check",'python-echo'+str(self.echo_count))
if existing[1] == None:
self.send("add","python-echo"+str(self.echo_count),"echo","Trace monitor for python client. Do not call directly.",True)
ready = True
else:
self.echo_count += 1
self.send("monitor","model-trace","python-echo"+str(self.echo_count))
self.send("monitor","command-trace","python-echo"+str(self.echo_count))
self.send("monitor","warning-trace","python-echo"+str(self.echo_count))
self.send("monitor","general-trace","python-echo"+str(self.echo_count))
self.echo = True
return True
else:
print("echo_output called when output was already on.")
return False
def no_output(self):
if self.echo:
self.send("remove-monitor","model-trace","python-echo"+str(self.echo_count))
self.send("remove-monitor","command-trace","python-echo"+str(self.echo_count))
self.send("remove-monitor","warning-trace","python-echo"+str(self.echo_count))
self.send("remove-monitor","general-trace","python-echo"+str(self.echo_count))
self.send("remove","python-echo"+str(self.echo_count))
self.echo = False
else:
print("no_output called when output was already off.")
current_connection = connection()
def current_model():
try:
m = locals.model_name
except AttributeError:
m = current_connection.evaluate_single('current-model')
return m
def set_current_model(name):
if name.lower() in (x.lower() for x in mp_models()):
locals.model_name = name
else:
print("%s is not one of the currently available models: %s"%(name,mp_models()))
def reset ():
return current_connection.evaluate_single("reset")
def reload (compile=False):
return current_connection.evaluate_single("reload",compile)
def run (time, real_time=False):
return current_connection.evaluate("run", time, real_time)
def run_full_time (time, real_time=False):
return current_connection.evaluate("run-full-time", time, real_time)
def run_until_time (time, real_time=False):
return current_connection.evaluate("run-until-time", time, real_time)
def run_n_events (event_count, real_time=False):
return current_connection.evaluate("run-n-events", event_count, real_time)
def run_until_condition(condition,real_time=False):
return current_connection.evaluate("run-until-condition", condition, real_time)
def buffer_chunk (*params):
return current_connection.evaluate_single("buffer-chunk", *params)
def whynot (*params):
return current_connection.evaluate_single("whynot", *params)
def whynot_dm (*params):
return current_connection.evaluate_single("whynot-dm", *params)
def penable (*params):
return current_connection.evaluate_single("penable", *params)
def pdisable (*params):
return current_connection.evaluate_single("pdisable", *params)
def load_act_r_model (path):
return current_connection.evaluate_single("load-act-r-model",path)
def load_act_r_code (path):
return current_connection.evaluate_single("load-act-r-code",path)
def goal_focus (goal=None):
return current_connection.evaluate_single("goal-focus",goal)
def clear_exp_window(win=None):
return current_connection.evaluate_single("clear-exp-window",win)
def open_exp_window(title,visible=True,width=300,height=300,x=300,y=300):
return current_connection.evaluate_single("open-exp-window", title, [["visible", visible], ["width", width],
["height", height], ["x", x], ["y", y]])
def add_text_to_exp_window(window,text,x=0,y=0,color='black',height=20,width=75,font_size=12):
return current_connection.evaluate_single("add-text-to-exp-window", window, text,[["color", color], ["width", width],
["height", height], ["x", x], ["y", y],
["font-size", font_size]])
def add_button_to_exp_window(window,text="",x=0,y=0,action=None,height=20,width=75,color='gray'):
return current_connection.evaluate_single("add-button-to-exp-window",window,[["color", color], ["width", width],
["height", height], ["x", x], ["y", y],
["text", text], ["action", action]])
def remove_items_from_exp_window(window,*items):
return current_connection.evaluate_single("remove-items-from-exp-window",window,*items)
def install_device(device):
return current_connection.evaluate_single("install-device",device)
def print_warning(warning):
current_connection.evaluate("print-warning",warning)
def act_r_output(output):
current_connection.evaluate("act-r-output",output)
def random(value):
return current_connection.evaluate_single("act-r-random",value)
def add_command(name,function=None,documentation="No documentation provided.",single=True,local_name=None):
return current_connection.add_command(name,function,documentation,single,local_name)
def monitor_command(original,monitor):
return current_connection.monitor_command(original,monitor)
def remove_command_monitor(original,monitor):
return current_connection.remove_command_monitor(original,monitor)
def remove_command(name):
return current_connection.remove_command(name)
def print_visicon():
return current_connection.evaluate_single("print-visicon")
def mean_deviation(results,data,output=True):
return current_connection.evaluate_single("mean-deviation",results,data,output)
def correlation(results,data,output=True):
return current_connection.evaluate_single("correlation",results,data,output)
def get_time(model_time=True):
return current_connection.evaluate_single("get-time",model_time)
def buffer_status (*params):
return current_connection.evaluate_single("buffer-status", *params)
def buffer_read (buffer):
return current_connection.evaluate_single("buffer-read", buffer)
def clear_buffer (buffer):
return current_connection.evaluate_single("clear-buffer", buffer)
def new_tone_sound (freq, duration, onset=False, time_in_ms=False):
return current_connection.evaluate_single("new-tone-sound", freq, duration, onset, time_in_ms)
def new_word_sound (word, onset=False, location='external', time_in_ms=False):
return current_connection.evaluate_single("new-word-sound", word, onset, location, time_in_ms)
def new_digit_sound (digit, onset=False, time_in_ms=False):
return current_connection.evaluate_single("new-digit-sound", digit, onset, time_in_ms)
def define_chunks (*chunks):
return current_connection.evaluate_single("define-chunks", *chunks)
def define_chunks_fct (chunks):
return current_connection.evaluate_single("define-chunks", *chunks)
def add_dm (*chunks):
return current_connection.evaluate_single("add-dm", *chunks)
def add_dm_fct (chunks):
return current_connection.evaluate_single("add-dm-fct", chunks)
def pprint_chunks (*chunks):
return current_connection.evaluate_single("pprint-chunks", *chunks)
def chunk_slot_value (chunk_name, slot_name):
return current_connection.evaluate_single("chunk-slot-value", chunk_name, slot_name)
def set_chunk_slot_value (chunk_name, slot_name, new_value):
return current_connection.evaluate_single("set-chunk-slot-value", chunk_name, slot_name, new_value)
def mod_chunk (chunk_name, *mods):
return current_connection.evaluate_single("mod-chunk", chunk_name, *mods)
def mod_focus (*mods):
return current_connection.evaluate_single("mod-focus", *mods)
def chunk_p (chunk_name):
return current_connection.evaluate_single("chunk-p",chunk_name)
def copy_chunk (chunk_name):
return current_connection.evaluate_single("copy-chunk",chunk_name)
def extend_possible_slots (slot_name, warn=True):
return current_connection.evaluate_single("extend-possible-slots",slot_name,warn)
def model_output (output_string):
return current_connection.evaluate_single("model-output",output_string)
def set_buffer_chunk (buffer_name, chunk_name, requested=True):
return current_connection.evaluate_single("set-buffer-chunk",buffer_name,chunk_name,requested)
def add_line_to_exp_window (window, start, end, color = False):
if color:
return current_connection.evaluate_single("add-line-to-exp-window",window,start,end,color)
else:
return current_connection.evaluate_single("add-line-to-exp-window",window,start,end)
def modify_line_for_exp_window (line, start, end, color = False):
if color:
return current_connection.evaluate_single("modify-line-for-exp-window",line,start,end,color)
else:
return current_connection.evaluate_single("modify-line-for-exp-window",line,start,end)
def start_hand_at_mouse ():
return current_connection.evaluate_single("start-hand-at-mouse")
def schedule_event (time, action, params=None, module=':NONE', priority=0, maintenance=False, destination=None, details=None,output=True,time_in_ms=False,precondition=None):
return current_connection.evaluate_single("schedule-event",time,action,[["params", params],["module", module],
["priority", priority],["maintenance", maintenance],
["destination", destination], ["details", details],
["output", output],["time-in-ms", time_in_ms],
["precondition", precondition]])
def schedule_event_now (action, params=None, module=':NONE', priority=0, maintenance=False, destination=None, details=None,output=True,precondition=None):
return current_connection.evaluate_single("schedule-event-now",action,[["params", params],["module", module],
["priority", priority],["maintenance", maintenance],
["destination", destination], ["details", details],
["output", output], ["precondition", precondition]])
def schedule_event_relative (time_delay, action, params=None, module=':NONE', priority=0, maintenance=False, destination=None, details=None,output=True,time_in_ms=False,precondition=None):
return current_connection.evaluate_single("schedule-event-relative",time_delay,action,[["params", params],["module", module],
["priority", priority],["maintenance", maintenance],
["destination", destination], ["details", details],
["output", output],["time-in-ms", time_in_ms],
["precondition", precondition]])
def schedule_event_after_module (after_module, action, params=None, module=':NONE', maintenance=False, destination=None, details=None, output=True, precondition=None, dynamic=False, delay=True, include_maintenance=False):
return current_connection.evaluate("schedule-event-after-module",after_module,action,[["params", params],["module", module],
["maintenance", maintenance],
["destination", destination], ["details", details],
["output", output],["delay", delay], ["dynamic", dynamic],
["precondition", precondition],["include-maintenance", include_maintenance]])
def schedule_break_relative (time_delay,time_in_ms=False, priority=":max", details=None):
return current_connection.evaluate_single("schedule-break-relative",time_delay,[["time-in-ms", time_in_ms],["priority", priority],["details",details]])
def mp_show_queue(indicate_traced=False):
return current_connection.evaluate_single("mp-show-queue",indicate_traced)
def print_dm_finsts():
return current_connection.evaluate_single("print-dm-finsts")
def spp (*params):
return current_connection.evaluate_single("spp", *params)
def mp_models():
return current_connection.evaluate_single("mp-models")
def all_productions():
return current_connection.evaluate_single("all-productions")
def buffers():
return current_connection.evaluate_single("buffers")
def printed_visicon():
return current_connection.evaluate_single("printed-visicon")
def print_audicon():
return current_connection.evaluate_single("print-audicon")
def printed_audicon():
return current_connection.evaluate_single("printed-audicon")
def printed_parameter_details(param):
return current_connection.evaluate_single("printed-parameter-details",param)
def sorted_module_names():
return current_connection.evaluate_single("sorted-module-names")
def modules_parameters(module):
return current_connection.evaluate_single("modules-parameters",module)
def modules_with_parameters():
return current_connection.evaluate_single("modules-with-parameters")
def used_production_buffers():
return current_connection.evaluate_single("used-production-buffers")
def record_history(*params):
return current_connection.evaluate_single("record-history",*params)
def stop_recording_history(*params):
return current_connection.evaluate_single("stop-recording-history",*params)
def get_history_data(history,*params):
return current_connection.evaluate_single("get-history-data",history,*params)
def history_data_available(history,file=False,*params):
return current_connection.evaluate_single("history-data-available",history,file,*params)
def process_history_data(processor,file=False,data_params=None,processor_params=None):
return current_connection.evaluate_single("process-history-data",processor,file,data_params,processor_params)
def save_history_data(history,file,comment="",*params):
return current_connection.evaluate_single("save-history-data",history,file,comment,*params)
def dm (*params):
return current_connection.evaluate_single("dm", *params)
def sdm (*params):
return current_connection.evaluate_single("sdm", *params)
def get_parameter_value(param):
return current_connection.evaluate_single("get-parameter-value",param)
def set_parameter_value(param,value):
return current_connection.evaluate_single("set-parameter-value",param,value)
def get_system_parameter_value(param):
return current_connection.evaluate_single("get-system-parameter-value",param)
def set_system_parameter_value(param,value):
return current_connection.evaluate_single("set-system-parameter-value",param,value)
def sdp (*params):
return current_connection.evaluate_single("sdp", *params)
def simulate_retrieval_request (*spec):
return current_connection.evaluate_single("simulate-retrieval-request", *spec)
def saved_activation_history ():
return current_connection.evaluate_single("saved-activation-history")
def print_activation_trace (time, ms = True):
return current_connection.evaluate_single("print-activation-trace",time,ms)
def print_chunk_activation_trace (chunk, time, ms = True):
return current_connection.evaluate_single("print-chunk-activation-trace",chunk,time,ms)
def pp (*params):
return current_connection.evaluate_single("pp", *params)
def trigger_reward(reward,maintenance=False):
return current_connection.evaluate_single("trigger-reward",reward,maintenance)
def define_chunk_spec (*spec):
return current_connection.evaluate_single("define-chunk-spec", *spec)
def chunk_spec_to_chunk_def(spec_id):
return current_connection.evaluate_single("chunk-spec-to-chunk-def", spec_id)
def release_chunk_spec(spec_id):
return current_connection.evaluate_single("release-chunk-spec-id", spec_id)
def schedule_simple_set_buffer_chunk (buffer, chunk, time, module='NONE', priority=0, requested=True):
return current_connection.evaluate_single("schedule-simple-set-buffer-chunk",buffer,chunk,time,module,priority,requested)
def schedule_simple_mod_buffer_chunk (buffer, mod_list_or_spec, time, module='NONE', priority=0):
return current_connection.evaluate_single("schedule-simple-mod-buffer-chunk",buffer,mod_list_or_spec,time,module,priority)
def schedule_set_buffer_chunk (buffer, chunk, time, module=':NONE', priority=0, output='low',time_in_ms=False,requested=True):
return current_connection.evaluate_single("schedule-set-buffer-chunk",buffer,chunk,time,[["module", module],
["priority", priority],["output", output],["time-in-ms", time_in_ms],
["requested", requested]])
def schedule_mod_buffer_chunk (buffer, mod_list_or_spec, time, module=':NONE', priority=0, output='low',time_in_ms=False):
return current_connection.evaluate_single("schedule-mod-buffer-chunk",buffer,mod_list_or_spec,time,[["module", module],
["priority", priority],["output", output],["time-in-ms", time_in_ms]])
def undefine_module(name):
return current_connection.evaluate_single("undefine-module", name)
def delete_chunk(name):
return current_connection.evaluate_single("delete-chunk", name)
def purge_chunk(name):
return current_connection.evaluate_single("purge-chunk", name)
def define_module (name, buffers,params,interface=None):
return current_connection.evaluate_single("define-module", name, buffers, params, interface)
def command_output(string):
return current_connection.evaluate_single("command-output",string)
def chunk_copied_from(chunk_name):
return current_connection.evaluate_single("chunk-copied-from",chunk_name)
def mp_time ():
return current_connection.evaluate_single("mp-time")
def mp_time_ms ():
return current_connection.evaluate_single("mp-time-ms")
def predict_bold_response(start=None,end=None,output=None):
if start == None:
return current_connection.evaluate_single("predict-bold-response")
elif end == None:
return current_connection.evaluate_single("predict-bold-response", start)
elif output == None:
return current_connection.evaluate_single("predict-bold-response", start, end)
else:
return current_connection.evaluate_single("predict-bold-response", start, end, output)
def pbreak (*params):
return current_connection.evaluate_single("pbreak", *params)
def punbreak (*params):
return current_connection.evaluate_single("punbreak", *params)
def create_image_for_exp_window(window,text,file,x=0,y=0,width=50,height=50,action=None):
return current_connection.evaluate_single("create-image-for-exp-window", window, text, file,
[['x', x],['y', y],['width', width],['height', height],['action', action]])
def add_image_to_exp_window(window,text,file,x=0,y=0,width=50,height=50,action=None):
return current_connection.evaluate_single("add-image-to-exp-window", window, text, file,
[['x', x],['y', y],['width', width],['height', height],['action', action]])
def add_items_to_exp_window(window, *items):
return current_connection.evaluate_single("add-items-to-exp-window",window, *items)
def add_visicon_features(*features):
return current_connection.evaluate_single("add-visicon-features",*features)
def delete_visicon_features(*features):
return current_connection.evaluate_single("delete-visicon-features",*features)
def delete_all_visicon_features():
return current_connection.evaluate_single("delete-all-visicon-features")
def modify_visicon_features(*features):
return current_connection.evaluate_single("modify-visicon-features",*features)
def running():
return current_connection.evaluate_single("act-r-running-p")
def stop_output():
current_connection.interface.no_output()
def resume_output():
current_connection.interface.echo_output()
def hide_output():
current_connection.interface.show_output = False
def unhide_output():
current_connection.interface.show_output = True
def process_events():
time.sleep(0)
def permute_list(l):
indexes = list(range(len(l)))
new_indexes = current_connection.evaluate_single("permute-list",indexes)
result = []
for i in new_indexes:
result.append(l[i])
return result
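# Illustrative session (assumes a running ACT-R instance and a model file path of
# your own; nothing below runs automatically on import):
#   import actr
#   actr.load_act_r_model("/path/to/model.lisp")
#   actr.reset()
#   actr.run(10)                 # run the model for 10 seconds of simulated time
#   actr.buffer_chunk("goal")    # inspect the chunk currently in the goal buffer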
|
utils.py
|
import re
import queue
import logging
import threading
logger = logging.getLogger(__name__)
# S3 multi-part upload parts must be larger than 5mb
KB = 1024
MB = KB**2
GB = KB**3
TB = KB**4
MIN_S3_SIZE = 5 * MB
def _threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
for _ in range(3):
# retry up to 3 times before giving up
try:
response = callback(item, *args, **kwargs)
except Exception:
logger.exception("Retry failed batch of: {}".format(item))
else:
item_list.append(response)
break
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
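# Usage sketch (hypothetical callback; illustrative only):
#   def upload_part(part, s3_client):
#       ...  # e.g. one multipart-upload call per part
#   responses = _threads(4, parts, upload_part, s3_client)
# Each item in `data` is pushed onto a queue and consumed by `num_threads` daemon
# workers; each item gets up to 3 attempts, and successful return values are
# collected into the returned list (order is not guaranteed).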
def _create_s3_client(session, s3_client_kwargs=None):
if s3_client_kwargs is None:
s3_client_kwargs = {}
return session.client('s3', **s3_client_kwargs)
def _chunk_by_size(file_list, min_file_size):
"""Split list by size of file
Arguments:
file_list {list} -- List of tuples as (<filename>, <file_size>)
min_file_size {int} -- Min part file size in bytes
Returns:
list -- List of (index, files) tuples; each group's total exceeds min_file_size, except possibly the last
"""
grouped_list = []
current_list = []
current_size = 0
current_index = 1
for p in file_list:
current_size += p[1]
current_list.append(p)
if min_file_size is not None and current_size > min_file_size:
grouped_list.append((current_index, current_list))
current_list = []
current_size = 0
current_index += 1
# Get anything left over
if current_size != 0:
grouped_list.append((current_index, current_list))
return grouped_list
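# Illustrative example: with MIN_S3_SIZE (5 MB) as the threshold,
#   _chunk_by_size([("a.csv", 3 * MB), ("b.csv", 3 * MB), ("c.csv", 1 * MB)], MIN_S3_SIZE)
# returns
#   [(1, [("a.csv", 3145728), ("b.csv", 3145728)]), (2, [("c.csv", 1048576)])]
# i.e. a group is closed as soon as it exceeds the minimum part size, and any
# remainder becomes a final (possibly undersized) group.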
def _convert_to_bytes(value):
"""Convert the input value to bytes
Arguments:
value {string} -- Value and size of the input with no spaces
Returns:
float -- The value converted to bytes as a float
Raises:
ValueError -- if the input value is not a valid type to convert
"""
if value is None:
return None
value = value.strip()
sizes = {'KB': 1024,
'MB': 1024**2,
'GB': 1024**3,
'TB': 1024**4,
}
if value[-2:].upper() in sizes:
return float(value[:-2].strip()) * sizes[value[-2:].upper()]
elif re.match(r'^\d+(\.\d+)?$', value):
return float(value)
elif re.match(r'^\d+(\.\d+)?\s?B$', value):
return float(value[:-1])
else:
raise ValueError("Value {} is not a valid size".format(value))
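# A minimal self-check (assumption: run this module directly; nothing here touches S3).
if __name__ == "__main__":
    # Unit suffixes map onto the powers of 1024 defined above.
    assert _convert_to_bytes("5MB") == 5 * MB
    assert _convert_to_bytes("1.5GB") == 1.5 * GB
    # A bare number is already a byte count.
    assert _convert_to_bytes("1024") == 1024.0
    print("size parsing examples OK")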
|
views.py
|
from rest_framework import pagination
from rest_framework.response import Response
from multiprocessing import Process
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator
from rest_framework.parsers import JSONParser
import asyncio, logging
from gaia.serializer import (
ArticlesSerializer,
BrandSerializer,
ArticlesWithIndexSerializer
)
from gaia.jobs.analyze import analyze
from gaia.dao.article_dao import ArticleDao
from gaia.dao.brand_dao import BrandDao
from gaia.pagination import CursorPagination
from gaia.gaia_utils import GaiaUtils
INDEX_URL_FORMAT = '{base_url}/?page={page}&page_size={page_size}'
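# For example (illustrative values):
#   INDEX_URL_FORMAT.format(base_url='https://host/articles/acme', page=2, page_size=20)
#   -> 'https://host/articles/acme/?page=2&page_size=20'
# article_list below uses this page/page_size style only when ?page_size= is given;
# otherwise it falls back to CursorPagination.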
@csrf_exempt
def article_list(request, brand_name):
brand_name = brand_name.lower()
def handle_request_with_page_size(page, page_size):
base_url = request.build_absolute_uri('/articles/{}'.format(brand_name))
try:
page = GaiaUtils.convert_positive_int(page)
page_size = GaiaUtils.convert_positive_int(page_size)
except Exception:
return JsonResponse(
{'error_message': 'Params should be positive numbers: page={}, page_size={}'.format(page, page_size)}, status=402)
articles = ArticleDao.get_articles_by_brand(brand_name)
paginator = Paginator(articles, page_size)
current_page = paginator.page(page)
next = None
prev = None
if current_page.has_next():
next = INDEX_URL_FORMAT.format(
base_url=base_url,
page=page+1,
page_size=page_size
)
if current_page.has_previous():
prev = INDEX_URL_FORMAT.format(
base_url=base_url,
page=page-1,
page_size=page_size
)
result = {
'total': paginator.count,
'next': next,
'prev': prev,
'results': paginator.page(page).object_list
}
serializer = ArticlesWithIndexSerializer(result)
return JsonResponse(serializer.data, safe=False, status=200)
def handle_request(page):
if page is not None:
try:
page = GaiaUtils.convert_positive_int(page)
except Exception:
return JsonResponse(
{'error_message': 'Param should be positive number'}, status=402)
pagination = CursorPagination(ArticleDao.get_articles_by_brand_with_limit,
url=request.build_absolute_uri())
pagination.paginate_query(brand_name, page)
serializer = ArticlesSerializer(pagination.paginate_result())
return JsonResponse(serializer.data, safe=False, status=200)
if request.method == 'GET':
page = request.GET.get('page')
page_size = request.GET.get('page_size')
if page_size is not None:
return handle_request_with_page_size(page, page_size)
return handle_request(page)
@csrf_exempt
def brand_detail(request, name):
if request.method == 'GET':
brand = BrandDao.get_brand_by_name(name.lower())
if brand is None:
return JsonResponse({'error_message': 'Brand not found'}, status=404)
serializer = BrandSerializer(brand)
return JsonResponse(serializer.data, safe=False, status=200)
@csrf_exempt
def news_scraper(request):
if request.method == 'POST':
data = JSONParser().parse(request)
'''
TODO: change to label news task
data = {
'filename': filename,
'bucket_name': bucket_name
}
'''
label_news = Process(target=analyze, args=(data["filename"],))
label_news.start()
return JsonResponse({'success': True}, status=200)
def warning(data):
logging.warning(data)
|
data_reader_Audio_RIRs.py
|
import copy
import fnmatch
import os
import random
import re
import threading
import math
import librosa
import numpy as np
import tensorflow as tf
import json
import pickle
from numpy.random import permutation
from numpy.random import randint
import numpy as np
import pandas as pd
from scipy import signal
import scipy
from read_Audio_RIRs import *
from speech_embedding import mix_generator as emb_mix_generator
def butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def butter_highpass_filter(data, cutoff, fs, order=5):
b, a = butter_highpass(cutoff, fs, order=order)
y = signal.filtfilt(b, a, data)
return y
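# Example (illustrative): the loaders below call this as
#   butter_highpass_filter(audio, 30, sample_rate, order=5)
# i.e. a 5th-order Butterworth high-pass at 30 Hz applied forward and backward
# (filtfilt, zero phase), presumably to strip DC offset and low-frequency rumble.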
CLEAN_DATA_RANGE = {"gender": ['m', 'f'],
"num": list(np.arange(1, 10)),
"script": [1,2,3,4],
"device": ['clean'],
"scene": []}
CLEAN_TEST_DATA_RANGE = {"gender": ['m', 'f'],
"num": [10],
"script": [5],
"device": ['clean'],
"scene": []}
CLEAN_FEMALE_DATA_RANGE = {"gender": ['f'],
"num": list(np.arange(1, 10)),
"script": [1,2,3,4],
"device": ['clean'],
"scene": []}
CLEAN_FEMALE_TEST_DATA_RANGE = {"gender": ['f'],
"num": [10],
"script": [5],
"device": ['clean'],
"scene": []}
DIRECTORY="daps"
GENDERS = ["f", "m"]
NUMS = range(1, 11)
SCRIPTS = range(1, 6)
DEVICES = ["ipad", "ipadflat", "iphone"]
SCENES = ["office1", "office2", "confroom1", "confroom2", "livingroom1", "bedroom1", "balcony1"]
def query_joint_yield_pair(gender=None, num=None, script=None, device=None,
scene=None, directory=DIRECTORY, directory_produced=DIRECTORY, exam_ignored=True, randomized=False,
sample_rate=None):
'''
inputs are all lists
'''
if exam_ignored:
filtered_gender = gender if gender else GENDERS
filtered_num = num if num else NUMS
filtered_script = script if script else SCRIPTS
filtered_device = device if device else DEVICES
filtered_scene = scene if scene else SCENES
else:
filtered_gender = [g for g in gender if g in GENDERS] if gender else GENDERS
filtered_num = [n for n in num if n in NUMS] if num else NUMS
filtered_script = [s for s in script if s in SCRIPTS] if script else SCRIPTS
filtered_device = [d for d in device if d in DEVICES] if device else DEVICES
filtered_scene = [s for s in scene if s in SCENES ] if scene else SCENES
book = [ (g, n, st, d, s) for g in filtered_gender for n in filtered_num
for st in filtered_script for d in filtered_device for s in filtered_scene]
if randomized:
book = permutation(book)
for (g, n, st, d, s) in book:
filename = directory+"/"+d+"_"+s+"/"+g+str(n)+"_script"+str(st)+"_"+d+"_"+s+".wav"
produced_filename = directory_produced+"/clean/"+g+str(n)+"_script"+str(st)+"_clean.wav"
try:
# print(filename, produced_filename)
input_audio, _ = librosa.load(filename, sr=sample_rate, mono=True, dtype=np.float32)
target_audio, _ = librosa.load(produced_filename, sr=sample_rate, mono=True, dtype=np.float32)
input_audio = butter_highpass_filter(input_audio, 30, sample_rate, order=5)
target_audio = butter_highpass_filter(target_audio, 30, sample_rate, order=5)
input_audio = input_audio.reshape(-1, 1)
target_audio = target_audio.reshape(-1, 1)
yield input_audio, target_audio, {"gender": g, "num": n, "script": st, "device": d, "scene": s}
except Exception as e:
print(e)
continue
def query_joint_yield(gender=None, num=None, script=None, device=None,
scene=None, directory=DIRECTORY, exam_ignored=True, randomized=False):
'''
inputs are all lists
'''
if exam_ignored:
filtered_gender = gender if gender else GENDERS
filtered_num = num if num else NUMS
filtered_script = script if script else SCRIPTS
filtered_device = device if device else DEVICES
filtered_scene = scene if scene else SCENES
else:
filtered_gender = [g for g in gender if g in GENDERS] if gender else GENDERS
filtered_num = [n for n in num if n in NUMS] if num else NUMS
filtered_script = [s for s in script if s in SCRIPTS] if script else SCRIPTS
filtered_device = [d for d in device if d in DEVICES] if device else DEVICES
filtered_scene = [s for s in scene if s in SCENES ] if scene else SCENES
book = [ (g, n, st) for g in filtered_gender for n in filtered_num
for st in filtered_script]
filenames = []
estimated_totals = len(book)
for (g, n, st) in book:
filename = directory+"/clean/"+g+str(n)+"_script"+str(st)+"_clean"+".wav"
if os.path.exists(filename):
filenames.append(filename)
return filenames
def trim_silence(audio, threshold=0.05, frame_length=2048):
'''Removes silence at the beginning and end of a sample.'''
if audio.size < frame_length:
frame_length = audio.size
energy = librosa.feature.rms(audio, frame_length=frame_length)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return ((audio[indices[0]:indices[-1]], indices)
if indices.size else (audio[0:0], None))
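# Example (illustrative): trim_silence(audio) keeps only the span between the first
# and last frame whose RMS energy exceeds the 0.05 threshold (boundaries are
# frame-quantized) and returns (trimmed_audio, sample_indices); a fully silent clip
# yields (audio[0:0], None).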
# epsilon = 1e-9
# SR = 16000
# '''
# The STFT parameters were: 10 ms
# hop-size, 25 ms frame-length, and Hann window.
# '''
# N_FFT = int(SR * 0.025)
# SHIFT = int(SR * 0.010)
# '''
# # Frames by # channels
# '''
# def stft(y):
# return np.abs(librosa.core.stft(np.squeeze(y), n_fft=N_FFT, hop_length=SHIFT)).T
def approx_binary_label(D):
raw_score = (-1*D/20.0)
exp_score = np.exp(raw_score)
binary_label = exp_score / np.sum(exp_score)
return binary_label
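# Note: this is a softmax over -D/20, i.e. smaller distances D receive exponentially
# larger weight, producing a soft "similarity" label that sums to 1.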
def extract_speaker_id(speech_filename):
label = speech_filename.split("/")[-1][0:3]
try:
speaker_id = int(label[1:3])
if label[0]=="f":
speaker_id = speaker_id + 10
except:
speaker_id = -1
if speaker_id>=0:
speaker_binary_label = tf.keras.utils.to_categorical(speaker_id-1, num_classes=20)
else:
speaker_binary_label = np.zeros((20,))
return speaker_id, speaker_binary_label
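# Example (illustrative filenames): "m03_script1_clean.wav" -> speaker_id 3,
# "f03_script1_clean.wav" -> speaker_id 13 (female ids are offset by 10); the id is
# also returned as a one-hot vector over 20 speakers, and unparsable names give
# (-1, zeros(20)).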
class MixGeneratorSpec_single:
'Generates data for Keras'
def __init__(self, speech_filenames, reverb_filenames, noise_filenames, sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True, augment_speech=False, augment_reverb=False, norm_volume=False,
inject_noise=False,
raw_stft_similarity_score = None,
norm_reverb_peak=True,
SNRdB=None,
in_memory=1.0,
cut_length=160000):
'Initialization'
self.speech_filenames = speech_filenames
self.reverb_filenames = reverb_filenames
self.noise_filenames = noise_filenames
if speech_data_holder:
self.speech_data_holder = speech_data_holder
else:
self.speech_data_holder = {}
if reverb_data_holder:
self.reverb_data_holder = reverb_data_holder
else:
self.reverb_data_holder = {}
if noise_data_holder:
self.noise_data_holder = noise_data_holder
else:
self.noise_data_holder = {}
self.sample_rate = sample_rate
self.num_classes = num_classes
self.shuffle = shuffle
self.augment_speech = augment_speech
self.augment_reverb = augment_reverb
self.norm_volume = norm_volume
self.inject_noise = inject_noise
self.norm_reverb_peak = norm_reverb_peak
self.raw_stft_similarity_score = raw_stft_similarity_score
self.SNRdB = SNRdB
self.in_memory = in_memory
self.cut_length = int(cut_length)
self.epoch_index = 0
self.on_epoch_end()
# print(len(self.indexes))
def on_epoch_end(self):
'Updates indexes after each epoch'
idx0, idx1 = np.meshgrid(np.arange(len(self.speech_filenames)),
np.arange(len(self.reverb_filenames)))
idx0 = idx0.reshape((-1))
idx1 = idx1.reshape((-1))
self.indexes = list(zip(idx0, idx1))
print("Epoch: " + str(self.epoch_index) + " speech: " + str(len(self.speech_filenames)) + " reveb: " + str(len(self.reverb_filenames)) + " num samples: " + str(len(self.indexes)) + "\n")
if self.shuffle == True:
np.random.shuffle(self.indexes) # in-place shuffle
self.epoch_index = self.epoch_index + 1
def num_elements(self):
return len(self.indexes)
def get_speech_audio(self, speech_filename=None, idx=0):
if speech_filename is None:
speech_filename = self.speech_filenames[int(idx)]
if speech_filename in self.speech_data_holder:
speech_audio = self.speech_data_holder[speech_filename]
else:
speech_audio, _ = librosa.core.load(speech_filename, sr=self.sample_rate, mono=True, dtype=np.float32)
if len(self.speech_data_holder.keys()) < self.in_memory * len(self.speech_filenames):
self.speech_data_holder[speech_filename] = speech_audio
return speech_audio, speech_filename
def get_reverb_audio(self, reverb_filename=None, idx=0):
if reverb_filename is None:
reverb_filename = self.reverb_filenames[int(idx)]
if reverb_filename["filepath"] in self.reverb_data_holder:
reverb_audio = self.reverb_data_holder[reverb_filename["filepath"]]
else:
reverb_audio, _ = librosa.core.load(reverb_filename["filepath"], sr=self.sample_rate, mono=True, dtype=np.float32)
if len(self.reverb_data_holder.keys()) < self.in_memory * len(self.reverb_filenames):
self.reverb_data_holder[reverb_filename["filepath"]] = reverb_audio
# number = int(reverb_filename.split("/")[-1][1:4])
number = reverb_filename["id"]
return reverb_audio, number, reverb_filename
def get_noise_audio(self, noise_filename=None, idx=0):
if noise_filename is None:
noise_filename = self.noise_filenames[int(idx)]
if noise_filename in self.noise_data_holder:
noise = self.noise_data_holder[noise_filename]
else:
noise, _ = librosa.core.load(noise_filename, sr=self.sample_rate, mono=True, dtype=np.float32)
if len(self.noise_data_holder.keys()) < self.in_memory * len(self.noise_filenames):
self.noise_data_holder[noise_filename] = noise
return noise, noise_filename
def __iter__(self):
while True:
for idx in self.indexes:
speech_filename = self.speech_filenames[int(idx[0])]
reverb_filename = self.reverb_filenames[int(idx[1])]
speech_audio, _ = self.get_speech_audio(speech_filename)
speech_audio, keep_indices = trim_silence(speech_audio)
reverb_audio, number, _ = self.get_reverb_audio(reverb_filename)
if self.shuffle:
rand_start = int(np.random.randint(0, len(speech_audio) - self.cut_length, 1))
speech_audio = speech_audio[rand_start:rand_start+self.cut_length]
# inject noise here: /mnt/ilcompfbd1/user/jsu/reverb_tools_for_Generate_SimData/NOISE
if self.inject_noise:
if np.random.uniform()<0.9:
noise_idx = int(np.random.randint(0, len(self.noise_filenames), 1))
noise, _ = self.get_noise_audio(idx=noise_idx)
else:
noise = emb_mix_generator.generate_gaussian_noise(len(speech_audio))
else:
noise = None
noisy_audio, speech_audio, _, pre_noisy_audio = emb_mix_generator.mix_reverb_noise(speech_audio,
reverb_audio,
sample_rate=self.sample_rate,
noise=noise,
augment_speech=self.augment_speech,
augment_reverb=self.augment_reverb,
norm_volume=self.norm_volume,
norm_reverb_peak=self.norm_reverb_peak,
SNRdB=self.SNRdB)
if number<self.num_classes:
binary_label = tf.keras.utils.to_categorical(number, num_classes=self.num_classes)
else:
if self.raw_stft_similarity_score is not None:
binary_label = self.raw_stft_similarity_score[number, :]
else:
binary_label = np.zeros((self.num_classes,))
speaker_id, speaker_binary_label = extract_speaker_id(speech_filename)
string_label = reverb_filename["filepath"].split("/")[-1].split(".")[0]+"|"+speech_filename.split("/")[-1].split(".")[0]
yield noisy_audio[:, np.newaxis], pre_noisy_audio[:, np.newaxis], speech_audio[:, np.newaxis], binary_label, speaker_binary_label, string_label
self.on_epoch_end()
class MixGeneratorSpec_pair(MixGeneratorSpec_single):
'Generates data for Keras'
def __init__(self, speech_filenames, reverb_filenames, noise_filenames, sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True, augment_speech=False, augment_reverb=False, norm_volume=False,
inject_noise=False, raw_stft_similarity_score=None,
norm_reverb_peak=True,
SNRdB=None,
in_memory=1.0,
cut_length=160000):
'Initialization'
super().__init__(speech_filenames, reverb_filenames, noise_filenames, sample_rate,
speech_data_holder=speech_data_holder,
reverb_data_holder=reverb_data_holder,
noise_data_holder=noise_data_holder,
num_classes=num_classes,
shuffle=shuffle, augment_speech=augment_speech, augment_reverb=augment_reverb, norm_volume=norm_volume,
inject_noise=inject_noise, raw_stft_similarity_score=raw_stft_similarity_score,
norm_reverb_peak=norm_reverb_peak,
SNRdB=SNRdB,
in_memory=in_memory,
cut_length=cut_length)
def on_epoch_end(self):
'Updates indexes after each epoch'
idx1, idx2 = np.meshgrid(np.arange(len(self.reverb_filenames)),
np.arange(len(self.reverb_filenames)))
idx1 = idx1.reshape((-1))
idx2 = idx2.reshape((-1))
print(len(self.speech_filenames), len(self.reverb_filenames), idx1.shape, idx2.shape)
self.indexes = list(zip(idx1, idx2))
if self.shuffle == True:
np.random.shuffle(self.indexes) # in-place shuffle
self.epoch_index = self.epoch_index + 1
def __iter__(self):
while True:
for idx in self.indexes:
speech_audio, speech_filename = self.get_speech_audio(idx=np.random.randint(0, len(self.speech_filenames), 1))
speech_audio, keep_indices = trim_silence(speech_audio)
if self.shuffle:
rand_start = int(np.random.randint(0, len(speech_audio) - self.cut_length, 1))
speech_audio = speech_audio[rand_start:rand_start+self.cut_length]
reverb_audio, number, reverb_filename = self.get_reverb_audio(idx=idx[0])
reverb_audio_2, number_2, reverb_filename_2 = self.get_reverb_audio(idx=idx[1])
# inject noise here: /mnt/ilcompfbd1/user/jsu/reverb_tools_for_Generate_SimData/NOISE
if self.inject_noise:
if np.random.uniform()<0.95:
noise, _ = self.get_noise_audio(idx=np.random.randint(0, len(self.noise_filenames), 1))
else:
noise = emb_mix_generator.generate_gaussian_noise(len(speech_audio))
if np.random.uniform()<0.95:
noise_2, _ = self.get_noise_audio(idx=np.random.randint(0, len(self.noise_filenames), 1))
else:
noise_2 = emb_mix_generator.generate_gaussian_noise(len(speech_audio))
else:
noise = None
noise_2 = None
noisy_audio, speech_audio, _, _ = emb_mix_generator.mix_reverb_noise(speech_audio,
reverb_audio,
sample_rate=self.sample_rate,
noise=noise,
augment_speech=self.augment_speech,
augment_reverb=self.augment_reverb,
norm_volume=self.norm_volume,
norm_reverb_peak=self.norm_reverb_peak,
SNRdB=self.SNRdB)
noisy_audio_2, _, _, pre_noisy_audio_2 = emb_mix_generator.mix_reverb_noise(speech_audio,
reverb_audio_2,
sample_rate=self.sample_rate,
noise=noise_2,
augment_speech=False,
augment_reverb=self.augment_reverb,
norm_volume=self.norm_volume,
norm_reverb_peak=self.norm_reverb_peak,
SNRdB=self.SNRdB)
if number<self.num_classes:
binary_label = tf.keras.utils.to_categorical(number, num_classes=self.num_classes)
else:
if self.raw_stft_similarity_score is not None:
binary_label = self.raw_stft_similarity_score[number, :]
else:
binary_label = np.zeros((self.num_classes,))
if number_2<self.num_classes:
binary_label_2 = tf.keras.utils.to_categorical(number_2, num_classes=self.num_classes)
else:
if self.raw_stft_similarity_score is not None:
binary_label_2 = self.raw_stft_similarity_score[number_2, :]
else:
binary_label_2 = np.zeros((self.num_classes,))
string_label = reverb_filename["filepath"].split("/")[-1].split(".")[0]+"|"+speech_filename.split("/")[-1].split(".")[0]
string_label_2 = reverb_filename_2["filepath"].split("/")[-1].split(".")[0]+"|"+speech_filename.split("/")[-1].split(".")[0]
yield noisy_audio[:, np.newaxis], noisy_audio_2[:, np.newaxis], pre_noisy_audio_2[:, np.newaxis], speech_audio[:, np.newaxis], binary_label, binary_label_2, string_label, string_label_2
self.on_epoch_end()
class DataReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
directory,
coord,
sample_size,
hint_size,
target_size,
sample_rate,
random_crop=True,
queue_size=32,
data_range=CLEAN_DATA_RANGE,
test_data_range=CLEAN_TEST_DATA_RANGE,
disc_thread_enabled=True,
spec_generator=None,
use_label_class=False,
hint_window=256,
inject_noise=False,
augment_reverb=False,
augment_speech=True,
norm_volume=False,
stft_similarity=None,
norm_reverb_peak=True):
self.directory = os.path.abspath(directory)
print(self.directory)
self.data_range = data_range
self.test_data_range = test_data_range
self.coord = coord
self.sample_size = sample_size
self.random_crop = random_crop
self.target_size = target_size
self.hint_size = hint_size
self.sample_rate = sample_rate
self.silence_threshold = 0.15
self.disc_thread_enabled = disc_thread_enabled
self.use_label_class = use_label_class
self.hint_window = hint_window
self.inject_noise = inject_noise
self.augment_reverb = augment_reverb
self.augment_speech = augment_speech
self.norm_volume = norm_volume
self.norm_reverb_peak = norm_reverb_peak
if use_label_class and augment_reverb:
raise ValueError("Reverbs can not be augmented when class label is used for conditioning.")
self.stft_similarity = stft_similarity
if self.stft_similarity is not None:
raw_score = (-1*self.stft_similarity[:, :200]/20.0)
exp_score = np.exp(raw_score)
self.raw_stft_similarity_score = exp_score / np.sum(exp_score, axis=-1, keepdims=True)
else:
self.raw_stft_similarity_score = None
self.spec_generator = spec_generator
self.train_filenames = query_joint_yield(gender=data_range["gender"],
num=data_range["num"],
script=data_range["script"],
device=data_range["device"],
scene=data_range["scene"],
directory=directory,
exam_ignored=True,
randomized=True)
self.test_filenames = query_joint_yield(gender=test_data_range["gender"],
num=test_data_range["num"],
script=test_data_range["script"],
device=test_data_range["device"],
scene=test_data_range["scene"],
directory=directory,
exam_ignored=True,
randomized=True)
#%% Reverb audio files
# 0-199, 271-595 Train
# 200-270 Test
# "h233"/232 is missing
self.class_dict, self.classes = get_Audio_RIR_classes("/home/code-base/runtime/experiments/weakly-aligned-denoising/rir_classes.json")
self.reverb_train_filenames, self.reverb_train_data_holder=read_Audio_RIRs(sr=self.sample_rate,
class_dict=self.class_dict,
subset_range=list(range(200))+list(range(271, 596)))
self.reverb_test_filenames, self.reverb_test_data_holder=read_Audio_RIRs(sr=self.sample_rate,
class_dict=self.class_dict,
subset_range=list(range(200, 271)))
self.noise_filenames, self.noise_data_holder = read_noise(sr=self.sample_rate,
root="/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/subnoises", preload=False)
######
self.threads = []
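# Training queue: each element is (noisy waveform piece, reverb hint [spectrogram frames or
# class label], 20-dim speaker label, clean target piece); PaddingFIFOQueue pads them to a
# common length per dequeued batch.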
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.speaker_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, 20))
self.target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder,
self.hint_placeholder,
self.speaker_hint_placeholder,
self.target_placeholder])
self.disc_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.disc_ref_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.disc_queue = tf.PaddingFIFOQueue(16,
['float32', 'float32'],
shapes=[(None, 1), (None, 1)])
self.disc_enqueue = self.disc_queue.enqueue([self.disc_placeholder,
self.disc_ref_placeholder])
"""For val set"""
self.test_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_speaker_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, 20))
self.test_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_queue = tf.PaddingFIFOQueue(40,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1)])
self.test_enqueue = self.test_queue.enqueue([self.test_placeholder,
self.test_hint_placeholder,
self.test_speaker_hint_placeholder,
self.test_target_placeholder])
"""For test set"""
self.test_ext_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_ext_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_ext_speaker_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, 20))
self.test_ext_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_ext_queue = tf.PaddingFIFOQueue(40,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1)])
self.test_ext_enqueue = self.test_ext_queue.enqueue([self.test_ext_placeholder,
self.test_ext_hint_placeholder,
self.test_ext_speaker_hint_placeholder,
self.test_ext_target_placeholder])
"""For real set"""
self.test_real_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_real_speaker_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, 20))
self.test_real_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_queue = tf.PaddingFIFOQueue(16,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1)])
self.test_real_enqueue = self.test_real_queue.enqueue([self.test_real_placeholder,
self.test_real_hint_placeholder,
self.test_real_speaker_hint_placeholder,
self.test_real_target_placeholder])
"""For inference"""
self.infer_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.infer_speaker_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, 20))
self.infer_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_class_placeholder = tf.placeholder(dtype=tf.string, shape=None)
self.infer_queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32', 'string'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1), (None,)])
self.infer_enqueue = self.infer_queue.enqueue([self.infer_placeholder,
self.infer_hint_placeholder,
self.infer_speaker_hint_placeholder,
self.infer_target_placeholder,
self.infer_class_placeholder])
def dequeue(self, num_elements):
return self.queue.dequeue_many(num_elements)
def dequeue_test(self, num_elements):
return self.test_queue.dequeue_many(num_elements)
def dequeue_test_ext(self, num_elements):
return self.test_ext_queue.dequeue_many(num_elements)
def dequeue_test_real(self, num_elements):
return self.test_real_queue.dequeue_many(num_elements)
def dequeue_disc(self, num_elements):
return self.disc_queue.dequeue_many(num_elements)
def dequeue_infer(self, num_elements):
return self.infer_queue.dequeue_many(num_elements)
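# A minimal usage sketch (illustrative values; `spec_gen` stands for whatever spectrogram
# generator instance is passed in), assuming a TF1-style session and coordinator:
#   coord = tf.train.Coordinator()
#   reader = DataReader(directory, coord, sample_size=16384, hint_size=200,
#                       target_size=4096, sample_rate=16000,
#                       spec_generator=spec_gen, disc_thread_enabled=False)
#   noisy, hint, speaker_hint, clean = reader.dequeue(num_elements=8)
#   with tf.Session() as sess:
#       reader.start_threads(sess, n_threads=4)
#       ...  # build and run the training graph on the dequeued tensors
#       coord.request_stop()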
# During training, crop waveform pieces at random positions when random_crop is enabled;
# otherwise slide through the file sequentially. The spectrogram hint window is picked at random in either case.
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.train_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=self.augment_speech,
inject_noise=self.inject_noise,
augment_reverb=self.augment_reverb,
norm_volume=self.norm_volume,
norm_reverb_peak=self.norm_reverb_peak,
cut_length=self.sample_size*5.0)
print("Loading Data...")
for input_waveform, _, target_waveform, binary_label, speaker_binary_label, _ in mix_generator:
if self.coord.should_stop():
stop = True
break
# padding
lag = self.target_size
# random_start = int(randint(0, lag, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
total_frames = input_spec.shape[0]
# print(total_frames)
elif self.use_label_class and self.hint_size==len(binary_label):
hint_piece = np.reshape(binary_label, (1, self.hint_size))
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# print(np.argmax(binary_label), input_waveform.shape, target_waveform.shape, input_spec.shape if self.spec_generator else 0)
# input_waveform and target_waveform are now of same length, and with 0-padding in front
if not self.random_crop:
while len(input_waveform) > self.sample_size:
piece = input_waveform[:self.sample_size, :]
input_waveform = input_waveform[lag:, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:start+self.target_size, :]
target_waveform = target_waveform[lag:, :]
if self.spec_generator:
if self.hint_window<=0:
hint_piece = input_spec
else:
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.hint_placeholder: hint_piece,
self.speaker_hint_placeholder: speaker_hint_piece,
self.target_placeholder: target_piece})
else:
length = input_waveform.shape[0]
num_pieces = 1
# print(num_pieces)
indices = randint(0, length-self.sample_size, num_pieces)
# if self.spec_generator:
# spec_indices = librosa.core.samples_to_frames(indices + int(self.sample_size/2), hop_length=self.spec_generator.shift, n_fft=self.spec_generator.n_fft)
for i in range(num_pieces):
idx = indices[i]
central = int(idx + self.sample_size/2-self.target_size/2)
piece = input_waveform[idx:idx+self.sample_size, :]
target_piece = target_waveform[central:central+self.target_size, :]
if self.spec_generator:
if self.hint_window<=0:
hint_piece = input_spec
else:
# random_spec = spec_indices[i]
# random_shift = randint(-int(self.hint_window/4), int(self.hint_window/4), 1)
# random_start = max(0, int(random_spec - self.hint_window/2 + random_shift))
# random_end = min(int(random_spec + self.hint_window/2 + random_shift), total_frames)
# hint_piece = input_spec[random_start:random_end, :]
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.hint_placeholder: hint_piece,
self.speaker_hint_placeholder: speaker_hint_piece,
self.target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test(self, sess):
stop = False
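# Validation uses a single longer excerpt per file: the model input window (sample_size) plus
# three extra target-size hops, so the whole piece can be evaluated in one pass.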
infer_sample_size = self.target_size * 3 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for input_waveform, _, target_waveform, binary_label, speaker_binary_label, number in mix_generator:
if self.coord.should_stop():
stop = True
break
print("test:", np.argmax(binary_label), number, input_waveform.shape, target_waveform.shape)
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
print("test: from spec generator", input_spec.shape)
elif self.use_label_class and self.hint_size==len(binary_label):
hint_piece = np.reshape(binary_label, (1, self.hint_size))
print("test: from binary label", hint_piece.shape)
else:
hint_piece = np.zeros((1, self.hint_size))
print("test: from dummy zeros", hint_piece.shape)
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_enqueue,
feed_dict={self.test_placeholder: piece,
self.test_hint_placeholder: hint_piece,
self.test_speaker_hint_placeholder: speaker_hint_piece,
self.test_target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test_ext(self, sess):
stop = False
infer_sample_size = self.target_size * 3 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_test_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for input_waveform, _, target_waveform, binary_label, speaker_binary_label, number in mix_generator:
if self.coord.should_stop():
stop = True
break
print("Ext test:", np.argmax(binary_label), number, input_waveform.shape, target_waveform.shape)
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_ext_enqueue,
feed_dict={self.test_ext_placeholder: piece,
self.test_ext_hint_placeholder: hint_piece,
self.test_ext_speaker_hint_placeholder: speaker_hint_piece,
self.test_ext_target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test_real(self, sess):
stop = False
# Go through the dataset multiple times
mix_generator = query_joint_yield_pair(gender=self.test_data_range["gender"], num=self.test_data_range["num"],
script=self.test_data_range["script"], device=None,
scene=None, directory=self.directory, directory_produced=self.directory,
exam_ignored=False,
randomized=True,
sample_rate=self.sample_rate)
for input_waveform, target_waveform, book in mix_generator:
if self.coord.should_stop():
stop = True
break
print("Real test:", book, input_waveform.shape, target_waveform.shape)
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_id, speaker_binary_label = extract_speaker_id(book["gender"]+str(book["num"]))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
count = 0
max_count = 10
random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
input_waveform = input_waveform[random_start:, :]
target_waveform = target_waveform[random_start:, :]
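# Slide over the real recording: enqueue up to max_count windows of sample_size samples,
# hopping by target_size/2, with the target cropped from the center of each window.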
while len(input_waveform) > self.sample_size and count<max_count:
count = count + 1
piece = input_waveform[:self.sample_size, :]
input_waveform = input_waveform[int(self.target_size/2):, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:start+self.target_size, :]
target_waveform = target_waveform[int(self.target_size/2):, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_real_enqueue,
feed_dict={self.test_real_placeholder: piece,
self.test_real_hint_placeholder: hint_piece,
self.test_real_speaker_hint_placeholder: speaker_hint_piece,
self.test_real_target_placeholder: target_piece})
# During inference, use entire audio file for spectrogram frames
def thread_infer(self, sess, start_idx=0, end_idx=271):
stop = False
infer_sample_size = self.target_size * 11 + self.sample_size
# Go through the dataset multiple times
full_reverb_filenames=self.reverb_train_filenames+self.reverb_test_filenames
my_reverb_filenames=full_reverb_filenames[start_idx:end_idx]
mix_generator = MixGeneratorSpec_single(self.test_filenames, my_reverb_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=False,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for input_waveform, _, target_waveform, binary_label, speaker_binary_label, number in mix_generator:
if mix_generator.epoch_index>1:
print("All finished")
stop = True
self.infer_queue.close(cancel_pending_enqueues=False)
break
# input_waveform = input_waveform[170000:]
# target_waveform = target_waveform[170000:]
if self.coord.should_stop():
stop = True
break
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
elif self.use_label_class and self.hint_size==len(binary_label):
hint_piece = np.reshape(binary_label, (1, self.hint_size))
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.infer_enqueue,
feed_dict={self.infer_placeholder: piece,
self.infer_hint_placeholder: hint_piece,
self.infer_speaker_hint_placeholder: speaker_hint_piece,
self.infer_target_placeholder: target_piece,
self.infer_class_placeholder: np.array([number])})
# During inference, use entire audio file for spectrogram frames
def thread_infer_real(self, sess):
stop = False
infer_sample_size = self.target_size * 11 + self.sample_size
# Go through the dataset multiple times
mix_generator = query_joint_yield_pair(gender=self.test_data_range["gender"], num=self.test_data_range["num"],
script=self.test_data_range["script"], device=None,
scene=None, directory=self.directory, directory_produced=self.directory,
exam_ignored=False,
randomized=False,
sample_rate=self.sample_rate)
for input_waveform, target_waveform, book in mix_generator:
if self.coord.should_stop():
stop = True
break
target_waveform, keep_indices = trim_silence(target_waveform[:, 0], self.silence_threshold)
target_waveform = target_waveform.reshape(-1, 1)
input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:infer_sample_size, 0])
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_id, speaker_binary_label = extract_speaker_id(book["gender"]+str(book["num"]))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
label = book["device"]+"_"+book["scene"]+"|"+book["gender"]+str(book["num"])+"_script"+str(book["script"])
sess.run(self.infer_enqueue,
feed_dict={self.infer_placeholder: piece,
self.infer_hint_placeholder: hint_piece,
self.infer_speaker_hint_placeholder: speaker_hint_piece,
self.infer_target_placeholder: target_piece,
self.infer_class_placeholder: np.array([label])})
def thread_disc(self, sess):
stop = False
# Go through the dataset multiple times
while not stop:
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise)
for input_waveform, _, target_waveform, binary_label, _, _ in mix_generator:  # unpack all six values yielded by MixGeneratorSpec_single
if self.coord.should_stop():
stop = True
break
while len(target_waveform) > self.sample_size:
target_piece = target_waveform[:self.target_size, :]
input_piece = input_waveform[:self.target_size, :]
target_waveform = target_waveform[int(self.target_size/2):, :]
input_waveform = input_waveform[int(self.target_size/2):, :]
sess.run(self.disc_enqueue,
feed_dict={self.disc_placeholder: target_piece,
self.disc_ref_placeholder: input_piece})
def start_threads(self, sess, n_threads=1):
for i in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
print("Generator train data loader thread ("+str(i+1)+"/"+str(n_threads)+") starts.")
thread = threading.Thread(target=self.thread_test, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
print("Generator val data loader thread (1/1) starts.")
thread = threading.Thread(target=self.thread_test_ext, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
print("Generator test data loader thread (1/1) starts.")
# thread = threading.Thread(target=self.thread_test_real, args=(sess,))
# thread.daemon = True # Thread will close when parent quits.
# thread.start()
# self.threads.append(thread)
# print("Generator test real data loader thread (1/1) starts.")
if self.disc_thread_enabled:
thread = threading.Thread(target=self.thread_disc, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
print("Discriminator data loader thread (1/1) starts.")
return self.threads
def start_infer_threads(self, sess, sim=True, n_threads=1):
if sim:
total_num = len(self.reverb_train_filenames+self.reverb_test_filenames)
piece = int(math.ceil(total_num/n_threads))
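# Split the full reverb list into n_threads contiguous chunks so that each inference thread
# renders a disjoint subset of rooms.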
for i in range(n_threads):
thread = threading.Thread(target=self.thread_infer, args=(sess, i*piece, min((i+1)*piece, total_num)))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
print("Generator sim infer data loader thread ("+str(i+1)+"/"+str(n_threads)+") starts.")
else:
thread = threading.Thread(target=self.thread_infer_real, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
print("Generator real infer data loader thread (1/1) starts.")
return self.threads
def output_audio(self, path, wav):
librosa.output.write_wav(path, wav, self.sample_rate)
class DataReader_pair(DataReader):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
directory,
coord,
sample_size,
hint_size,
target_size,
sample_rate,
random_crop=True,
queue_size=32,
data_range=CLEAN_DATA_RANGE,
test_data_range=CLEAN_TEST_DATA_RANGE,
disc_thread_enabled=True,
spec_generator=None,
use_label_class=False,
hint_window=256,
inject_noise=False,
augment_reverb=False,
augment_speech=True,
norm_volume=False,
stft_similarity=None,
norm_reverb_peak=True):
super().__init__(directory,
coord,
sample_size,
hint_size,
target_size,
sample_rate,
random_crop=random_crop,
queue_size=queue_size,
data_range=data_range,
test_data_range=test_data_range,
disc_thread_enabled=disc_thread_enabled,
spec_generator=spec_generator,
use_label_class=use_label_class,
hint_window=hint_window,
inject_noise=inject_noise,
augment_reverb=augment_reverb,
augment_speech=augment_speech,
norm_volume=norm_volume,
stft_similarity=stft_similarity,
norm_reverb_peak=norm_reverb_peak)
######
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder,
self.src_hint_placeholder,
self.tgt_hint_placeholder,
self.target_placeholder])
"""For val set"""
self.test_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_queue = tf.PaddingFIFOQueue(8,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1)])
self.test_enqueue = self.test_queue.enqueue([self.test_placeholder,
self.test_src_hint_placeholder,
self.test_tgt_hint_placeholder,
self.test_target_placeholder])
"""For test set"""
self.test_ext_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_ext_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_ext_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_ext_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_ext_queue = tf.PaddingFIFOQueue(8,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1)])
self.test_ext_enqueue = self.test_ext_queue.enqueue([self.test_ext_placeholder,
self.test_ext_src_hint_placeholder,
self.test_ext_tgt_hint_placeholder,
self.test_ext_target_placeholder])
"""For inference"""
self.infer_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.infer_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.infer_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_src_class_placeholder = tf.placeholder(dtype=tf.string, shape=None)
self.infer_tgt_class_placeholder = tf.placeholder(dtype=tf.string, shape=None)
self.infer_queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32', 'string', 'string'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1), (None,), (None,)])
self.infer_enqueue = self.infer_queue.enqueue([self.infer_placeholder,
self.infer_src_hint_placeholder,
self.infer_tgt_hint_placeholder,
self.infer_target_placeholder,
self.infer_src_class_placeholder,
self.infer_tgt_class_placeholder])
"""For real set"""
self.test_real_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_real_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_real_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_queue = tf.PaddingFIFOQueue(8,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1)])
self.test_real_enqueue = self.test_real_queue.enqueue([self.test_real_placeholder,
self.test_real_src_hint_placeholder,
self.test_real_tgt_hint_placeholder,
self.test_real_target_placeholder])
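# DataReader_pair keeps DataReader's dequeue_* helpers but swaps the speaker hint for a second
# (target-room) hint: each queue element is (waveform piece, source-room hint, target-room hint,
# target waveform).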
# During training, crop waveform pieces at random positions when random_crop is enabled;
# otherwise slide through the file sequentially. The spectrogram hint window(s) are picked at random in either case.
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_pair(self.train_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=self.augment_speech,
inject_noise=self.inject_noise,
augment_reverb=self.augment_reverb,
norm_volume=self.norm_volume,
norm_reverb_peak=self.norm_reverb_peak,
cut_length=self.sample_size*5.0)
print("Loading Data...")
for input_waveform, hint_waveform, target_waveform, _, src_binary_label, tgt_binary_label, _, _ in mix_generator:
if self.coord.should_stop():
stop = True
break
# padding
lag = self.target_size
# random_start = int(randint(0, lag, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
src_total_frames = input_spec.shape[0]
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
tgt_total_frames = target_spec.shape[0]
elif self.use_label_class and self.hint_size==len(src_binary_label) and self.hint_size==len(tgt_binary_label):
src_hint_piece = np.reshape(src_binary_label, (1, self.hint_size))
tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
# print(np.argmax(binary_label), input_waveform.shape, target_waveform.shape, input_spec.shape if self.spec_generator else 0)
# input_waveform and target_waveform are now of same length, and with 0-padding in front
if not self.random_crop:
while len(input_waveform) > self.sample_size:
piece = input_waveform[:self.sample_size, :]
input_waveform = input_waveform[lag:, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:start+self.target_size, :]
target_waveform = target_waveform[lag:, :]
if self.spec_generator:
if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
else:
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
src_hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
random_spec = int(randint(0, target_spec.shape[0] - self.hint_window, 1))
tgt_hint_piece = target_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.src_hint_placeholder: src_hint_piece,
self.tgt_hint_placeholder: tgt_hint_piece,
self.target_placeholder: target_piece})
else:
length = input_waveform.shape[0]
num_pieces = 1
# print(num_pieces)
indices = randint(0, length-self.sample_size, num_pieces)
# if self.spec_generator:
# spec_indices = librosa.core.samples_to_frames(indices + int(self.sample_size/2), hop_length=self.spec_generator.shift, n_fft=self.spec_generator.n_fft)
for i in range(num_pieces):
idx = indices[i]
central = int(idx + self.sample_size/2-self.target_size/2)
piece = input_waveform[idx:idx+self.sample_size, :]
target_piece = target_waveform[central:central+self.target_size, :]
if self.spec_generator:
if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
else:
# random_spec = spec_indices[i]
# random_shift = randint(-int(self.hint_window/4), int(self.hint_window/4), 1)
# random_start = max(0, int(random_spec - self.hint_window/2 + random_shift))
# random_end = min(int(random_spec + self.hint_window/2 + random_shift), src_total_frames)
# src_hint_piece = input_spec[random_start:random_end, :]
# random_shift = randint(-int(self.hint_window/4), int(self.hint_window/4), 1)
# random_start = max(0, int(random_spec - self.hint_window/2 + random_shift))
# random_end = min(int(random_spec + self.hint_window/2 + random_shift), tgt_total_frames)
# tgt_hint_piece = target_spec[random_start:random_end, :]
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
src_hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
random_spec = int(randint(0, target_spec.shape[0] - self.hint_window, 1))
tgt_hint_piece = target_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.src_hint_placeholder: src_hint_piece,
self.tgt_hint_placeholder: tgt_hint_piece,
self.target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test(self, sess):
stop = False
infer_sample_size = self.target_size * 3 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for hint_waveform, target_waveform, input_waveform, tgt_binary_label, tgt_number in mix_generator:
# for input_waveform, hint_waveform, target_waveform, src_binary_label, tgt_binary_label, src_number, tgt_number in mix_generator:
if self.coord.should_stop():
stop = True
break
# print("test:", np.argmax(src_binary_label), np.argmax(tgt_binary_label), src_number, tgt_number, input_waveform.shape, target_waveform.shape)
print("test: (studio)", np.argmax(tgt_binary_label), tgt_number, input_waveform.shape, target_waveform.shape)
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
print("test: from spec generator", input_spec.shape, target_spec.shape)
elif self.use_label_class and self.hint_size==len(tgt_binary_label): # and self.hint_size==len(src_binary_label)
# src_hint_piece = np.reshape(src_binary_label, (1, self.hint_size))
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
print("test: from binary label", src_hint_piece.shape, tgt_hint_piece.shape)
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
print("test: from dummy zeros", src_hint_piece.shape, tgt_hint_piece.shape)
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
if self.spec_generator:
# if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_enqueue,
feed_dict={self.test_placeholder: piece,
self.test_src_hint_placeholder: src_hint_piece,
self.test_tgt_hint_placeholder: tgt_hint_piece,
self.test_target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test_ext(self, sess):
stop = False
infer_sample_size = self.target_size * 3 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_test_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for hint_waveform, target_waveform, input_waveform, tgt_binary_label, tgt_number in mix_generator:
# for input_waveform, hint_waveform, target_waveform, src_binary_label, tgt_binary_label, src_number, tgt_number in mix_generator:
if self.coord.should_stop():
stop = True
break
# print("Ext test:", np.argmax(src_binary_label), np.argmax(tgt_binary_label), src_number, tgt_number, input_waveform.shape, target_waveform.shape)
print("Ext test: (studio) ", np.argmax(tgt_binary_label), tgt_number, input_waveform.shape, target_waveform.shape)
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
if self.spec_generator:
# if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_ext_enqueue,
feed_dict={self.test_ext_placeholder: piece,
self.test_ext_src_hint_placeholder: src_hint_piece,
self.test_ext_tgt_hint_placeholder: tgt_hint_piece,
self.test_ext_target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test_real(self, sess):
stop = False
# Go through the dataset multiple times
mix_generator = query_joint_yield_pair(gender=self.test_data_range["gender"], num=self.test_data_range["num"],
script=self.test_data_range["script"], device=None,
scene=None, directory=self.directory, directory_produced=self.directory,
exam_ignored=False,
randomized=True,
sample_rate=self.sample_rate)
for target_waveform, input_waveform, book in mix_generator:
if self.coord.should_stop():
stop = True
break
print("Real test:", book, input_waveform.shape, target_waveform.shape)
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
target_spec = self.spec_generator.__preprocess__(target_waveform[:, 0])
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
count = 0
max_count = 10
random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
input_waveform = input_waveform[random_start:, :]
target_waveform = target_waveform[random_start:, :]
while len(input_waveform) > self.sample_size and count<max_count:
count = count + 1
piece = input_waveform[:self.sample_size, :]
input_waveform = input_waveform[int(self.target_size/2):, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:start+self.target_size, :]
target_waveform = target_waveform[int(self.target_size/2):, :]
if self.spec_generator:
# if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_real_enqueue,
feed_dict={self.test_real_placeholder: piece,
self.test_real_src_hint_placeholder: src_hint_piece,
self.test_real_tgt_hint_placeholder: tgt_hint_piece,
self.test_real_target_placeholder: target_piece})
# # During inference, use entire audio file for spectrogram frames
# def thread_infer(self, sess, start_idx=0, end_idx=271):
# stop = False
# # Go through the dataset multiple times
# full_reverb_filenames=self.reverb_train_filenames+self.reverb_test_filenames
# my_reverb_filenames=full_reverb_filenames[start_idx:end_idx]
# mix_generator = MixGeneratorSpec_pair(self.test_filenames, my_reverb_filenames, self.noise_filenames, self.sample_rate,
# speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
# num_classes=200,
# shuffle=False,
# augment_speech=False,
# inject_noise=self.inject_noise,
# augment_reverb=False,
# raw_stft_similarity_score=self.raw_stft_similarity_score)
# for input_waveform, target_waveform, src_binary_label, tgt_binary_label, src_number, tgt_number in mix_generator:
# if mix_generator.epoch_index>1:
# print("All finished")
# stop = True
# self.infer_queue.close(cancel_pending_enqueues=False)
# break
# if self.coord.should_stop():
# stop = True
# break
# # target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# # self.silence_threshold)
# # target_waveform = target_waveform.reshape(-1, 1)
# # if target_waveform.size == 0:
# # print("Warning: {} was ignored as it contains only "
# # "silence. Consider decreasing trim_silence "
# # "threshold, or adjust volume of the audio."
# # .format(id_dict))
# # input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
# if self.spec_generator:
# input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
# target_spec = self.spec_generator.__preprocess__(target_waveform[:, 0])
# elif self.use_label_class and self.hint_size==len(src_binary_label) and self.hint_size==len(tgt_binary_label):
# src_hint_piece = np.reshape(src_binary_label, (1, self.hint_size))
# tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
# else:
# src_hint_piece = np.zeros((1, self.hint_size))
# tgt_hint_piece = np.zeros((1, self.hint_size))
# count = 0
# max_count = 10
# # random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# # input_waveform = input_waveform[random_start:, :]
# # target_waveform = target_waveform[random_start:, :]
# while len(input_waveform) > self.sample_size and count<max_count:
# count = count + 1
# piece = input_waveform[:self.sample_size, :]
# input_waveform = input_waveform[int(self.target_size/2):, :]
# start = int(self.sample_size/2-self.target_size/2)
# target_piece = target_waveform[start:start+self.target_size, :]
# target_waveform = target_waveform[int(self.target_size/2):, :]
# if self.spec_generator:
# # if self.hint_window<=0:
# src_hint_piece = input_spec
# tgt_hint_piece = target_spec
# # else:
# # random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# # hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
# sess.run(self.infer_enqueue,
# feed_dict={self.infer_placeholder: piece,
# self.infer_src_hint_placeholder: src_hint_piece,
# self.infer_tgt_hint_placeholder: tgt_hint_piece,
# self.infer_target_placeholder: target_piece,
# self.infer_src_class_placeholder: np.array([int(src_number)]),
# self.infer_tgt_class_placeholder: np.array([int(tgt_number)])})
# During inference, use entire audio file for spectrogram frames
def thread_infer(self, sess, start_idx=0, end_idx=271):
stop = False
infer_sample_size = self.target_size * 11 + self.sample_size
# Go through the dataset multiple times
full_reverb_filenames=self.reverb_train_filenames+self.reverb_test_filenames
my_reverb_filenames=full_reverb_filenames[start_idx:end_idx]
mix_generator = MixGeneratorSpec_single(self.test_filenames, my_reverb_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=False,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for hint_waveform, target_waveform, input_waveform, tgt_binary_label, tgt_number in mix_generator:
if mix_generator.epoch_index>1:
print("All finished")
stop = True
self.infer_queue.close(cancel_pending_enqueues=False)
break
# input_waveform = input_waveform[170000:]
# target_waveform = target_waveform[170000:]
if self.coord.should_stop():
stop = True
break
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
elif self.use_label_class and self.hint_size==len(tgt_binary_label):
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
# input_waveform = input_waveform[int(self.target_size/2):, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
if self.spec_generator:
# if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.infer_enqueue,
feed_dict={self.infer_placeholder: piece,
self.infer_src_hint_placeholder: src_hint_piece,
self.infer_tgt_hint_placeholder: tgt_hint_piece,
self.infer_target_placeholder: target_piece,
self.infer_src_class_placeholder: np.array(["h-01|"+tgt_number.split("|")[1]]),
self.infer_tgt_class_placeholder: np.array([tgt_number])})
def thread_disc(self, sess):
stop = False
# Go through the dataset multiple times
while not stop:
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise)
for input_waveform, target_waveform, binary_label, _ in mix_generator:
if self.coord.should_stop():
stop = True
break
while len(target_waveform) > self.sample_size:
target_piece = target_waveform[:self.target_size, :]
input_piece = input_waveform[:self.target_size, :]
target_waveform = target_waveform[int(self.target_size/2):, :]
input_waveform = input_waveform[int(self.target_size/2):, :]
sess.run(self.disc_enqueue,
feed_dict={self.disc_placeholder: target_piece,
self.disc_ref_placeholder: input_piece})
class DataReader_2Stage(DataReader):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
directory,
coord,
sample_size,
hint_size,
target_size,
sample_rate,
random_crop=True,
queue_size=32,
data_range=CLEAN_DATA_RANGE,
test_data_range=CLEAN_TEST_DATA_RANGE,
disc_thread_enabled=True,
spec_generator=None,
use_label_class=False,
hint_window=256,
inject_noise=False,
augment_reverb=False,
augment_speech=True,
norm_volume=False,
stft_similarity=None,
norm_reverb_peak=True):
super().__init__(directory,
coord,
sample_size,
hint_size,
target_size,
sample_rate,
random_crop=random_crop,
queue_size=queue_size,
data_range=data_range,
test_data_range=test_data_range,
disc_thread_enabled=disc_thread_enabled,
spec_generator=spec_generator,
use_label_class=use_label_class,
hint_window=hint_window,
inject_noise=inject_noise,
augment_reverb=augment_reverb,
augment_speech=augment_speech,
norm_volume=norm_volume,
stft_similarity=stft_similarity,
norm_reverb_peak=norm_reverb_peak)
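# Local helper: builds the (sample, hint, speaker hint, intermediate, target) placeholders and a
# padding FIFO queue of the given capacity; reused below for the train/val/test queues.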
def construct(hint_size, queue_size):
sample_ph = tf.placeholder(dtype=tf.float32, shape=None)
hint_ph = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
speaker_hint_ph = tf.placeholder(dtype=tf.float32, shape=(None, 20))
interm_ph = tf.placeholder(dtype=tf.float32, shape=None)
target_ph = tf.placeholder(dtype=tf.float32, shape=None)
queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1), (None, 1)])
enqueue = queue.enqueue([sample_ph,
hint_ph,
speaker_hint_ph,
interm_ph,
target_ph])
return sample_ph, hint_ph, speaker_hint_ph, interm_ph, target_ph, queue, enqueue
######
self.sample_placeholder, self.hint_placeholder, self.speaker_hint_placeholder, \
self.interm_placeholder, self.target_placeholder, self.queue, self.enqueue = construct(hint_size, queue_size)
"""For val set"""
self.test_placeholder, self.test_hint_placeholder, self.test_speaker_hint_placeholder, \
self.test_interm_placeholder, self.test_target_placeholder, self.test_queue, self.test_enqueue = construct(hint_size, 8)
"""For test set"""
self.test_ext_placeholder, self.test_ext_hint_placeholder, self.test_ext_speaker_hint_placeholder, \
self.test_ext_interm_placeholder, self.test_ext_target_placeholder, self.test_ext_queue, self.test_ext_enqueue = construct(hint_size, 8)
"""For inference"""
self.infer_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.infer_speaker_hint_placeholders = tf.placeholder(dtype=tf.float32, shape=(None, 20))
self.infer_interm_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_class_placeholder = tf.placeholder(dtype=tf.string, shape=None)
self.infer_queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32', 'float32', 'string'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1), (None, 1), (None,)])
self.infer_enqueue = self.infer_queue.enqueue([self.infer_placeholder,
self.infer_hint_placeholder,
self.infer_speaker_hint_placeholders,
self.infer_interm_placeholder,
self.infer_target_placeholder,
self.infer_class_placeholder])
"""For real set"""
self.test_real_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_real_speaker_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, 20))
self.test_real_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_queue = tf.PaddingFIFOQueue(8,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, 20), (None, 1)])
self.test_real_enqueue = self.test_real_queue.enqueue([self.test_real_placeholder,
self.test_real_hint_placeholder,
self.test_real_speaker_hint_placeholder,
self.test_real_target_placeholder])
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.train_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=self.augment_speech,
inject_noise=self.inject_noise,
augment_reverb=self.augment_reverb,
norm_volume=self.norm_volume,
norm_reverb_peak=self.norm_reverb_peak,
cut_length=self.sample_size*5.0)
print("Loading Data...")
for input_waveform, interm_waveform, target_waveform, binary_label, speaker_binary_label, _ in mix_generator:
if self.coord.should_stop():
stop = True
break
# padding
lag = self.target_size
# random_start = int(randint(0, lag, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
total_frames = input_spec.shape[0]
# print(total_frames)
elif self.use_label_class and self.hint_size==len(binary_label):
hint_piece = np.reshape(binary_label, (1, self.hint_size))
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# print(np.argmax(binary_label), input_waveform.shape, target_waveform.shape, input_spec.shape if self.spec_generator else 0)
# input_waveform and target_waveform are now of same length, and with 0-padding in front
if not self.random_crop:
while len(input_waveform) > self.sample_size:
piece = input_waveform[:self.sample_size, :]
input_waveform = input_waveform[lag:, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:start+self.target_size, :]
target_waveform = target_waveform[lag:, :]
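# Stage-1 (intermediate) target: a wider crop of (sample_size+target_size)/2 samples, centered
# on the same point as the final target piece.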
start_2 = int((self.sample_size-self.target_size)/4)
interm_piece = interm_waveform[start_2:self.sample_size-start_2, :]
interm_waveform = interm_waveform[lag:, :]
if self.spec_generator:
if self.hint_window<=0:
hint_piece = input_spec
else:
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.hint_placeholder: hint_piece,
self.speaker_hint_placeholder: speaker_hint_piece,
self.interm_placeholder: interm_piece,
self.target_placeholder: target_piece})
else:
length = input_waveform.shape[0]
num_pieces = 1
# print(num_pieces)
indices = randint(0, length-self.sample_size, num_pieces)
# if self.spec_generator:
# spec_indices = librosa.core.samples_to_frames(indices + int(self.sample_size/2), hop_length=self.spec_generator.shift, n_fft=self.spec_generator.n_fft)
for i in range(num_pieces):
idx = indices[i]
central = int(idx + (self.sample_size-self.target_size)/2)
piece = input_waveform[idx:idx+self.sample_size, :]
target_piece = target_waveform[central:central+self.target_size, :]
start_2 = int((self.sample_size-self.target_size)/4)
interm_piece = interm_waveform[int(idx+start_2):int(idx+self.sample_size-start_2), :]
if self.spec_generator:
if self.hint_window<=0:
hint_piece = input_spec
else:
# random_spec = spec_indices[i]
# random_shift = randint(-int(self.hint_window/4), int(self.hint_window/4), 1)
# random_start = max(0, int(random_spec - self.hint_window/2 + random_shift))
# random_end = min(int(random_spec + self.hint_window/2 + random_shift), total_frames)
# hint_piece = input_spec[random_start:random_end, :]
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
# print("train:", np.argmax(binary_label), piece.shape, hint_piece.shape, speaker_hint_piece.shape, interm_piece.shape, target_piece.shape)
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.hint_placeholder: hint_piece,
self.speaker_hint_placeholder: speaker_hint_piece,
self.interm_placeholder: interm_piece,
self.target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test(self, sess):
stop = False
infer_sample_size = self.target_size * 3 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for input_waveform, interm_waveform, target_waveform, binary_label, speaker_binary_label, number in mix_generator:
if self.coord.should_stop():
stop = True
break
print("test:", np.argmax(binary_label), number, input_waveform.shape, target_waveform.shape)
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
# print("test: from spec generator", input_spec.shape)
elif self.use_label_class and self.hint_size==len(binary_label):
hint_piece = np.reshape(binary_label, (1, self.hint_size))
# print("test: from binary label", hint_piece.shape)
else:
hint_piece = np.zeros((1, self.hint_size))
# print("test: from dummy zeros", hint_piece.shape)
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
start_2 = int((self.sample_size-self.target_size)/4)
interm_piece = interm_waveform[start_2:infer_sample_size-start_2, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_enqueue,
feed_dict={self.test_placeholder: piece,
self.test_hint_placeholder: hint_piece,
self.test_speaker_hint_placeholder: speaker_hint_piece,
self.test_interm_placeholder: interm_piece,
self.test_target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test_ext(self, sess):
stop = False
infer_sample_size = self.target_size * 3 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_test_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for input_waveform, interm_waveform, target_waveform, binary_label, speaker_binary_label, number in mix_generator:
if self.coord.should_stop():
stop = True
break
print("Ext test:", np.argmax(binary_label), number, input_waveform.shape, target_waveform.shape)
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
start_2 = int((self.sample_size-self.target_size)/4)
interm_piece = interm_waveform[start_2:infer_sample_size-start_2, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_ext_enqueue,
feed_dict={self.test_ext_placeholder: piece,
self.test_ext_hint_placeholder: hint_piece,
self.test_ext_speaker_hint_placeholder: speaker_hint_piece,
self.test_ext_interm_placeholder: interm_piece,
self.test_ext_target_placeholder: target_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test_real(self, sess):
stop = False
# Go through the dataset multiple times
mix_generator = query_joint_yield_pair(gender=self.test_data_range["gender"], num=self.test_data_range["num"],
script=self.test_data_range["script"], device=None,
scene=None, directory=self.directory, directory_produced=self.directory,
exam_ignored=False,
randomized=True,
sample_rate=self.sample_rate)
for input_waveform, target_waveform, book in mix_generator:
if self.coord.should_stop():
stop = True
break
print("Real test:", book, input_waveform.shape, target_waveform.shape)
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_id, speaker_binary_label = extract_speaker_id(book["gender"]+str(book["num"]))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
count = 0
max_count = 10
random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
input_waveform = input_waveform[random_start:, :]
target_waveform = target_waveform[random_start:, :]
while len(input_waveform) > self.sample_size and count<max_count:
count = count + 1
piece = input_waveform[:self.sample_size, :]
input_waveform = input_waveform[int(self.target_size/2):, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:start+self.target_size, :]
target_waveform = target_waveform[int(self.target_size/2):, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_real_enqueue,
feed_dict={self.test_real_placeholder: piece,
self.test_real_hint_placeholder: hint_piece,
self.test_real_speaker_hint_placeholder: speaker_hint_piece,
self.test_real_target_placeholder: target_piece})
# During inference, use entire audio file for spectrogram frames
def thread_infer(self, sess, start_idx=0, end_idx=271):
stop = False
infer_sample_size = self.target_size * 11 + self.sample_size
# Go through the dataset multiple times
full_reverb_filenames=self.reverb_train_filenames+self.reverb_test_filenames
my_reverb_filenames=full_reverb_filenames[start_idx:end_idx]
mix_generator = MixGeneratorSpec_single(self.test_filenames, my_reverb_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=False,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
SNRdB=[20],
cut_length=infer_sample_size)
for input_waveform, interm_waveform, target_waveform, binary_label, speaker_binary_label, number in mix_generator:
if mix_generator.epoch_index>1:
print("All finished")
stop = True
self.infer_queue.close(cancel_pending_enqueues=False)
break
# input_waveform = input_waveform[170000:]
# target_waveform = target_waveform[170000:]
if self.coord.should_stop():
stop = True
break
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
elif self.use_label_class and self.hint_size==len(binary_label):
hint_piece = np.reshape(binary_label, (1, self.hint_size))
else:
hint_piece = np.zeros((1, self.hint_size))
speaker_hint_piece = np.reshape(speaker_binary_label, (1, -1))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
start_2 = int((self.sample_size-self.target_size)/4)
interm_piece = interm_waveform[start_2:infer_sample_size-start_2, :]
if self.spec_generator:
# if self.hint_window<=0:
hint_piece = input_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.infer_enqueue,
feed_dict={self.infer_placeholder: piece,
self.infer_hint_placeholder: hint_piece,
self.infer_speaker_hint_placeholder: speaker_hint_piece,
self.infer_interm_placeholder: interm_piece,
self.infer_target_placeholder: target_piece,
self.infer_class_placeholder: np.array([number])})
class DataReader_pair_multi(DataReader):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
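# Sketch of how a reader like this is typically consumed on the graph side (illustrative only;
# the batch size, sizes and thread count below are hypothetical, not part of this module):
#
#   reader = DataReader_pair_multi(directory, coord, sample_size=16384, hint_size=200,
#                                  target_size=4096, sample_rate=16000)
#   batch = reader.queue.dequeue_many(16)   # (mix, src_hint, tgt_hint, target, clean_target)
#   feeder = threading.Thread(target=reader.thread_main, args=(sess,), daemon=True)
#   feeder.start()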
def __init__(self,
directory,
coord,
sample_size,
hint_size,
target_size,
sample_rate,
random_crop=True,
queue_size=32,
data_range=CLEAN_DATA_RANGE,
test_data_range=CLEAN_TEST_DATA_RANGE,
disc_thread_enabled=True,
spec_generator=None,
use_label_class=False,
hint_window=256,
inject_noise=False,
augment_reverb=False,
augment_speech=True,
norm_volume=False,
stft_similarity=None,
norm_reverb_peak=True):
super().__init__(directory,
coord,
sample_size,
hint_size,
target_size,
sample_rate,
random_crop=random_crop,
queue_size=queue_size,
data_range=data_range,
test_data_range=test_data_range,
disc_thread_enabled=disc_thread_enabled,
spec_generator=spec_generator,
use_label_class=use_label_class,
hint_window=hint_window,
inject_noise=inject_noise,
augment_reverb=augment_reverb,
augment_speech=augment_speech,
norm_volume=norm_volume,
stft_similarity=stft_similarity,
norm_reverb_peak=norm_reverb_peak)
######
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.clean_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1), (None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder,
self.src_hint_placeholder,
self.tgt_hint_placeholder,
self.target_placeholder,
self.clean_target_placeholder])
"""For val set"""
self.test_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_clean_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_queue = tf.PaddingFIFOQueue(8,
['float32', 'float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1), (None, 1)])
self.test_enqueue = self.test_queue.enqueue([self.test_placeholder,
self.test_src_hint_placeholder,
self.test_tgt_hint_placeholder,
self.test_target_placeholder,
self.test_clean_target_placeholder])
"""For test set"""
self.test_ext_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_ext_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_ext_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_ext_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_ext_clean_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_ext_queue = tf.PaddingFIFOQueue(8,
['float32', 'float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1), (None, 1)])
self.test_ext_enqueue = self.test_ext_queue.enqueue([self.test_ext_placeholder,
self.test_ext_src_hint_placeholder,
self.test_ext_tgt_hint_placeholder,
self.test_ext_target_placeholder,
self.test_ext_clean_target_placeholder])
"""For inference"""
self.infer_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.infer_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.infer_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_clean_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.infer_src_class_placeholder = tf.placeholder(dtype=tf.string, shape=None)
self.infer_tgt_class_placeholder = tf.placeholder(dtype=tf.string, shape=None)
self.infer_queue = tf.PaddingFIFOQueue(queue_size,
['float32', 'float32', 'float32', 'float32', 'float32', 'string', 'string'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1), (None, 1), (None,), (None,)])
self.infer_enqueue = self.infer_queue.enqueue([self.infer_placeholder,
self.infer_src_hint_placeholder,
self.infer_tgt_hint_placeholder,
self.infer_target_placeholder,
self.infer_clean_target_placeholder,
self.infer_src_class_placeholder,
self.infer_tgt_class_placeholder])
"""For real set"""
self.test_real_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_src_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_real_tgt_hint_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, hint_size))
self.test_real_target_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.test_real_queue = tf.PaddingFIFOQueue(8,
['float32', 'float32', 'float32', 'float32'],
shapes=[(None, 1), (None, hint_size), (None, hint_size), (None, 1)])
self.test_real_enqueue = self.test_real_queue.enqueue([self.test_real_placeholder,
self.test_real_src_hint_placeholder,
self.test_real_tgt_hint_placeholder,
self.test_real_target_placeholder])
# During training, select spectrogram frames centered around the waveform piece if random_crop is enabled; otherwise randomly
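# Worked example of the crop arithmetic below (hypothetical sizes, for illustration only):
# with sample_size=16384 and target_size=4096, the target slice starts at
# start = (16384-4096)/2 = 6144 and has length 4096 (centred in the input piece), while the
# clean/intermediate slice spans start_2 = (16384-4096)/4 = 3072 .. 16384-3072 = 13312,
# i.e. length (16384+4096)/2 = 10240, centred on the same midpoint.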
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_pair(self.train_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=self.augment_speech,
inject_noise=self.inject_noise,
augment_reverb=self.augment_reverb,
norm_volume=self.norm_volume,
norm_reverb_peak=self.norm_reverb_peak,
cut_length=self.sample_size*5.0)
print("Loading Data...")
for input_waveform, hint_waveform, target_waveform, clean_waveform, src_binary_label, tgt_binary_label, _, _ in mix_generator:
if self.coord.should_stop():
stop = True
break
# padding
lag = self.target_size
random_start = int(randint(0, lag, 1))
input_waveform = input_waveform[random_start:, :]
target_waveform = target_waveform[random_start:, :]
clean_waveform = clean_waveform[random_start:, :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
src_total_frames = input_spec.shape[0]
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
tgt_total_frames = target_spec.shape[0]
elif self.use_label_class and self.hint_size==len(src_binary_label) and self.hint_size==len(tgt_binary_label):
src_hint_piece = np.reshape(src_binary_label, (1, self.hint_size))
tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
# print(np.argmax(binary_label), input_waveform.shape, target_waveform.shape, input_spec.shape if self.spec_generator else 0)
# input_waveform and target_waveform are now of same length, and with 0-padding in front
if not self.random_crop:
while len(input_waveform) > self.sample_size:
piece = input_waveform[:self.sample_size, :]
input_waveform = input_waveform[lag:, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:start+self.target_size, :]
target_waveform = target_waveform[lag:, :]
start_2 = int((self.sample_size-self.target_size)/4)
clean_piece = clean_waveform[start_2:self.sample_size-start_2, :]
clean_waveform = clean_waveform[lag:, :]
if self.spec_generator:
if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
else:
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
src_hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
random_spec = int(randint(0, target_spec.shape[0] - self.hint_window, 1))
tgt_hint_piece = target_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.src_hint_placeholder: src_hint_piece,
self.tgt_hint_placeholder: tgt_hint_piece,
self.target_placeholder: target_piece,
self.clean_target_placeholder: clean_piece})
else:
length = input_waveform.shape[0]
num_pieces = 1
# print(num_pieces)
indices = randint(0, length-self.sample_size, num_pieces)
# if self.spec_generator:
# spec_indices = librosa.core.samples_to_frames(indices + int(self.sample_size/2), hop_length=self.spec_generator.shift, n_fft=self.spec_generator.n_fft)
for i in range(num_pieces):
idx = indices[i]
central = int(idx + self.sample_size/2-self.target_size/2)
piece = input_waveform[idx:idx+self.sample_size, :]
target_piece = target_waveform[central:central+self.target_size, :]
start_2 = int((self.sample_size-self.target_size)/4)
clean_piece = clean_waveform[int(idx+start_2):int(idx+self.sample_size-start_2), :]
if self.spec_generator:
if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
else:
# random_spec = spec_indices[i]
# random_shift = randint(-int(self.hint_window/4), int(self.hint_window/4), 1)
# random_start = max(0, int(random_spec - self.hint_window/2 + random_shift))
# random_end = min(int(random_spec + self.hint_window/2 + random_shift), src_total_frames)
# src_hint_piece = input_spec[random_start:random_end, :]
# random_shift = randint(-int(self.hint_window/4), int(self.hint_window/4), 1)
# random_start = max(0, int(random_spec - self.hint_window/2 + random_shift))
# random_end = min(int(random_spec + self.hint_window/2 + random_shift), tgt_total_frames)
# tgt_hint_piece = target_spec[random_start:random_end, :]
random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
src_hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
random_spec = int(randint(0, target_spec.shape[0] - self.hint_window, 1))
tgt_hint_piece = target_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece,
self.src_hint_placeholder: src_hint_piece,
self.tgt_hint_placeholder: tgt_hint_piece,
self.target_placeholder: target_piece,
self.clean_target_placeholder: clean_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test(self, sess):
stop = False
infer_sample_size = self.target_size * 4 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_train_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
cut_length=infer_sample_size)
for hint_waveform, target_waveform, input_waveform, tgt_binary_label, tgt_number in mix_generator:
# for input_waveform, hint_waveform, target_waveform, src_binary_label, tgt_binary_label, src_number, tgt_number in mix_generator:
if self.coord.should_stop():
stop = True
break
# print("test:", np.argmax(src_binary_label), np.argmax(tgt_binary_label), src_number, tgt_number, input_waveform.shape, target_waveform.shape)
print("test: (studio)", np.argmax(tgt_binary_label), tgt_number, input_waveform.shape, target_waveform.shape)
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
print("test: from spec generator", input_spec.shape, target_spec.shape)
elif self.use_label_class and self.hint_size==len(tgt_binary_label): # and self.hint_size==len(src_binary_label)
# src_hint_piece = np.reshape(src_binary_label, (1, self.hint_size))
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
print("test: from binary label", src_hint_piece.shape, tgt_hint_piece.shape)
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
print("test: from dummy zeros", src_hint_piece.shape, tgt_hint_piece.shape)
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
start_2 = int((self.sample_size-self.target_size)/4)
clean_piece = piece[start_2:-start_2, :]
if self.spec_generator:
# if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_enqueue,
feed_dict={self.test_placeholder: piece,
self.test_src_hint_placeholder: src_hint_piece,
self.test_tgt_hint_placeholder: tgt_hint_piece,
self.test_target_placeholder: target_piece,
self.test_clean_target_placeholder: clean_piece})
# During testing, use entire audio file for spectrogram frames
def thread_test_ext(self, sess):
stop = False
infer_sample_size = self.target_size * 4 + self.sample_size
# Go through the dataset multiple times
mix_generator = MixGeneratorSpec_single(self.test_filenames, self.reverb_test_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=True,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
cut_length=infer_sample_size)
for hint_waveform, target_waveform, input_waveform, tgt_binary_label, tgt_number in mix_generator:
# for input_waveform, hint_waveform, target_waveform, src_binary_label, tgt_binary_label, src_number, tgt_number in mix_generator:
if self.coord.should_stop():
stop = True
break
# print("Ext test:", np.argmax(src_binary_label), np.argmax(tgt_binary_label), src_number, tgt_number, input_waveform.shape, target_waveform.shape)
print("Ext test: (studio) ", np.argmax(tgt_binary_label), tgt_number, input_waveform.shape, target_waveform.shape)
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
start_2 = int((self.sample_size-self.target_size)/4)
clean_piece = piece[start_2:-start_2, :]
if self.spec_generator:
# if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.test_ext_enqueue,
feed_dict={self.test_ext_placeholder: piece,
self.test_ext_src_hint_placeholder: src_hint_piece,
self.test_ext_tgt_hint_placeholder: tgt_hint_piece,
self.test_ext_target_placeholder: target_piece,
self.test_ext_clean_target_placeholder: clean_piece})
# # During inference, use entire audio file for spectrogram frames
# def thread_infer(self, sess, start_idx=0, end_idx=271):
# stop = False
# # Go through the dataset multiple times
# full_reverb_filenames=self.reverb_train_filenames+self.reverb_test_filenames
# my_reverb_filenames=full_reverb_filenames[start_idx:end_idx]
# mix_generator = MixGeneratorSpec_pair(self.test_filenames, my_reverb_filenames, self.noise_filenames,
# speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
# sample_rate=self.sample_rate,
# num_classes=200,
# shuffle=False,
# augment_speech=False,
# inject_noise=self.inject_noise,
# augment_reverb=False,
# raw_stft_similarity_score=self.raw_stft_similarity_score)
# for input_waveform, target_waveform, src_binary_label, tgt_binary_label, src_number, tgt_number in mix_generator:
# if mix_generator.epoch_index>1:
# print("All finished")
# stop = True
# self.infer_queue.close(cancel_pending_enqueues=False)
# break
# if self.coord.should_stop():
# stop = True
# break
# # target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# # self.silence_threshold)
# # target_waveform = target_waveform.reshape(-1, 1)
# # if target_waveform.size == 0:
# # print("Warning: {} was ignored as it contains only "
# # "silence. Consider decreasing trim_silence "
# # "threshold, or adjust volume of the audio."
# # .format(id_dict))
# # input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
# if self.spec_generator:
# input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
# target_spec = self.spec_generator.__preprocess__(target_waveform[:, 0])
# elif self.use_label_class and self.hint_size==len(src_binary_label) and self.hint_size==len(tgt_binary_label):
# src_hint_piece = np.reshape(src_binary_label, (1, self.hint_size))
# tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
# else:
# src_hint_piece = np.zeros((1, self.hint_size))
# tgt_hint_piece = np.zeros((1, self.hint_size))
# count = 0
# max_count = 10
# # random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# # input_waveform = input_waveform[random_start:, :]
# # target_waveform = target_waveform[random_start:, :]
# while len(input_waveform) > self.sample_size and count<max_count:
# count = count + 1
# piece = input_waveform[:self.sample_size, :]
# input_waveform = input_waveform[int(self.target_size/2):, :]
# start = int(self.sample_size/2-self.target_size/2)
# target_piece = target_waveform[start:start+self.target_size, :]
# target_waveform = target_waveform[int(self.target_size/2):, :]
# if self.spec_generator:
# # if self.hint_window<=0:
# src_hint_piece = input_spec
# tgt_hint_piece = target_spec
# # else:
# # random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# # hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
# sess.run(self.infer_enqueue,
# feed_dict={self.infer_placeholder: piece,
# self.infer_src_hint_placeholder: src_hint_piece,
# self.infer_tgt_hint_placeholder: tgt_hint_piece,
# self.infer_target_placeholder: target_piece,
# self.infer_src_class_placeholder: np.array([int(src_number)]),
# self.infer_tgt_class_placeholder: np.array([int(tgt_number)])})
# During inference, use entire audio file for spectrogram frames
def thread_infer(self, sess, start_idx=0, end_idx=271):
stop = False
infer_sample_size = self.target_size * 11 + self.sample_size
# Go through the dataset multiple times
full_reverb_filenames=self.reverb_train_filenames+self.reverb_test_filenames
my_reverb_filenames=full_reverb_filenames[start_idx:end_idx]
mix_generator = MixGeneratorSpec_single(self.test_filenames, my_reverb_filenames, self.noise_filenames, self.sample_rate,
speech_data_holder=None, reverb_data_holder=None, noise_data_holder=None,
num_classes=200,
shuffle=False,
augment_speech=False,
inject_noise=self.inject_noise,
augment_reverb=False,
norm_volume=self.norm_volume,
raw_stft_similarity_score=self.raw_stft_similarity_score,
norm_reverb_peak=False,
cut_length=infer_sample_size)
for hint_waveform, target_waveform, input_waveform, tgt_binary_label, tgt_number in mix_generator:
if mix_generator.epoch_index>1:
print("All finished")
stop = True
self.infer_queue.close(cancel_pending_enqueues=False)
break
# input_waveform = input_waveform[170000:]
# target_waveform = target_waveform[170000:]
if self.coord.should_stop():
stop = True
break
# target_waveform, keep_indices = trim_silence(target_waveform[:, 0],
# self.silence_threshold)
# target_waveform = target_waveform.reshape(-1, 1)
# if target_waveform.size == 0:
# print("Warning: {} was ignored as it contains only "
# "silence. Consider decreasing trim_silence "
# "threshold, or adjust volume of the audio."
# .format(id_dict))
# input_waveform = input_waveform[keep_indices[0]:keep_indices[-1], :]
if self.spec_generator:
input_spec = self.spec_generator.__preprocess__(input_waveform[:, 0])
target_spec = self.spec_generator.__preprocess__(hint_waveform[:, 0])
elif self.use_label_class and self.hint_size==len(tgt_binary_label):
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.reshape(tgt_binary_label, (1, self.hint_size))
else:
src_hint_piece = np.zeros((1, self.hint_size))
tgt_hint_piece = np.zeros((1, self.hint_size))
# random_start = int(randint(0, input_waveform.shape[0]-max_count*self.sample_size, 1))
# input_waveform = input_waveform[random_start:, :]
# target_waveform = target_waveform[random_start:, :]
piece = input_waveform[:infer_sample_size, :]
# input_waveform = input_waveform[int(self.target_size/2):, :]
start = int((self.sample_size-self.target_size)/2)
target_piece = target_waveform[start:infer_sample_size-start, :]
start_2 = int((self.sample_size-self.target_size)/4)
clean_piece = piece[start_2:-start_2, :]
if self.spec_generator:
# if self.hint_window<=0:
src_hint_piece = input_spec
tgt_hint_piece = target_spec
# else:
# random_spec = int(randint(0, input_spec.shape[0] - self.hint_window, 1))
# hint_piece = input_spec[random_spec:random_spec + self.hint_window, :]
sess.run(self.infer_enqueue,
feed_dict={self.infer_placeholder: piece,
self.infer_src_hint_placeholder: src_hint_piece,
self.infer_tgt_hint_placeholder: tgt_hint_piece,
self.infer_target_placeholder: target_piece,
self.infer_clean_target_placeholder: clean_piece,
self.infer_src_class_placeholder: np.array(["h-01|"+tgt_number.split("|")[1]]),
self.infer_tgt_class_placeholder: np.array([tgt_number])})
DATAREADER_FACTORY = {"DataReader": DataReader, "DataReader_pair": DataReader_pair, "DataReader_pair_multi": DataReader_pair_multi}
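# Illustrative lookup (sketch; the config dict and key names are hypothetical): a reader class can
# be selected by name from this mapping, e.g.
#   reader_cls = DATAREADER_FACTORY[config["reader"]]
#   reader = reader_cls(directory, coord, sample_size, hint_size, target_size, sample_rate)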
|
command.py
|
import concurrent.futures
import logging
import os
import queue
import re
import threading
from pathlib import Path
from typing import Optional, Tuple
import boto3
import botocore
from botocore.endpoint import MAX_POOL_CONNECTIONS
from botocore.exceptions import NoCredentialsError
from .exceptions import DirectoryDoesNotExistError
from .exceptions import NoCredentialsError as S3FetchNoCredentialsError
from .exceptions import PermissionError as S3FetchPermissionError
from .exceptions import RegexError
logging.basicConfig()
class S3Fetch:
def __init__(
self,
s3_uri: str,
region: str = "us-east-1",
debug: bool = False,
download_dir: Optional[str] = None,
regex: Optional[str] = None,
threads: Optional[int] = None,
dry_run: bool = False,
delimiter: str = "/",
quiet: bool = False,
) -> None:
"""S3Fetch
:param s3_uri: S3 URI
:type s3_uri: str
:param region: AWS region the bucket is located in, defaults to "us-east-1"
:type region: str, optional
:param debug: Enable debug output, defaults to False
:type debug: bool, optional
:param download_dir: Directory to download objects to, defaults to None
:type download_dir: Optional[str], optional
:param regex: Regex to use to filter objects, defaults to None
:type regex: Optional[str], optional
:param threads: Number of threads to use, 1 thread used per object, defaults to None
:type threads: Optional[int], optional
:param dry_run: Enable dry run mode, don't actually download anything, defaults to False
:type dry_run: bool, optional
:param delimiter: S3 object path delimiter, defaults to "/"
:type delimiter: str, optional
:param quiet: Don't print anything to stdout, defaults to False
:type quiet: bool, optional
"""
self._logger = logging.getLogger("s3fetch")
self._logger.setLevel(logging.DEBUG if debug else logging.INFO)
self._bucket, self._prefix = self._parse_and_split_s3_uri(s3_uri, delimiter)
self._debug = debug
self._regex = regex
self._dry_run = dry_run
self._delimiter = delimiter
self._quiet = quiet
if self._dry_run and not self._quiet:
print("Operating in dry run mode. Will not download objects.")
self._download_dir = self._determine_download_dir(download_dir)
self._threads = threads or len(os.sched_getaffinity(0))
self._logger.debug(f"Using {self._threads} threads.")
# https://stackoverflow.com/questions/53765366/urllib3-connectionpool-connection-pool-is-full-discarding-connection
# https://github.com/boto/botocore/issues/619#issuecomment-461859685
# max_pool_connections here is passed to the max_size param of urllib3.HTTPConnectionPool()
connection_pool_connections = max(MAX_POOL_CONNECTIONS, self._threads)
client_config = botocore.config.Config(
max_pool_connections=connection_pool_connections,
)
self.client = boto3.client("s3", region_name=region, config=client_config)
self._object_queue = queue.Queue()
self._failed_downloads = []
self._successful_downloads = 0
self._keyboard_interrupt_exit = threading.Event()
self._print_lock = threading.Lock()
def _parse_and_split_s3_uri(self, s3_uri: str, delimiter: str) -> Tuple[str, str]:
"""Parse and split the S3 URI into bucket and path prefix.
:param s3_uri: S3 URI
:type s3_uri: str
:param delimiter: S3 path delimiter.
:type delimiter: str
:return: Tuple containing the S3 bucket and path prefix.
:rtype: Tuple[str, str]
"""
tmp_path = s3_uri.replace("s3://", "", 1)
try:
bucket, prefix = tmp_path.split(delimiter, maxsplit=1)
except ValueError:
bucket = tmp_path
prefix = ""
self._logger.debug(f"bucket={bucket}, prefix={prefix}")
return bucket, prefix
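# Examples (hypothetical URIs): "s3://my-bucket/some/prefix" -> ("my-bucket", "some/prefix"),
# while "s3://my-bucket" contains no delimiter and falls back to ("my-bucket", "").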
def _determine_download_dir(self, download_dir: Optional[str]) -> Path:
"""Determine the correct download directory to use.
:param download_dir: Download directory, `None` results in current directory.
:type download_dir: Optional[str]
:raises DirectoryDoesNotExistError: Raised if the specified directory does not exist.
:return: Path object representing the download directory.
:rtype: Path
"""
if not download_dir:
download_directory = Path(os.getcwd())
else:
download_directory = Path(download_dir)
if not download_directory.is_dir():
raise DirectoryDoesNotExistError(
f"The directory '{download_directory}' does not exist."
)
self._logger.debug(f"download_directory={download_directory}")
return Path(download_directory)
def _retrieve_list_of_objects(self) -> None:
"""Retrieve a list of objects in the S3 bucket under the specified path prefix."""
if not self._quiet:
prefix = f"'{self._prefix}'" if self._prefix else "no prefix"
print(f"Listing objects in bucket '{self._bucket}' with prefix {prefix}...")
paginator = self.client.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=self._bucket, Prefix=self._prefix):
if "Contents" not in page:
if not self._quiet:
print("No objects found under prefix.")
break
if self._keyboard_interrupt_exit.is_set():
raise KeyboardInterrupt
for key in filter(
self._filter_object,
(obj["Key"] for obj in page["Contents"]),
):
self._object_queue.put_nowait(key)
# Send sentinel value indicating pagination complete.
self._object_queue.put_nowait(None)
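# The consumer side (_download_objects) treats this None as the end-of-listing marker and stops
# pulling items from the queue once it is seen.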
def run(self) -> None:
"""Executes listing, filtering and downloading objects from the S3 bucket."""
try:
threading.Thread(target=self._retrieve_list_of_objects).start()
self._download_objects()
self._check_for_failed_downloads()
except NoCredentialsError as e:
raise S3FetchNoCredentialsError(e) from e
def _download_objects(self) -> None:
"""Download objects from the specified S3 bucket and path prefix."""
if not self._quiet:
print("Starting downloads...")
with concurrent.futures.ThreadPoolExecutor(
max_workers=self._threads
) as executor:
futures = {}
while True:
item = self._object_queue.get(block=True)
if item is None: # Check for sentinel value
break
futures[item] = executor.submit(self._download_object, item)
for key, future in futures.items():
try:
future.result()
self._successful_downloads += 1
except KeyboardInterrupt:
if not self._quiet:
print("\nThreads are exiting...")
executor.shutdown(wait=False)
self._keyboard_interrupt_exit.set()
raise
except Exception as e:
self._failed_downloads.append((key, e))
def _check_for_failed_downloads(self) -> None:
"""Print out a list of objects that failed to download (if any)."""
if self._failed_downloads and not self._quiet:
print()
print(f"{len(self._failed_downloads)} objects failed to download.")
for key, reason in self._failed_downloads:
print(f"{key}: {reason}")
def _rollup_prefix(self, key: str) -> Tuple[Optional[str], str]:
# First roll up everything under the prefix to the rightmost delimiter, leaving us with the object key
# after the rolled-up prefix.
# Example for prefix of '/example/obj'
# /example/objects/obj1
# /example/objects/obj2
# Result: objects/obj1 & objects/obj2
# Determine rollup prefix
if self._prefix:
# Get prefix up to last delimiter
try:
rollup_prefix, _ = self._prefix.rsplit(self._delimiter, maxsplit=1)
except ValueError:
rollup_prefix = None
else:
rollup_prefix = None
# Remove prefix from key
if rollup_prefix:
_, tmp_key = key.rsplit(rollup_prefix + self._delimiter, maxsplit=1)
else:
tmp_key = key
# Split object key into directory and filename
try:
directory, filename = tmp_key.rsplit(self._delimiter, maxsplit=1)
except ValueError:
directory = None
filename = tmp_key
return directory, filename
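# Worked example (hypothetical key/prefix): with prefix "example/obj" and key
# "example/objects/obj1", rollup_prefix is "example", tmp_key becomes "objects/obj1",
# and the result is ("objects", "obj1").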
def _download_object(self, key: str) -> None:
"""Download S3 object from the specified bucket.
:param key: S3 object key
:type key: str
:raises KeyboardInterrupt: Raised if the user cancels the operation with CTRL-C.
:raises S3FetchPermissionError: Raised if a permission error is encountered when writing object to disk.
"""
tmp_dest_directory, tmp_dest_filename = self._rollup_prefix(key)
if tmp_dest_directory:
destination_directory = self._download_dir / Path(tmp_dest_directory)
else:
destination_directory = self._download_dir
if not destination_directory.is_dir():
try:
destination_directory.mkdir(parents=True)
except FileExistsError:
pass
destination_filename = destination_directory / Path(tmp_dest_filename)
if self._keyboard_interrupt_exit.is_set():
raise KeyboardInterrupt
self._logger.debug(f"Downloading s3://{self._bucket}{self._delimiter}{key}")
try:
if not self._dry_run:
self.client.download_file(
Bucket=self._bucket,
Key=key,
Filename=str(destination_filename),
Callback=self._download_callback,
)
except PermissionError as e:
if not self._quiet:
self._tprint(f"{key}...error")
raise S3FetchPermissionError(
f"Permission error when attempting to write object to {destination_filename}"
) from e
else:
if not self._keyboard_interrupt_exit.is_set():
if not self._quiet:
self._tprint(f"{key}...done")
def _download_callback(self, *args, **kwargs):
"""boto3 callback, called whenever boto3 finishes downloading a chunk of an S3 object.
:raises SystemExit: Raised if KeyboardInterrupt is raised in the main thread.
"""
if self._keyboard_interrupt_exit.is_set():
self._logger.debug("Main thread has told us to exit, so exiting.")
raise SystemExit(1)
def _filter_object(self, key: str) -> bool:
"""Filter function for the `filter()` call used to determine if an
object key should be included in the list of objects to download.
:param key: S3 object key.
:type key: str
:returns: True if object key matches regex or no regex provided. False otherwise.
:raises RegexError: Raised if the regular expression is invalid.
"""
# Discard key if it's a 'directory'
if key.endswith(self._delimiter):
return False
if not self._regex:
self._logger.debug("No regex detected.")
return True
try:
rexp = re.compile(rf"{self._regex}")
except re.error as e:
msg = f"Regex error: {repr(e)}"
if self._debug:
raise RegexError(msg) from e
raise RegexError(msg)
if rexp.search(key):
self._logger.debug(f"Object {key} matched regex, added to object list.")
return True
else:
self._logger.debug(f"Object {key} did not match regex, skipped.")
return False
def _tprint(self, msg: str) -> None:
"""Thread safe printing.
:param msg: Text to print to the screen.
:type msg: str
"""
acquired = self._print_lock.acquire(timeout=1)
try:
    print(msg)
finally:
    if acquired:
        self._print_lock.release()
|
engine.py
|
"""
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import json
import logging
import os
import pkgutil
import shutil
import sys
import threading
import time
import traceback
import uuid
from distutils.version import LooseVersion
from urllib import parse
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException
from bzt.utils import reraise, load_class, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux
from bzt.utils import shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient, Environment
from bzt.utils import NETWORK_PROBLEMS
from .dicts import Configuration
from .modules import Provisioning, Reporter, Service, Aggregator, EngineModule
from .names import EXEC, TAURUS_ARTIFACTS_DIR, SETTINGS
from .templates import Singletone
from ..environment_helpers import expand_variable_with_os, custom_expandvars, expand_envs_with_os
from bzt.resources.version import VERSION, DEV_VERSION
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
:type services: list[Service]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.env = Environment(self.log) # backward compatibility
self.shared_env = Environment(self.log) # backward compatibility
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {} # available modules
self.provisioning = Provisioning()
self.aggregator = Aggregator(is_functional=False)
self.aggregator.engine = self
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.engine_loop_utilization = 0
self.prepared = []
self.started = []
self.default_cwd = None
self.logging_level_down = lambda: None
self.logging_level_up = lambda: None
self.user_pythonpath = None
self.temp_pythonpath = None
self._http_client = None
self.graceful_tmp = None
def set_pythonpath(self):
version = sys.version.split(' ')[0]
path_suffix = os.path.join('python-packages', version)
self.user_pythonpath = get_full_path(os.path.join("~", ".bzt", path_suffix))
self.temp_pythonpath = get_full_path(os.path.join(self.artifacts_dir, path_suffix))
current_pythonpath = os.environ.get('PYTHONPATH', '')
paths = current_pythonpath, self.temp_pythonpath, self.user_pythonpath
self.log.debug("Set PYTHONPATH to :\n\tCURRENT: '{}' +\n\tTEMP: '{}' +\n\tUSER: '{}'".format(*paths))
try:
user_packages = os.listdir(self.user_pythonpath)
except:
user_packages = []
self.log.debug("Content of user packages dir: {}".format(user_packages))
os.environ['PYTHONPATH'] = os.pathsep.join(paths)
def configure(self, user_configs, read_config_files=True):
"""
Load configuration files
:type user_configs: list[str]
:type read_config_files: bool
"""
self.log.info("Configuring...")
if read_config_files:
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
all_includes = []
while "included-configs" in self.config:
includes = self.config.pop("included-configs")
included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
all_includes += includes
self.config.load(included_configs)
self.config['included-configs'] = all_includes
self.config.merge({"version": VERSION})
self.get_http_client()
if self.config.get(SETTINGS).get("check-updates", True):
install_id = self.config.get("install-id", self._generate_id())
def wrapper():
return self._check_updates(install_id)
thread = threading.Thread(target=wrapper) # intentionally non-daemon thread
thread.start()
return merged_config
def unify_config(self):
executions = self.config.get(EXEC, [])
if isinstance(executions, dict):
executions = [executions]
self.config[EXEC] = executions
settings = self.config.get(SETTINGS)
default_executor = settings.get("default-executor", None)
prov_type = self.config.get(Provisioning.PROV)
for execution in executions: # type: BetterDict
executor = execution.get("executor", default_executor, force_set=True)
if not executor:
msg = "Cannot determine executor type and no default executor in %s"
raise TaurusConfigError(msg % execution)
reporting = self.config.get(Reporter.REP, [])
for index in range(len(reporting)):
ensure_is_dict(reporting, index, "module")
services = self.config.get(Service.SERV, [])
for index in range(len(services)):
ensure_is_dict(services, index, "module")
modules = self.config.get("modules")
for module in modules:
ensure_is_dict(modules, module, "class")
@staticmethod
def _generate_id():
if os.getenv("JENKINS_HOME"):
prefix = "jenkins"
elif os.getenv("TRAVIS"):
prefix = "travis"
elif any([key.startswith("bamboo") for key in os.environ.keys()]):
prefix = "bamboo"
elif os.getenv("TEAMCITY_VERSION"):
prefix = "teamcity"
elif os.getenv("DOCKER_HOST"):
prefix = "docker"
elif os.getenv("AWS_"):
prefix = "amazon"
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG"):
prefix = "google_cloud"
elif os.getenv("WEBJOBS_NAME"):
prefix = "azure"
elif is_linux():
prefix = 'linux'
elif is_windows():
prefix = 'windows'
else:
prefix = 'macos'
return "%s-%x" % (prefix, uuid.getnode())
def prepare(self):
"""
Prepare the engine for work: calls prepare() on Provisioning and the other
downstream EngineModule instances
"""
self.log.info("Preparing...")
self.unify_config()
interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
try:
self.__prepare_aggregator()
self.__prepare_services()
self.__prepare_provisioning()
self.__prepare_reporters()
self.config.dump()
except BaseException as exc:
self.stopping_reason = exc
raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning] # order matters
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def start_subprocess(self, args, env, cwd=None, **kwargs):
if cwd is None:
cwd = self.default_cwd
self.graceful_tmp = self.create_artifact(prefix="GRACEFUL", suffix="")
env = env.get()
env['GRACEFUL'] = self.graceful_tmp
return shell_exec(args, cwd=cwd, env=env, **kwargs)
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
exc_info = exc_value = None
try:
self._startup()
self.logging_level_down()
self._wait()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
exc_value = exc
exc_info = sys.exc_info()
finally:
self.log.warning("Please wait for graceful shutdown...")
try:
self.logging_level_up()
self._shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if exc_value:
reraise(exc_info, exc_value)
def _check_modules_list(self):
stop = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters # order matters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished = bool(module.check())
if finished:
self.log.debug("%s finished", module)
stop = finished
return stop
def _wait(self):
"""
Wait for modules to finish
:return:
"""
prev = time.time()
while not self._check_modules_list():
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.engine_loop_utilization = diff / self.check_interval
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
self.log.debug("Current stop reason: %s", self.stopping_reason)
if self.graceful_tmp and not os.path.exists(self.graceful_tmp):
open(self.graceful_tmp, 'x').close()
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
for module in modules:
try:
if module in self.started:
module.shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if self.graceful_tmp and os.path.exists(self.graceful_tmp):
os.remove(self.graceful_tmp)
self.config.dump()
if exc_value:
reraise(exc_info, exc_value)
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
# services are last because of shellexec which is "final-final" action
for module in modules:
if module in self.prepared:
try:
module.is_error = True if self.stopping_reason else None
module.post_process()
except BaseException as exc:
if isinstance(exc, KeyboardInterrupt):
self.log.debug("post_process: %s", exc)
else:
self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
module.is_error = True
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
self.config.dump()
if exc_info:
reraise(exc_info, exc_value)
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise TaurusInternalException: if no artifacts dir set
"""
if not self.artifacts_dir:
raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")
filename = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
self.__artifacts.append(filename)
self.log.debug("New artifact filename: %s", filename)
return filename
def existing_artifact(self, filename, move=False, target_filename=None):
"""
Add existing artifact, it will be collected into artifact_dir. If
move=True, the original file will be deleted
:type filename: str
:type move: bool
:type target_filename: str
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
new_filename = os.path.basename(filename) if target_filename is None else target_filename
new_name = os.path.join(self.artifacts_dir, new_filename)
self.__artifacts.append(new_name)
if get_full_path(filename) == get_full_path(new_name):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file not exists: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, new_name)
shutil.move(filename, new_name)
else:
self.log.debug("Copying %s to %s", filename, new_name)
shutil.copy(filename, new_name)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if not self.artifacts_dir:
artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = self.__expand_artifacts_dir()
self.log.info("Artifacts dir: %s", self.artifacts_dir)
os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
# dump current effective configuration
dump = self.create_artifact("effective", "")
self.config.set_dump_file(dump)
self.config.dump()
# dump merged configuration
if merged_config:
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for artifact in existing_artifacts:
self.existing_artifact(artifact)
def __expand_artifacts_dir(self):
envs = self.__get_envs_from_config()
artifacts_dir = custom_expandvars(self.artifacts_dir, envs)
artifacts_dir = expand_variable_with_os(artifacts_dir)
artifacts_dir = get_full_path(artifacts_dir)
return artifacts_dir
def is_functional_mode(self):
return self.aggregator is not None and self.aggregator.is_functional
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
raise TaurusConfigError(msg)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
clsname = settings.get('class', err)
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
return self.modules[alias]
def instantiate_module(self, alias):
"""
Create new instance for module using its alias from module settings
section of config. Thus, to instantiate module it should be mentioned
in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
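# Example (the alias name is hypothetical): instantiate_module("console") would return a configured
# EngineModule subclass instance whose settings come from the "modules" section of the config.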
def find_file(self, filename):
"""
Try to find a file or dir in search paths if specified. Helps to find files
in non-CLI environments or relative to the config path.
The returned path is already full and must not be passed through abspath/etc.
:param filename: file basename to find
:type filename: str
"""
if not filename:
return filename
if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
parsed_url = parse.urlparse(filename)
downloader = ExceptionalDownloader(self.get_http_client())
self.log.info("Downloading %s", filename)
tmp_f_name, headers = downloader.get(filename)
cd_header = headers.get('Content-Disposition', '')
dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
dest = dest[1:-1]
elif not dest:
dest = os.path.basename(parsed_url.path)
fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
dest = self.create_artifact(fname, ext)
self.log.debug("Moving %s to %s", tmp_f_name, dest)
shutil.move(tmp_f_name, dest)
return dest
else:
filename = os.path.expanduser(filename) # expanding of '~' is required for check of existence
# check filename 'as is' and all combinations of file_search_path/filename
for dirname in [""] + self.file_search_paths:
location = os.path.join(dirname, filename)
if os.path.exists(location):
if dirname:
self.log.warning("Guessed location from search paths for %s: %s", filename, location)
return get_full_path(location)
self.log.warning("Could not find location at path: %s", filename)
return filename
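# --- illustrative sketch (not part of the Engine source) ---
# find_file() above, in its download branch, derives a local file name from the HTTP
# Content-Disposition header and falls back to the URL path. The helper below shows
# that naming logic in isolation; the header parsing is simplified (no RFC 5987
# handling), so treat it as a sketch rather than bzt's exact behaviour.
import os
from urllib import parse

def guess_download_name(url, content_disposition=""):
    """Return (basename, extension) to use for a downloaded resource."""
    dest = ""
    if "filename=" in content_disposition:
        dest = content_disposition.split("filename=")[-1].strip("\"'")
    if not dest:
        dest = os.path.basename(parse.urlparse(url).path)
    if dest:
        return os.path.splitext(dest)
    return parse.urlparse(url).hostname.replace(".", "_"), ".file"

# guess_download_name("https://example.org/files/plan.jmx")                      -> ('plan', '.jmx')
# guess_download_name("https://example.org/dl", 'attachment; filename="r.csv"')  -> ('r', '.csv')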
def _load_base_configs(self):
configs = []
try:
sys.path.insert(0, os.path.curdir) # necessary for development mode (running bzt from curdir)
configs.extend(self._scan_system_configs())
configs.extend(self._scan_package_configs())
finally:
sys.path.pop(0)
configs.sort(key=os.path.basename)
self.log.debug("Base configs list: %s", configs)
if not configs:
self.log.warning("No base configs were discovered")
self.config.load(configs)
def _scan_package_configs(self):
configs = []
for importer, modname, ispkg in pkgutil.iter_modules(path=None):
try:
if not ispkg:
continue
package_path = getattr(importer, 'path', None)
if package_path is None:
continue
index_path = os.path.join(package_path, modname, 'bzt-configs.json')
if not os.path.exists(index_path):
continue
try:
with codecs.open(index_path, 'rb', encoding='utf-8') as fds:
index_configs = json.load(fds)
except (OSError, IOError, ValueError) as exc:
self.log.debug("Can't load package-specific bzt config %s: %s", index_path, exc)
continue
if not isinstance(index_configs, list):
self.log.debug("Error: value of bzt-configs.json should be a list (%s)" % index_path)
continue
for config_name in index_configs:
configs.append(os.path.join(package_path, modname, config_name))
except BaseException as exc:
self.log.warning("Can't look for package configs in package %r: %s", modname, str(exc))
self.log.debug("Traceback: %s", traceback.format_exc())
return configs
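# --- illustrative sketch (not part of the Engine source) ---
# _scan_package_configs() above walks every importable top-level package and picks up
# an optional 'bzt-configs.json' index that lists extra config files to load. The
# function below shows the same discovery idea in isolation, assuming only the
# documented pkgutil.iter_modules() API and skipping the error handling.
import json
import os
import pkgutil

def scan_plugin_configs(index_name="bzt-configs.json"):
    found = []
    for importer, modname, ispkg in pkgutil.iter_modules():
        pkg_path = getattr(importer, "path", None)
        if not ispkg or pkg_path is None:
            continue
        index_path = os.path.join(pkg_path, modname, index_name)
        if not os.path.exists(index_path):
            continue
        with open(index_path, encoding="utf-8") as fds:
            names = json.load(fds)
        if isinstance(names, list):
            found.extend(os.path.join(pkg_path, modname, name) for name in names)
    return found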
def _scan_system_configs(self):
configs = []
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading system configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
configs.append(fname)
return configs
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
# "tab-replacement-spaces" is not documented 'cause it loads only from base configs
# so it's sort of half-working last resort
self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
self.log.debug("User configs list: %s", user_configs)
self.config.load(user_configs)
user_config = Configuration()
user_config.log = self.log.getChild(Configuration.__name__)
user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
user_config.warn_on_tab_replacement = False
user_config.load(user_configs, self.__config_loaded)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(get_full_path(config, step_up=1))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
err = TaurusConfigError("Please check global config availability or configure provisioning settings")
cls = self.config.get(Provisioning.PROV, err)
self.provisioning = self.instantiate_module(cls)
self.prepared.append(self.provisioning)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them in case they would like to interact
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
msg = "reporter 'module' field isn't recognized: %s"
cls = reporter.get('module', TaurusConfigError(msg % reporter))
instance = self.instantiate_module(cls)
instance.parameters = reporter
if self.__singletone_exists(instance, self.reporters):
continue
assert isinstance(instance, Reporter)
self.reporters.append(instance)
for reporter in self.reporters[:]:
if not reporter.should_run():
self.reporters.remove(reporter)
# prepare reporters
for module in self.reporters:
self.prepared.append(module)
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
srv_config = self.config.get(Service.SERV, [])
services = []
for index, config in enumerate(srv_config):
cls = config.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = config
if self.__singletone_exists(instance, services):
continue
assert isinstance(instance, Service)
services.append(instance)
for service in services[:]:
if not service.should_run():
services.remove(service)
self.services.extend(services)
for module in self.services:
self.prepared.append(module)
module.prepare()
def __singletone_exists(self, instance, mods_list):
"""
:type instance: EngineModule
:type mods_list: list[EngineModule]
:rtype: bool
"""
if not isinstance(instance, Singletone):
return False
for mod in mods_list:
if mod.parameters.get("module") == instance.parameters.get("module"):
msg = "Module '%s' can be only used once, will merge all new instances into single"
self.log.warning(msg % mod.parameters.get("module"))
mod.parameters.merge(instance.parameters)
return True
return False
def __prepare_aggregator(self):
"""
Instantiate aggregators
:return:
"""
cls = self.config.get(SETTINGS).get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
else:
self.aggregator = self.instantiate_module(cls)
self.prepared.append(self.aggregator)
self.aggregator.prepare()
def get_http_client(self):
if self._http_client is None:
self._http_client = HTTPClient()
self._http_client.add_proxy_settings(self.config.get("settings").get("proxy"))
return self._http_client
def _check_updates(self, install_id):
if VERSION == DEV_VERSION:
return
params = (VERSION, install_id)
addr = "https://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", addr)
client = self.get_http_client()
try:
response = client.request('GET', addr, timeout=10)
except NETWORK_PROBLEMS:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.warning("Failed to check for updates")
return
data = response.json()
latest = data.get('latest')
needs_upgrade = data.get('needsUpgrade')
if latest is None or needs_upgrade is None:
self.log.warning(f'Wrong updates info: "{data}"')
else:
self.log.debug(f'Taurus updates info: "{data}"')
mine = LooseVersion(VERSION)
if (mine < latest) or needs_upgrade:
msg = "There is newer version of Taurus %s available, consider upgrading. " \
"What's new: http://gettaurus.org/docs/Changelog/"
self.log.warning(msg, latest)
else:
self.log.debug("Installation is up-to-date")
def eval_env(self):
"""
Should be done after `configure`
"""
envs = self.__get_envs_from_config()
envs = expand_envs_with_os(envs)
def apply_env(value, key, container):
if isinstance(value, str):
container[key] = custom_expandvars(value, envs)
BetterDict.traverse(self.config, apply_env)
self.__export_variables_to_os()
def __export_variables_to_os(self):
"""
Export all user-defined environment variables to the system.
Example:
settings:
env:
FOO: bbb/ccc
BAR: aaa
"""
envs = self.__get_envs_from_config()
for var_name in envs:
if envs[var_name] is None:
if var_name in os.environ:
os.environ.pop(var_name)
else:
os.environ[var_name] = envs[var_name]
self.log.debug("OS env: %s=%s", var_name, envs[var_name])
def __get_envs_from_config(self):
envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
return envs
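# --- illustrative sketch (not part of the Engine source) ---
# eval_env() above expands environment-variable references inside string config values
# via custom_expandvars() and then mirrors settings.env into os.environ. The exact
# syntax supported lives in bzt's utils; the helper below only illustrates the general
# ${VAR} substitution idea with string.Template, which is an assumption, not bzt code.
import os
from string import Template

def expand_value(value, envs):
    # leave unknown ${NAMES} untouched instead of raising, similar in spirit to bzt
    return Template(value).safe_substitute({**os.environ, **envs})

# expand_value("${TAURUS_ARTIFACTS_DIR}/report.csv", {"TAURUS_ARTIFACTS_DIR": "/tmp/a"})
#   -> '/tmp/a/report.csv'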
|
generate-dataset-canny.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description:
# Date : 20/05/2018 2:45 PM
# File Name : generate-dataset-canny.py
import numpy as np
import sys
import pickle
from dexnet.grasping.quality import PointGraspMetrics3D
from dexnet.grasping import GaussianGraspSampler, AntipodalGraspSampler, UniformGraspSampler, GpgGraspSampler
from dexnet.grasping import RobotGripper, GraspableObject3D, GraspQualityConfigFactory, PointGraspSampler
import dexnet
from autolab_core import YamlConfig
from meshpy.obj_file import ObjFile
from meshpy.sdf_file import SdfFile
import os
import multiprocessing
import matplotlib.pyplot as plt
plt.switch_backend('agg')  # use a non-interactive backend so the script can run on a remote computer
def get_file_name(file_dir_):
return sorted(next(os.walk(file_dir_))[1])
def do_job(i):
object_name = os.path.split(file_list_all[i])[-1]
good_grasp = []
for ____ in range(7):
_good_grasp = multiprocessing.Manager().list()
p_set = [multiprocessing.Process(target=worker, args=(i, 100, 20, _good_grasp)) for _ in
range(7)]
[p.start() for p in p_set]
[p.join() for p in p_set]
good_grasp += list(_good_grasp)
good_grasp_file_name = "./generated_grasps/{}_{}_{}".format(filename_prefix, str(object_name), str(len(good_grasp)))
with open(good_grasp_file_name + '.pickle', 'wb') as f:
pickle.dump(good_grasp, f)
tmp = []
for grasp in good_grasp:
grasp_config = grasp[0].configuration
score_friction = grasp[1]
score_canny = grasp[2]
tmp.append(np.concatenate([grasp_config, [score_friction, score_canny]]))
np.save(good_grasp_file_name + '.npy', np.array(tmp), allow_pickle=True, fix_imports=True)
print("finished job ", object_name)
def worker(i, sample_nums, grasp_amount, good_grasp):
object_name = os.path.split(file_list_all[i])[-1]
print('a worker of task {} start'.format(object_name))
yaml_config = YamlConfig(home_dir + "/code/grasp-pointnet/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, home_dir + "/code/grasp-pointnet/dex-net/data/grippers")
grasp_sample_method = "antipodal"
if grasp_sample_method == "uniform":
ags = UniformGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "gaussian":
ags = GaussianGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "antipodal":
ags = AntipodalGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "gpg":
ags = GpgGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "point":
ags = PointGraspSampler(gripper, yaml_config)
else:
raise NameError("Can't support this sampler")
print("Log: do job", i)
if os.path.exists(str(file_list_all[i]) + "/google_512k/nontextured.obj"):
of = ObjFile(str(file_list_all[i]) + "/google_512k/nontextured.obj")
sf = SdfFile(str(file_list_all[i]) + "/google_512k/nontextured.sdf")
else:
print("can't find any obj or sdf file!")
raise NameError("can't find any obj or sdf file!")
mesh = of.read()
sdf = sf.read()
obj = GraspableObject3D(sdf, mesh)
print("Log: opened object", i + 1, object_name)
force_closure_quality_config = {}
canny_quality_config = {}
fc_list_sub1 = np.arange(2.0, 0.75, -0.4)
fc_list_sub2 = np.arange(0.5, 0.36, -0.05)
fc_list = np.concatenate([fc_list_sub1, fc_list_sub2])
for value_fc in fc_list:
value_fc = round(value_fc, 2)
yaml_config['metrics']['force_closure']['friction_coef'] = value_fc
yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = value_fc
force_closure_quality_config[value_fc] = GraspQualityConfigFactory.create_config(
yaml_config['metrics']['force_closure'])
canny_quality_config[value_fc] = GraspQualityConfigFactory.create_config(
yaml_config['metrics']['robust_ferrari_canny'])
good_count_perfect = np.zeros(len(fc_list))
count = 0
max_attempt_n = 20
minimum_grasp_per_fc = grasp_amount
while np.sum(good_count_perfect < minimum_grasp_per_fc) != 0 and max_attempt_n>0:
grasps = ags.generate_grasps(obj, target_num_grasps=sample_nums, grasp_gen_mult=10,
vis=False, random_approach_angle=True)
count += len(grasps)
for j in grasps:
tmp, is_force_closure = False, False
for ind_, value_fc in enumerate(fc_list):
value_fc = round(value_fc, 2)
tmp = is_force_closure
is_force_closure = PointGraspMetrics3D.grasp_quality(j, obj,
force_closure_quality_config[value_fc], vis=False)
if tmp and not is_force_closure:
if good_count_perfect[ind_ - 1] < minimum_grasp_per_fc:
canny_quality = PointGraspMetrics3D.grasp_quality(j, obj,
canny_quality_config[
round(fc_list[ind_ - 1], 2)],
vis=False)
good_grasp.append((j, round(fc_list[ind_ - 1], 2), canny_quality))
good_count_perfect[ind_ - 1] += 1
break
elif is_force_closure and value_fc == fc_list[-1]:
if good_count_perfect[ind_] < minimum_grasp_per_fc:
canny_quality = PointGraspMetrics3D.grasp_quality(j, obj,
canny_quality_config[value_fc], vis=False)
good_grasp.append((j, value_fc, canny_quality))
good_count_perfect[ind_] += 1
break
print('Object:{} GoodGrasp:{}'.format(object_name, good_count_perfect))
max_attempt_n -= 1
object_name_len = len(object_name)
object_name_ = str(object_name) + " " * (25 - object_name_len)
if count == 0:
good_grasp_rate = 0
else:
good_grasp_rate = len(good_grasp) / count
print('Gripper:{} Object:{} Rate:{:.4f} {}/{}'.
format(gripper_name, object_name_, good_grasp_rate, len(good_grasp), count))
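# --- illustrative sketch (not part of the dataset-generation source) ---
# The fc_list loop above labels each grasp with the smallest friction coefficient at
# which it is still force-closure: fc_list is sorted from easy (2.0) to hard (0.4), and
# the transition "was force-closure at the previous fc, not at this one" selects the bin.
# The toy below reproduces that scan with a fake quality predicate (is_fc_at), which is
# an assumption standing in for PointGraspMetrics3D.grasp_quality().
fc_list_demo = np.concatenate([np.arange(2.0, 0.75, -0.4), np.arange(0.5, 0.36, -0.05)])

def best_friction_bin(fc_values, is_fc_at):
    previous = False
    for ind, fc in enumerate(fc_values):
        fc = round(float(fc), 2)
        current = is_fc_at(fc)
        if previous and not current:
            return round(float(fc_values[ind - 1]), 2)   # hardest fc that still worked
        if current and fc == round(float(fc_values[-1]), 2):
            return fc                                     # force-closure even at the hardest fc
        previous = current
    return None                                           # never force-closure

# best_friction_bin(fc_list_demo, lambda fc: fc >= 0.8) -> 0.8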
if __name__ == '__main__':
if len(sys.argv) > 1:
filename_prefix = sys.argv[1]
else:
filename_prefix = "default"
if not os.path.exists('./generated_grasps'):
os.makedirs('./generated_grasps')
home_dir = os.environ['HOME']
file_dir = home_dir + "/dataset/ycb_meshes_google/objects"
if len(sys.argv) > 2:
file_dir = sys.argv[2]
file_list_all = [os.path.join(file_dir, name) for name in get_file_name(file_dir)]
object_numbers = len(file_list_all)
job_list = np.arange(object_numbers)
job_list = list(job_list)
pool_size = 4  # number of jobs run at the same time
if len(sys.argv) > 3:
pool_size = int(sys.argv[3])
assert (pool_size <= len(job_list))
# Initialize pool
pool = []
for _ in range(pool_size):
job_i = job_list.pop(0)
pool.append(multiprocessing.Process(target=do_job, args=(job_i,)))
[p.start() for p in pool]
# refill
while len(job_list) > 0:
for ind, p in enumerate(pool):
if not p.is_alive():
pool.pop(ind)
job_i = job_list.pop(0)
p = multiprocessing.Process(target=do_job, args=(job_i,))
p.start()
pool.append(p)
break
print('All job done.')
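# --- illustrative sketch (not part of the dataset-generation source) ---
# The __main__ block above keeps a fixed number of worker processes alive by polling
# is_alive() and starting a new job whenever a slot frees up. The standard library
# offers the same behaviour via multiprocessing.Pool; the helper below (not called
# anywhere) shows that alternative formulation, assuming do_job and the globals it
# reads are visible to the worker processes (as with the fork start method).
def run_jobs_with_pool(job_ids, processes=4):
    with multiprocessing.Pool(processes=processes) as pool:
        # map() blocks until every job has finished, replacing the manual refill loop
        pool.map(do_job, job_ids)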
|
poisson_tests.py
|
import math, sys, threading, time, requests, random, uuid, json
from time import sleep
from dataclasses import dataclass
@dataclass
class ConnectivityServiceData:
start_TS: int = -1
inter_arrival_time: float = -0.1
end_TS: int = -1
type: str = ''
result: str = ''
uuid: str = ''
ber: bool = True
def millis():
return int(round(time.time() * 1000))
def poisson_wait_time(lmb):
p = random.random()
inter_arrival_time = -math.log(1.0 - p) / lmb
# print('inter_arrival_time: {}'.format(inter_arrival_time))
return inter_arrival_time
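# --- illustrative note (not part of the test script) ---
# poisson_wait_time() uses inverse-transform sampling: if U ~ Uniform(0,1) then
# -ln(1 - U) / lmb follows an Exponential(lmb) distribution, i.e. the inter-arrival
# gap of a Poisson process with rate lmb and mean 1/lmb. random.expovariate(lmb) is
# the standard-library equivalent. The helper below (not called anywhere) can be used
# to sanity-check the mean:
def _check_poisson_wait_time(lmb=0.2, n=100000):
    samples = [poisson_wait_time(lmb) for _ in range(n)]
    return sum(samples) / n   # should be close to 1/lmb (= 5.0 s for lmb = 0.2)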
class Connectivity:
def __init__(self, lmb, connections):
self.log = []
self.start_time = -1
self.end_time = -1
self.total_holding_time = 0  # I don't understand this; candidate for removal
#list of available NEP/SIPs in D2
self.endpointsD2 = {
'available_input':[
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "a9b6a9a3-99c5-5b37-bc83-d087abf94ceb"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "291796d9-a492-5837-9ccb-24389339d18a"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "b1655446-a3e1-5077-8006-0277028b9179"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "5b139e0a-a967-5acf-be83-617c8586840f"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "00672024-10f1-5dbe-95a6-265d60889a86"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "f9dc5177-f923-5873-a8eb-c40a8b90312a"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "e258f1cf-b870-5edf-bd6f-cbe86989bdcd"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "e21b5b3e-5bd4-567c-b819-f5e6ac689c68"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "9853c355-5d3a-5f46-b4bd-e94de00e902f"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "9180bfbf-9ad7-5145-8bb8-9fd8d6b2db9a"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "1b7dbfae-2ef5-5d7c-b4ff-be8fba395f6d"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "e9f2cd83-622f-5693-a3ea-edda0238f675"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "fc7d3da3-31c2-5f7a-868c-fe824919a2e4"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "edb7c0b6-0a87-5ecd-84a2-0fb5ed015550"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "92bc2016-ae6d-530d-ba08-b2637c3eabce"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"5c9809bc-33a9-5214-ab03-b256457b4b46", "sip_uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"5c9809bc-33a9-5214-ab03-b256457b4b46", "sip_uuid": "0e047118-5aee-5721-979e-2fece9b45fb2"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"f8fa6c3a-1840-5d2d-939d-d231cd9c50f1", "sip_uuid": "589c2048-0b7f-59c7-b893-514949faea32"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"f8fa6c3a-1840-5d2d-939d-d231cd9c50f1", "sip_uuid": "075a2ea8-c642-5b8b-9d32-8f97218af55c"}
],
'available_output':[
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "0d29c715-fa35-5eaf-8be8-20cc73d8a4e6"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "fbdd154e-659e-54df-8d75-23575711978b"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "42d64587-8763-5917-bbd6-8f6a8b8d2700"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "9f65970c-24ae-5e17-b86e-d5cf25df589e"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "7e9dc6c7-63d5-5709-aaec-e1dcf243b22b"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "5e34d63c-b23f-5fbb-909e-5c6ed6b13f4f"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "b2c55979-0f9b-52e3-a767-deedebe12547"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "9ce4373d-acca-5edd-b5b0-533057776a2f"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "df71fece-6979-5f50-88cb-a88e94dc684e"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "b10c4b7d-1c2f-5f25-a239-de4daaa622ac"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "d7ee402b-7f9e-5468-86ec-113c5ec22707"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "223421ea-579e-5020-9924-0027c91e12a2"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "71941e6f-0f69-53d4-820e-f4efc5d3364b"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "f15278a8-8d93-5594-af08-18e9c4104af8"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "a6e6da0a-c2ea-5a2e-b901-fcac4abed95a"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"5c9809bc-33a9-5214-ab03-b256457b4b46", "sip_uuid": "367b19b1-3172-54d8-bdd4-12d3ac5604f6"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"5c9809bc-33a9-5214-ab03-b256457b4b46", "sip_uuid": "84e9eef3-11c2-5710-89f1-bf355cacb7c3"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"f8fa6c3a-1840-5d2d-939d-d231cd9c50f1", "sip_uuid": "f1737854-81ef-5a98-9a27-0ae89619ba1e"},
{"context_uuid": "226b9166-974e-57ff-821d-2f24e5a71b00", "node_uuid":"f8fa6c3a-1840-5d2d-939d-d231cd9c50f1", "sip_uuid": "a53e9d1e-8045-591a-8ed8-8b8164ae9d6b"}
],
'occupied_input': [], #[{"cs_uuid": "uuid", "endpoint_info": {}}]
'occupied_output': [] #[{"cs_uuid": "uuid", "endpoint_info": {}}]
}
#list of available NEP/SIPs in the other domains
self.endpoints = {
'available_input':[
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "a9b6a9a3-99c5-5b37-bc83-d087abf94ceb"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "291796d9-a492-5837-9ccb-24389339d18a"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "9180bfbf-9ad7-5145-8bb8-9fd8d6b2db9a"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "92bc2016-ae6d-530d-ba08-b2637c3eabce"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "a9b6a9a3-99c5-5b37-bc83-d087abf94ceb"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "9180bfbf-9ad7-5145-8bb8-9fd8d6b2db9a"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "92bc2016-ae6d-530d-ba08-b2637c3eabce"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "a9b6a9a3-99c5-5b37-bc83-d087abf94ceb"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "9180bfbf-9ad7-5145-8bb8-9fd8d6b2db9a"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "92bc2016-ae6d-530d-ba08-b2637c3eabce"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "ce525139-c58a-5ee7-a527-94e344d8fa5e"}
],
'available_output':[
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "0d29c715-fa35-5eaf-8be8-20cc73d8a4e6"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "fbdd154e-659e-54df-8d75-23575711978b"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "b10c4b7d-1c2f-5f25-a239-de4daaa622ac"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"},
{"context_uuid": "0bd7908e-c22b-574d-8bba-396d060e2611", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "a6e6da0a-c2ea-5a2e-b901-fcac4abed95a"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "0d29c715-fa35-5eaf-8be8-20cc73d8a4e6"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "b10c4b7d-1c2f-5f25-a239-de4daaa622ac"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"},
{"context_uuid": "3d89bd76-e54d-5fab-9787-eb609f291ee0", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "a6e6da0a-c2ea-5a2e-b901-fcac4abed95a"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "79516f5e-55a0-5671-977a-1f5cc934e700"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"589df6c1-90e1-51f5-bda4-b4cd6b2d01e4", "sip_uuid": "0d29c715-fa35-5eaf-8be8-20cc73d8a4e6"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "30d9323e-b916-51ce-a9a8-cf88f62eb77f"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"69c1ef5e-0d4a-5fb7-b729-b494313d3dc5", "sip_uuid": "b10c4b7d-1c2f-5f25-a239-de4daaa622ac"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "68ac012e-54d4-5846-b5dc-6ec356404f90"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "a6e6da0a-c2ea-5a2e-b901-fcac4abed95a"},
{"context_uuid": "627ea8a3-f3cf-578b-b0dd-9e65a5a0e0a3", "node_uuid":"bc1cfe05-af20-59e7-85b8-eb096253c769", "sip_uuid": "bd512c4b-834e-5551-bbc5-0f6de0f262db"}
],
'occupied_input': [], #[{"cs_uuid": "uuid", "endpoint_info": {}}]
'occupied_output': [] #[{"cs_uuid": "uuid", "endpoint_info": {}}]
}
"""
self.endpoints = {'available': {
'cs_01': ['tx_node-nep_1', 'rx_node-nep_1'],
'cs_02': ['tx_node-nep_2', 'rx_node-nep_2'],
'cs_03': ['tx_node-nep_3', 'rx_node-nep_3'],
'cs_04': ['tx_node-nep_4', 'rx_node-nep_4']},
'occupied': {
}}
"""
self.max_connections = connections
self.watcher_thread = threading.Thread(target=self.watcher_function, args=(lmb,))
self.first_conn = None
self.n_threads = 0
self.connection_no_ber = 0
def start(self):
print('Starting watcher')
self.watcher_thread.start()
def watcher_function(self, lmb):
n_connections = 0
self.start_time = millis()
while n_connections < self.max_connections:
s_next = poisson_wait_time(lmb)
print("Wait " + str(s_next) + "seconds for the next E2E CS request.")
time.sleep(s_next)
connection = ConnectivityServiceData(inter_arrival_time=s_next)
next_thread = threading.Thread(target=self.connectivity, args=(connection, ))
next_thread.start()
n_connections = n_connections + 1
self.exit_function()
def connectivity(self, connection):
connection.type = 'CREATE'
connection.start_TS = millis()
self.n_threads = self.n_threads + 1
print("NEW E2E CS between the following Nodes/SIPs (Source & Destination):")
try:
directions_D2 = ['INPUT','OUTPUT']
selectedD2_direction = random.choice(directions_D2)
cs_uuid = str(uuid.uuid4())
if selectedD2_direction == 'INPUT':
#cs_uuid, endpoint = random.choice(list(self.endpoints['available'].items()))
endpointD2_ref = random.choice(self.endpointsD2['available_input'])
endpoint_ref = random.choice(self.endpoints['available_output'])
dst = endpointD2_ref
src = endpoint_ref
#del self.endpoints['available'][cs_uuid]
for idx, endpoint_item in enumerate(self.endpointsD2['available_input']):
if endpoint_item['sip_uuid'] == endpointD2_ref['sip_uuid']:
del self.endpointsD2['available_input'][idx]
break
for idx, endpoint_item in enumerate(self.endpoints['available_output']):
if endpoint_item['sip_uuid'] == endpoint_ref['sip_uuid']:
del self.endpoints['available_output'][idx]
break
#self.endpoints['occupied'][cs_uuid] = endpoint
#self.endpointsD2['occupied_input'][cs_uuid] = endpointD2_ref
#self.endpoints['occupied_output'][cs_uuid] = endpoint_ref
cs_json = {}
cs_json["cs_uuid"] = cs_uuid
cs_json["endpoint_info"] = endpoint_ref
print("cs_json: " + str(cs_json))
self.endpoints['occupied_output'].append(cs_json)
cs_json = {}
cs_json["cs_uuid"] = cs_uuid
cs_json["endpoint_info"] = endpointD2_ref
print("cs_json: "+ str(cs_json))
self.endpointsD2['occupied_input'].append(cs_json)
else:
#cs_uuid, endpoint = random.choice(list(self.endpoints['available'].items()))
endpoint_ref = random.choice(self.endpoints['available_input'])
endpointD2_ref = random.choice(self.endpointsD2['available_output'])
dst = endpoint_ref
src = endpointD2_ref
#del self.endpoints['available'][cs_uuid]
for idx, endpoint_item in enumerate(self.endpoints['available_input']):
if endpoint_item['sip_uuid'] == endpoint_ref['sip_uuid']:
del self.endpoints['available_input'][idx]
break
for idx, endpoint_item in enumerate(self.endpointsD2['available_output']):
if endpoint_item['sip_uuid'] == endpointD2_ref['sip_uuid']:
del self.endpointsD2['available_output'][idx]
break
#self.endpoints['occupied'][cs_uuid] = endpoint
cs_json = {}
cs_json["cs_uuid"] = cs_uuid
cs_json["endpoint_info"] = endpoint_ref
print("cs_json: " + str(cs_json))
self.endpoints['occupied_input'].append(cs_json)
cs_json = {}
cs_json["cs_uuid"] = cs_uuid
cs_json["endpoint_info"] = endpointD2_ref
print("cs_json: "+ str(cs_json))
self.endpointsD2['occupied_output'].append(cs_json)
#ports_list = ['4441','4442','4443','4444']
#selected_port = random.choice(ports_list)
selected_port = '4442'
except IndexError:
#cs_uuid, endpoint = 'cs_error', ['a', 'b']
cs_uuid = 'cs_error'
endpoint_src = {'context_uuid': 'a', 'nep_uuid': 'b', 'sip_uuid': 'c'}
endpoint_dst = {'context_uuid': 'c', 'nep_uuid': 'd', 'sip_uuid': 'e'}
print('No SIPs available: {}'.format(cs_uuid))
#print(str(src))
#print(str(dst))
#src = endpoint[0]
#src = endpoint_src
#dst = endpoint[1]
#dst = endpoint_dst
connection.uuid = cs_uuid
capacity = random.choice([75, 150, 225, 300, 375, 450, 525, 600])
#url = "http://" + ip + "/restconf/config/context/connectivity-service/" + cs_uuid
url = "http://" + ip + selected_port + "/pdl-transport/connectivity_service"
print("Selected port domaain (last number = domain): " + str(selected_port))
# print(url)
print('SEND cs: {}'.format(cs_uuid))
#response = requests.post(url, json={"uuid": cs_uuid, "src": src, "dst": dst, "capacity": capacity})
cs_json = {"cs_uuid": cs_uuid,"source": src, "destination": dst, "capacity": {"value": capacity,"unit": "GHz"}}
print('CS JSON Request: ' + str(cs_json))
#print("URL: " + str(url))
response = requests.post(url, json=cs_json)
#print("POST response: "+str(response.text) + "with status: " + str(response.status_code))
connection.end_TS = millis()
#response["description"] = [OK, No Spectrum, No route]
# waiting E2E CS deployment finishes
time.sleep(40)  # wait 40 seconds before starting to check
print("Waiting the E2E CS deployment with id: " + str(cs_uuid))
while True:
url = "http://" + ip + selected_port + "/pdl-transport/connectivity_service/"+str(cs_uuid)
response = requests.get(url)
#print("WHILE LOOP FOR CS id: " + str(cs_uuid))
#print("GET response.text: "+str(response.text))
response_json = json.loads(response.text)
if response_json["status"] == []:
pass
elif response_json["status"] == "DEPLOYED" or response_json["status"] == "ERROR":
break
else:
pass
time.sleep(10) # awaits 10 seconds before it checks again
# print(response.status_code)
#if response.status_code != 201: # ERROR CASE
if response_json["status"] == "ERROR":
print('Error cs: {} -> {}'.format(cs_uuid, response_json['description']))
connection.result = response_json['description']
self.log.append(connection)
#del self.endpoints['occupied'][connection.uuid]
#self.endpoints['available'][connection.uuid] = endpoint
for idx, occupied_item in enumerate(self.endpointsD2['occupied_input']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpointsD2['occupied_input'][idx]
temp_list = self.endpointsD2['available_input']
temp_list.append(occupied_item["endpoint_info"])
self.endpointsD2['available_input'] = temp_list
break
for idx, occupied_item in enumerate(self.endpointsD2['occupied_output']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpointsD2['occupied_output'][idx]
temp_list = self.endpointsD2['available_output']
temp_list.append(occupied_item["endpoint_info"])
self.endpointsD2['available_output'] = temp_list
break
for idx, occupied_item in enumerate(self.endpoints['occupied_output']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpoints['occupied_output'][idx]
temp_list = self.endpoints['available_output']
temp_list.append(occupied_item["endpoint_info"])
self.endpoints['available_output'] = temp_list
break
for idx, occupied_item in enumerate(self.endpoints['occupied_input']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpoints['occupied_input'][idx]
temp_list = self.endpoints['available_input']
temp_list.append(occupied_item["endpoint_info"])
self.endpoints['available_input'] = temp_list
break
self.n_threads = self.n_threads - 1
return 0
else: # SUCCESSFUL CASE
print('Successful cs: {}'.format(cs_uuid))
connection.result = response_json['description']
self.log.append(connection)
s_next = poisson_wait_time(mu)
connection = ConnectivityServiceData()
connection.inter_arrival_time = s_next
connection.uuid = cs_uuid
connection.type = 'DELETE'
self.delete_cs(connection, selected_port)
def delete_cs(self, connection, selected_port):
start_ht = millis()
print("Wait " + str(connection.inter_arrival_time) + "seconds for the next E2E CS request terminate.")
time.sleep(connection.inter_arrival_time)
self.total_holding_time = self.total_holding_time + connection.inter_arrival_time
check_ht = millis() - start_ht
if check_ht/1000 < 1:
connection.ber = False
#print("Request to terminate E2E CS with ID: "+ str(connection.uuid))
connection.start_TS = millis()
try:
#endpoints = self.endpointsD2['occupied'][connection.uuid]
#endpointD2_ref = self.endpointsD2['occupied'][connection.uuid]
#endpoint_ref = self.endpoints['occupied'][connection.uuid]
for idx, occupied_item in enumerate(self.endpointsD2['occupied_input']):
if occupied_item["cs_uuid"] == connection.uuid:
endpointD2_ref = occupied_item["endpoint_info"]
break
for idx, occupied_item in enumerate(self.endpointsD2['occupied_output']):
if occupied_item["cs_uuid"] == connection.uuid:
endpointD2_ref = occupied_item["endpoint_info"]
break
for idx, occupied_item in enumerate(self.endpoints['occupied_output']):
if occupied_item["cs_uuid"] == connection.uuid:
endpoint_ref = occupied_item["endpoint_info"]
break
for idx, occupied_item in enumerate(self.endpoints['occupied_input']):
if occupied_item["cs_uuid"] == connection.uuid:
endpoint_ref = occupied_item["endpoint_info"]
break
except Exception as e:
print(str(e))
#print(self.endpoints['occupied'])
print(self.endpointsD2['occupied_output'])
print(self.endpointsD2['occupied_input'])
print(self.endpoints['occupied_output'])
print(self.endpoints['occupied_input'])
url = "http://" + ip + selected_port + "/pdl-transport/connectivity_service/terminate/" + connection.uuid
print('SEND delete cs: {}'.format(connection.uuid))
response = requests.post(url, data='')
connection.end_TS = millis()
# waiting E2E CS termination finishes
print("Waiting the E2E CS termination with id: " + str(connection.uuid))
while True:
url = "http://" + ip + selected_port + "/pdl-transport/connectivity_service/"+str(connection.uuid)
response = requests.get(url)
#print("GET response: "+str(response.text))
response_json = json.loads(response.text)
if response_json["status"] == "TERMINATED" or response_json["status"] == "ERROR":
break
time.sleep(10) # awaits 10 seconds before it checks again
if response_json["status"] == "ERROR":
print('Error delete cs: {} -> {}'.format(connection.uuid, response_json['description']))
connection.result = response_json['description']
print(response.content)
else:
print('Successful delete cs: {}'.format(connection.uuid))
connection.result = response_json['description']
#del self.endpoints['occupied'][connection.uuid]
#self.endpoints['available'][connection.uuid] = endpoints
for idx, occupied_item in enumerate(self.endpointsD2['occupied_input']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpointsD2['occupied_input'][idx]
temp_list = self.endpointsD2['available_input']
temp_list.append(occupied_item["endpoint_info"])
self.endpointsD2['available_input'] = temp_list
break
for idx, occupied_item in enumerate(self.endpointsD2['occupied_output']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpointsD2['occupied_output'][idx]
temp_list = self.endpointsD2['available_output']
temp_list.append(occupied_item["endpoint_info"])
self.endpointsD2['available_output'] = temp_list
break
for idx, occupied_item in enumerate(self.endpoints['occupied_output']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpoints['occupied_output'][idx]
temp_list = self.endpoints['available_output']
temp_list.append(occupied_item["endpoint_info"])
self.endpoints['available_output'] = temp_list
break
for idx, occupied_item in enumerate(self.endpoints['occupied_input']):
if occupied_item["cs_uuid"] == connection.uuid:
del self.endpoints['occupied_input'][idx]
temp_list = self.endpoints['available_input']
temp_list.append(occupied_item["endpoint_info"])
self.endpoints['available_input'] = temp_list
break
self.log.append(connection)
self.n_threads = self.n_threads - 1
def exit_function(self):
print('exit_function')
self.end_time = millis()
while self.n_threads != 0:
sleep(0.5)
print('Ending test')
spect_error = 0
path_error = 0
created = 0
deleted = 0
delete_error = 0
no_ber = 0
self.log.sort(key=lambda x: x.start_TS, reverse=False)
with open('log_{}_a{}_h{}_c{}.csv'.format(millis(), lmb_inv, mu_inv, connections), 'w') as filehandle:
print('WRITING log')
filehandle.write("Parameters:\n -N connections: %s\n -Inter arrival rate: %s(s)\n -Holding time: %s(s)\n" % (self.max_connections, lmb_inv, mu_inv))
for connection in self.log:
if connection.result == 'No spectrum':
spect_error = spect_error+1
elif connection.result == 'Deployment Error':
path_error = path_error + 1
elif connection.result == 'OK' and connection.type == 'CREATE':
created = created + 1
elif connection.result == 'OK' and connection.type == 'DELETE':
deleted = deleted + 1
elif connection.result != 'OK' and connection.type == 'DELETE':
delete_error = delete_error + 1
else:
print('Should not enter here')
print(connection)
if not connection.ber and connection.type == 'DELETE':
no_ber = no_ber + 1
filehandle.write("%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n" % (connection.start_TS, connection.inter_arrival_time, connection.end_TS, connection.type, connection.result, connection.ber, connection.uuid))
filehandle.write("Successfully created: %s\n" % created)
filehandle.write("Successfully deleted: %s\n" % deleted)
filehandle.write("Spectrum error: %s\n" % spect_error)
filehandle.write("Path errors: %s\n" % path_error)
filehandle.write("Error deleting: %s\n" % delete_error)
filehandle.write("Error no BER: %s\n" % no_ber)
assert created+spect_error+path_error == self.max_connections
filehandle.write("Blocking probability: %s\n" % float((spect_error+path_error)/(created+spect_error+path_error)))
running_time = float(self.end_time-self.start_time)/1000
filehandle.write("%s connections in %s seconds\n" % (self.max_connections, running_time))
cps = self.max_connections/running_time
filehandle.write("%s connections created per second\n" % (created/running_time))
filehandle.write("Average interarrival time: %s\n" % cps)
filehandle.write("Average holding time: %s\n" % (self.total_holding_time/deleted))
filehandle.write("Erlangs: %s\n" % erlang)
print('WRITTEN')
# NOTE: the target IP is hardcoded below ('localhost:'); the domain port is appended per request
# sys.argv[1] = Inter arrival time in seconds (1/lambda)
# sys.argv[2] = Holding time in seconds (1/mu) --> must always be larger than 1/lambda, otherwise requests will be lost
# sys.argv[3] = total number of requests
if __name__ == "__main__":
#ip = 'localhost:4441'
ip = 'localhost:'
lmb_inv = float(sys.argv[1])
mu_inv = float(sys.argv[2])
connections = float(sys.argv[3])
total_holding_time = 0
lmb = 1/lmb_inv
mu = 1/mu_inv
erlang = lmb*mu_inv
print('Starting')
print('\tIp: {}'.format(ip))
print('\tInter arrival time: {} seconds --> Lambda: {}'.format(lmb_inv, lmb))
print('\tHolding time: {} seconds --> Mu: {}'.format(mu_inv, mu))
print('\tErlangs: {}'.format(erlang))
print('\tTotal connections: {}'.format(connections))
connectivity = Connectivity(lmb, connections)
input('Start?')
connectivity.start()
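# --- illustrative sketch (not part of the test script) ---
# The offered load computed above is erlang = lmb * mu_inv = (arrival rate) x (mean
# holding time), and exit_function() reports the *measured* blocking probability as
# blocked / offered requests. For an M/M/c/c loss system the theoretical value is
# given by the Erlang B formula; the helper below computes it for comparison (added
# here for illustration only, the test itself does not use it).
def erlang_b(offered_load, servers):
    # iterative form of B(E, c) = E*B(E, c-1) / (c + E*B(E, c-1)), with B(E, 0) = 1
    b = 1.0
    for c in range(1, servers + 1):
        b = (offered_load * b) / (c + offered_load * b)
    return b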
|
simulation_3.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network_3
import link_3
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 # 0 means unlimited
simulation_time = 1 # give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
routing_dict = {'router_a': {"1" : 0, "2": 1}, 'router_d': {"3" : 0, "4": 1}}
object_L = [] # keeps track of objects, so we can kill their threads
# create network nodes
host_1 = network_3.Host(1)
object_L.append(host_1)
host_2 = network_3.Host(2)
object_L.append(host_2)
host_3 = network_3.Host(3)
object_L.append(host_3)
host_4 = network_3.Host(4)
object_L.append(host_4)
router_a = network_3.Router(routing_dict,name='router_a', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_a)
router_b = network_3.Router(routing_dict, name='router_b', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_b)
router_c = network_3.Router(routing_dict, name='router_c', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_c)
router_d = network_3.Router(routing_dict, name='router_d', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_d)
# create a Link Layer to keep track of links between network nodes
link_layer = link_3.LinkLayer()
object_L.append(link_layer)
# add all the links
# link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link_3.Link(host_1, 0, router_a, 0, 50))
link_layer.add_link(link_3.Link(host_2, 0, router_a, 0, 50))
link_layer.add_link(link_3.Link(router_a, 0,router_b, 0, 50))
link_layer.add_link(link_3.Link(router_a, 1, router_c, 0, 50))
link_layer.add_link(link_3.Link(router_b, 0, router_d, 0, 50))
link_layer.add_link(link_3.Link(router_c, 0, router_d, 0, 50))
link_layer.add_link(link_3.Link(router_d, 0, host_3, 0, 50))
link_layer.add_link(link_3.Link(router_d, 1, host_4, 0, 50))
# start all the objects
thread_L = []
thread_L.append(threading.Thread(name=host_1.__str__(), target=host_1.run))
thread_L.append(threading.Thread(name=host_2.__str__(), target=host_2.run))
thread_L.append(threading.Thread(name=host_3.__str__(), target=host_3.run))
thread_L.append(threading.Thread(name=host_4.__str__(), target=host_4.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))
thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))
thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
# create some send events
for i in range(1):
host_1.udt_send(1,4,"from 1 to 4")
host_2.udt_send(2,4, "from 2 to 4")
host_2.udt_send(2,3, "from 2 to 3")
host_1.udt_send(1,3, "from 1 to 3")
# give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
# join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
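# --- illustrative sketch (not part of the simulation source) ---
# routing_dict above is a static forwarding table: router name -> {address (string) ->
# outgoing interface index}. For router_a the keys appear to be the sending hosts (1, 2)
# and for router_d the destination hosts (3, 4); the exact semantics live in
# network_3.Router, so this reading is an assumption. The lookup itself is a plain dict access:
def out_interface(routing_dict, router_name, addr):
    return routing_dict[router_name][str(addr)]

# out_interface({'router_d': {"3": 0, "4": 1}}, 'router_d', 4) -> 1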
|
gui.py
|
import random
import threading
import tkinter
import tkinter.scrolledtext
import collections
from operator import attrgetter
from operator import itemgetter
from time import sleep
from PluginBot import PluginBot
from PluginBot import getMessage
from PluginBot import BYTE
def rgb(red, green, blue):
return "#%02x%02x%02x" % (red, green, blue)
class GUI:
root = None
bot = None
entry = None
textOutput = None
entryMessage = ""
lastUserSuggestion = ""
channelTags = dict()
usernameList = dict()
isPluginInitialized = False
lastSortedSuggestionDict = None
previousMessageLog = collections.deque([])
messageCounter = 0
messageLogMode = 0
TITLE_TEXT = "WedrClient - IRC Client Bot"
isUsingBouncer = False
messageBuffer = 4096
def __init__(self, hostID = -1):
if (hostID == 3):
self.isUsingBouncer = True
self.root = tkinter.Tk()
self.root.title(self.TITLE_TEXT)
width = 650
height = 500
self.root.minsize(width, height)
logMessageFrame = tkinter.Frame(master = self.root)
logMessageFrame.grid(row = 0, column = 0, sticky = (tkinter.N, tkinter.W, tkinter.E, tkinter.S))
self.textOutput = tkinter.scrolledtext.ScrolledText(master = logMessageFrame, wrap = tkinter.WORD)
self.textOutput.config(state = tkinter.DISABLED)
self.textOutput.pack(expand = 1, fill = tkinter.BOTH)
userInputFrame = tkinter.Frame(master = self.root, borderwidth = 4)
userInputFrame.grid(row = 1, column = 0, sticky = (tkinter.W, tkinter.E, tkinter.S), pady = 2)
#button = tkinter.Button(master = userInputFrame, text = "Send", command = lambda: self.sendMessage(None))
#button.bind("<Return>", self.sendMessage)
#button.grid(row = 0, column = 0, sticky = (tkinter.W, tkinter.E), padx = 1.5)
self.entry = tkinter.Entry(master = userInputFrame)
self.entry.bind("<Return>", self.threadedEntryCommand)
self.entry.bind("<Tab>", lambda event: self.autocomplete(event, self.entry.get()))
self.entry.bind("<Up>", self.previousMessage)
self.entry.bind("<Down>", self.nextMessage)
self.entry.bind("<Control-Alt-Left>", lambda event: self.previousChannel(event))
self.entry.bind("<Control-Alt-Right>", lambda event: self.nextChannel(event))
self.entry.grid(row = 0, column = 0, sticky = (tkinter.W, tkinter.E), padx = 1.5)
self.root.grid_rowconfigure(0, weight = 15)
self.root.grid_rowconfigure(1, weight = 1)
self.root.grid_columnconfigure(0, weight = 1)
userInputFrame.grid_rowconfigure(0, weight = 1)
userInputFrame.grid_columnconfigure(0, weight = 1)
#userInputFrame.grid_columnconfigure(1, weight = 7)
self.bot = PluginBot(self)
self.bot.connect(hostID)
self.bot.start()
self.addChannel(self.bot.focusedChannel)
sortedDict = sorted(self.channelTags, key = lambda x: x.length)
for i in range(0, len(sortedDict)):
self.textOutput.tag_configure(self.bot.focusedChannel, foreground = self.channelTags[sortedDict[i - len(sortedDict)]])
self.textOutput.tag_configure("red", foreground = rgb(255, 0, 0))
def run(self):
self.root.mainloop()
def print(self, text = "", user = None):
if (text != ""):
self.textOutput.config(state = "normal")
self.textOutput.insert(tkinter.END, "\n%s" % text)
try:
indexCount = int(self.textOutput.index("%s-1c" % tkinter.END).split(".")[0])
if (indexCount > self.messageBuffer):
#Index number count starts from 1.0.
# X.Y: X is the line number. Y is the character index of line X.
self.textOutput.delete("1.0", "2.0")
except Exception as err:
self.print(err)
sortedDict = sorted(self.channelTags, key = attrgetter("length"))
if (len(sortedDict) > 0):
for i in range(0, len(sortedDict)):
self.tagPattern(sortedDict[i-len(sortedDict)].name, sortedDict[i - len(sortedDict)].name)
if (self.bot != None):
self.tagUserPattern(self.bot.nickName, "red", user)
self.textOutput.see(tkinter.END)
self.textOutput.config(state = tkinter.DISABLED)
def sendMessage(self, event):
if (self.entryMessage != ""):
if (self.bot.focusedChannel == ""):
self.print("You are not in any channel.")
else:
if (len(self.entryMessage) > 432):
currentLength = len(self.entryMessage)
beginMessage = 0
while (currentLength > 432):
if (self.entryMessage[0] == "."):
self.bot.s.send(BYTE("PRIVMSG %s :%s" % (self.bot.focusedChannel, self.entryMessage[beginMessage:beginMessage + 432])))
tokenString = "%s PRIVMSG %s :%s" % (self.bot.nickName, self.bot.focusedChannel, self.entryMessage[beginMessage:beginMessage + 432])
self.bot.handleTokens(self.bot.makeTokens(tokenString))
else:
self.bot.s.send(BYTE("PRIVMSG %s :%s" % (self.bot.focusedChannel, self.entryMessage[beginMessage:beginMessage + 432])))
self.print(text = "[%s] <%s> %s" % (self.bot.focusedChannel, self.bot.nickName, self.entryMessage[beginMessage:beginMessage + 432]))
currentLength -= 432
beginMessage += 432
if (self.entryMessage[0] == "."):
self.bot.s.send(BYTE("PRIVMSG %s :%s" % (self.bot.focusedChannel, self.entryMessage[beginMessage:currentLength])))
tokenString = "%s PRIVMSG %s :%s" % (self.bot.nickName, self.bot.focusedChannel, self.entryMessage[beginMessage:beginMessage + currentLength])
self.bot.handleTokens(self.bot.makeTokens(tokenString))
else:
self.bot.s.send(BYTE("PRIVMSG %s :%s" % (self.bot.focusedChannel, self.entryMessage[beginMessage:beginMessage + currentLength])))
self.print(text = "[%s] <%s> %s" % (self.bot.focusedChannel, self.bot.nickName, self.entryMessage[beginMessage:beginMessage + currentLength]))
else:
if (self.entryMessage[0] == "."):
self.bot.s.send(BYTE("PRIVMSG %s :%s" % (self.bot.focusedChannel, self.entryMessage)))
tokenString = "%s PRIVMSG %s :%s" % (self.bot.nickName, self.bot.focusedChannel, self.entryMessage)
self.bot.handleTokens(self.bot.makeTokens(tokenString))
else:
self.bot.s.send(BYTE("PRIVMSG %s :%s" % (self.bot.focusedChannel, self.entryMessage)))
self.print(text = "[%s] <%s> %s" % (self.bot.focusedChannel, self.bot.nickName, self.entryMessage))
self.textOutput.see(tkinter.END)
self.entry.delete(0, tkinter.END)
self.previousMessageLog.appendleft(self.entryMessage)
self.messageLogMode = 0
if (len(self.previousMessageLog) > 10):
self.previousMessageLog.pop()
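# --- illustrative sketch (not part of the original source) ---
# sendMessage() above splits anything longer than 432 characters into consecutive
# PRIVMSG payloads (IRC lines are capped at 512 bytes including command, target and
# CRLF, so ~432 characters is a safe payload budget). The chunking on its own, as a
# standalone helper:
def chunk_message(text, size=432):
    return [text[i:i + size] for i in range(0, len(text), size)]

# chunk_message("a" * 1000) -> three chunks of 432, 432 and 136 characters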
def sendActionMessage(self, event, message):
if (self.entryMessage != ""):
if (self.bot.focusedChannel == ""):
self.print("You are not in any channel.")
else:
self.bot.s.send(BYTE("PRIVMSG %s :\x01ACTION %s\x01" % (self.bot.focusedChannel, message)))
self.print("[%s] * %s %s" % (self.bot.focusedChannel, self.bot.nickName, message))
self.previousMessageLog.appendleft(self.entryMessage)
self.messageLogMode = 0
if (len(self.previousMessageLog) > 10):
self.previousMessageLog.pop()
def changeNickname(self, newName):
if (self.entryMessage != ""):
self.bot.s.send(BYTE("NICK %s" % newName))
self.print("Changing nickname to %s" % newName)
return "break"
def previousMessage(self, event):
if (self.messageLogMode == 0):
if (len(self.previousMessageLog) > 0):
self.messageCounter = 0
self.entry.delete(0, tkinter.END)
self.entry.insert(0, self.previousMessageLog[self.messageCounter])
self.messageLogMode = 1
elif (self.messageLogMode == 1):
#Previous
self.messageCounter += 1
if (self.messageCounter > len(self.previousMessageLog) -1):
self.messageCounter = len(self.previousMessageLog) - 1
self.entry.delete(0, tkinter.END)
self.entry.insert(0, self.previousMessageLog[self.messageCounter])
return "break"
def nextMessage(self, event):
if (self.messageLogMode == 0):
if (len(self.previousMessageLog) > 0):
self.messageCounter = 0
self.entry.delete(0, tkinter.END)
self.entry.insert(0, self.previousMessageLog[self.messageCounter])
self.messageLogMode = 1
elif (self.messageLogMode == 1):
#Previous
self.messageCounter -= 1
if (self.messageCounter < 0):
self.messageCounter = 0
self.entry.delete(0, tkinter.END)
self.entry.insert(0, self.previousMessageLog[self.messageCounter])
return "break"
def previousChannel(self, event):
currentIndex = self.bot.channels.index(self.bot.focusedChannel)
if (currentIndex > 0):
currentIndex -= 1
else:
currentIndex = len(self.bot.channels) - 1
self.bot.focusedChannel = self.bot.channels[currentIndex]
self.print("Currently focused channel: %s" % self.bot.focusedChannel)
self.root.title("%s - %s" % (self.TITLE_TEXT, self.bot.focusedChannel))
return "break"
def nextChannel(self, event):
currentIndex = self.bot.channels.index(self.bot.focusedChannel)
if (currentIndex < len(self.bot.channels) - 1):
currentIndex += 1
else:
currentIndex = 0
self.bot.focusedChannel = self.bot.channels[currentIndex]
self.print("Currently focused channel: %s" % self.bot.focusedChannel)
self.root.title("%s - %s" % (self.TITLE_TEXT, self.bot.focusedChannel))
return "break"
def randomColor(self):
randomTextColor = "#%02x%02x%02x" % (random.randint(90, 200), random.randint(90, 200), random.randint(90, 200))
return randomTextColor
def getUserInput(self, event):
if (event != "-1"):
self.entryMessage = self.entry.get()
self.entry.delete(0, tkinter.END)
def addChannel(self, channel):
Channel = collections.namedtuple("Channel", ["name", "length"])
c = Channel(name = channel, length = len(channel))
if (c not in self.channelTags):
self.channelTags[c] = self.randomColor()
return sorted(self.channelTags, key = lambda x: x.length)
def addUser(self, user, channel):
if (channel not in self.usernameList):
self.usernameList.setdefault(channel, [])
if (user not in self.usernameList[channel]):
self.usernameList[channel].append(user)
def tagPattern(self, pattern, tag):
start = "1.0"
end = tkinter.END
self.textOutput.mark_set("matchStart", start)
self.textOutput.mark_set("matchEnd", start)
self.textOutput.mark_set("searchLimit", end)
count = tkinter.IntVar()
legitSymbols = [".", ",", " ", "!", "]", "\n", "\r", ")", "&", "?", "=", "'", '"', ";"]
while True:
index = self.textOutput.search(pattern, "matchEnd", "searchLimit", count = count, regexp = False)
if (index == "" or count.get() == 0):
break
check = False
try:
newIndex = "%s+%dc" % (index, count.get())
temp = self.textOutput.get(newIndex, "%s+1c" % newIndex)
if (temp not in legitSymbols):
check = True
except:
check = True
if (not check):
self.textOutput.mark_set("matchStart", index)
self.textOutput.mark_set("matchEnd", "%s+%sc" % (index, count.get()))
self.textOutput.tag_add(tag, "matchStart", "matchEnd")
else:
self.textOutput.mark_set("matchEnd", "%s+%sc" % (index, count.get()+1))
def tagUserPattern(self, pattern, tag, user):
start = "1.0"
end = tkinter.END
self.textOutput.mark_set("matchStart", start)
self.textOutput.mark_set("matchEnd", start)
self.textOutput.mark_set("searchLimit", end)
count = tkinter.IntVar()
newIndexOffset = 1
while True:
reg = r"(%s([^\>\]]|\,|\.|\ |\:))" % pattern
index = self.textOutput.search(reg, "matchEnd", "searchLimit", count = count, regexp = True)
if (index == "" or count.get() == 0):
break
lineIndex = "%s.0" % index.split(".")[0]
otherCount = tkinter.IntVar()
reg = r"\*\ [A-Za-z]+\ " if user == None else r"\*\ %s\ " % user
newIndex = self.textOutput.search(reg, lineIndex, "%s lineend" % lineIndex, count = otherCount, regexp = True)
if (newIndex == "" or otherCount.get() == 0):
reg = r"\<.+\>" if user == None else r"\<%s\>" % user
newIndex = self.textOutput.search(reg, lineIndex, "%s lineend" % lineIndex, count = otherCount, regexp = True)
if (newIndex == "" or otherCount.get() == 0):
self.textOutput.mark_set("matchEnd", "%s+1l" % lineIndex)
continue
else:
newIndexOffset = 1
else:
newIndexOffset = 2
newIndex = "%s.%s" % (newIndex.split(".")[0], int(newIndex.split(".")[1]) + newIndexOffset)
self.textOutput.mark_set("matchStart", newIndex)
self.textOutput.mark_set("matchEnd", "%s+%sc" % (newIndex, otherCount.get()-2))
self.textOutput.tag_add(tag, "matchStart", "matchEnd")
self.textOutput.mark_set("matchEnd", "%s+1l" % lineIndex)
def rejoin(self, event):
#Only used for initializing the bot. Do not use unless explicitly required.
sortedDict = sorted(self.channelTags, key = lambda x: x.length)
for i in range(0, len(sortedDict)):
#self.entryMessage = ("/l %s" % sortedDict[i - len(sortedDict)].name)
#self.entryCommand("-1")
self.entryMessage = ("/j %s" % sortedDict[i - len(sortedDict)].name)
self.entryCommand("-1")
sleep(0.5)
self.entryMessage = "/c"
self.entryCommand("-1")
if (not self.isUsingBouncer):
self.entryMessage = "/u clear"
self.entryCommand("-1")
else:
self.entryMessage = " "
self.entryCommand("-1")
self.entryMessage = " "
self.entryCommand("-1")
self.print(" -- Welcome to Channel %s. Type /help for more info. --" % self.bot.focusedChannel)
self.print(" -- Type in the input text area, then press ENTER key to chat. --")
self.print(" ")
self.root.title("%s - %s" % (self.TITLE_TEXT, self.bot.focusedChannel))
return
def autocomplete(self, event, token, lower = True):
cursorIndex = self.entry.index(tkinter.INSERT)
cursorIndexBegin = cursorIndex-1
cursorIndexEnd = cursorIndex
try:
if (token[cursorIndexBegin] == " "):
cursorIndexBegin -= 1
while (token[cursorIndexBegin] != " "):
cursorIndexBegin -= 1
except:
cursorIndexBegin = 0
try:
while (token[cursorIndexEnd] != " "):
cursorIndexEnd += 1
except:
cursorIndexEnd = self.entry.index(tkinter.END)
if (cursorIndexBegin < 0):
cursorIndexBegin = 0
tempToken = token[cursorIndexBegin:cursorIndexEnd].strip(" ")
try:
if (self.lastUserSuggestion != tempToken):
tempDict = dict()
for user in self.usernameList[self.bot.focusedChannel]:
for i in range(0, len(tempToken)):
if (lower):
if (tempToken[i].lower() == user[i].lower()):
tempDict[user] = i
else:
break
else:
if (tempToken[i].upper() == user[i].upper()):
tempDict[user] = i
else:
break
self.lastSortedSuggestionDict = sorted(tempDict, key = tempDict.get, reverse = True)
if (len(self.lastSortedSuggestionDict) > 0):
self.lastUserSuggestion = self.lastSortedSuggestionDict[0]
self.entry.delete(cursorIndexBegin, cursorIndexEnd)
self.entry.insert(cursorIndexBegin, "%s " % self.lastUserSuggestion if cursorIndexBegin == 0 else " %s" % self.lastUserSuggestion)
elif (lower == True):
self.autocomplete(event, token, lower = False)
else:
#tempList = self.usernameList[self.bot.focusedChannel]
#self.lastUserSuggestion = tempList[(tempList.index(tempToken) + 1) % len(tempList)]
self.lastUserSuggestion = self.lastSortedSuggestionDict[(self.lastSortedSuggestionDict.index(tempToken) + 1) % len(self.lastSortedSuggestionDict)]
self.entry.delete(cursorIndexBegin, cursorIndexEnd)
self.entry.insert(cursorIndexBegin, "%s " % self.lastUserSuggestion if cursorIndexBegin == 0 else " %s" % self.lastUserSuggestion)
#We return the string, "break", for tcl/tkinter to drop double events, due to TAB key firing off multiple platform-specific events.
return "break"
except:
#We return the string, "break", for tcl/tkinter to drop double events, due to TAB key firing off multiple platform-specific events.
return "break"
def showUserList(self, channel):
try:
arrayList = self.usernameList[channel]
except:
self.usernameList.setdefault(channel, [])
arrayList = self.usernameList[channel]
if (len(arrayList) <= 0):
self.print("Known users list in %s is empty." % channel)
return
tempStr = ""
for i in range(0, len(arrayList)):
if (i != len(arrayList) - 1):
tempStr += "%s, " % arrayList[i]
else:
tempStr += "%s" % arrayList[i]
self.print("Known %s users list: %s" % (channel, tempStr))
def threadedEntryCommand(self, event):
t = threading.Thread(target = self.entryCommand, args = (event,))
        t.daemon = True
t.start()
def entryCommand(self, event):
#Handles all user inputs
#If event is str(-1), then it skips obtaining user input.
self.getUserInput(event)
if (self.entryMessage != ""):
tokens = self.entryMessage.split(" ")
if (tokens[0] == "/j" or tokens[0] == "/join"):
#Joining channels
if (len(tokens) > 2):
for i in range(1, len(tokens)):
if (tokens[i][0] != "#"):
tokens[i] = "#%s" % tokens[i]
if (tokens[i] not in self.channelTags):
                            sortedDict = self.addChannel(tokens[i])
                            for j in range(0, len(sortedDict)):
                                if (tokens[i] == sortedDict[j].name):
                                    self.textOutput.tag_configure(tokens[i], foreground = self.channelTags[sortedDict[j]])
break
self.bot.switch(tokens[i])
elif (len(tokens) == 2):
if (tokens[1][0] != "#"):
tokens[1] = "#%s" % tokens[1]
if (tokens[1] not in self.channelTags):
sortedDict = self.addChannel(tokens[1])
for i in range(0, len(sortedDict)):
if (tokens[1] == sortedDict[i].name):
self.textOutput.tag_configure(tokens[1], foreground = self.channelTags[sortedDict[i]])
break
self.bot.switch(tokens[1])
else:
self.print("Incorrect usage: /join [channel]")
self.entry.delete(0, tkinter.END)
self.root.title("%s - %s" % (self.TITLE_TEXT, self.bot.focusedChannel))
elif (tokens[0] == "/q" or tokens[0] == "/e" or tokens[0] == "/quit" or tokens[0] == "/exit"):
#Quitting the bot client. Make sure to press any keys in the terminal/command prompt after use.
print("Quitting bot.")
self.bot.quit()
self.bot.join()
print("Quitting tkinter GUI.")
self.root.destroy()
return
elif (tokens[0] == "/i" or tokens[0] == "/identify"):
#Identifying the bot to the IRC host, only when the bot is unable to request for verbose, but the connection is still valid.
self.bot.identify()
workerThread = threading.Thread(target = self.rejoin, args = (event,))
workerThread.start()
elif (tokens[0] == "/c" or tokens[0] == "/clear"):
#Clearing the text output screen.
self.textOutput.config(state = "normal")
if (len(tokens) > 1 and tokens[1] == "tag"):
sortedDict = sorted(self.channelTags, key = lambda x: x.length)
for i in range(0, len(sortedDict)):
self.textOutput.tag_delete(sortedDict[i].name)
self.textOutput.delete("1.0", tkinter.END)
self.textOutput.config(state = tkinter.DISABLED)
elif (tokens[0] == "/r" or tokens[0] == "/reload"):
#Reloading plugins.
self.bot.reloadAll()
elif (tokens[0] == "/f" or tokens[0] == "/focus"):
self.print("Focused channel is %s." % self.bot.focusedChannel)
elif (tokens[0] == "/u" or tokens[0] == "/userlist" or tokens[0] == "/userslist"):
if (len(tokens) > 1):
if (tokens[1] == "clear"):
self.usernameList.clear()
else:
if (tokens[1][0] != "#"):
tokens[1] = "#%s" % tokens[1]
self.showUserList(tokens[1])
else:
self.showUserList(self.bot.focusedChannel)
elif (tokens[0] == "/l" or tokens[0] == "/leave"):
#Leaving channels
sortedDict = sorted(self.channelTags, key = lambda x: x.length)
if (len(tokens) > 2):
for i in range(1, len(tokens)):
if (tokens[i][0] != "#"):
tokens[i] = "#%s" % tokens[i]
check = False
for j in range(0, len(sortedDict)):
if (sortedDict[j].name == tokens[i]):
self.channelTags.pop(sortedDict[j])
check = True
if (check):
self.bot.leave(tokens[i], True)
self.textOutput.tag_delete(tokens[i])
if (len(self.channelTags) > 0):
sortedDict = sorted(self.channelTags, key = lambda x: x.length)
self.bot.switch(sortedDict[len(self.channelTags)-1].name)
else:
self.bot.switch("", False)
elif (self.isUsingBouncer):
self.bot.s.send(BYTE("PART %s :%s" % (tokens[i], "I am leaving.")))
else:
self.print("Channel, %s, is not on the channel list." % tokens[i])
elif (len(tokens) == 2):
if (tokens[1][0] != "#"):
tokens[1] = "#%s" % tokens[1]
check = False
for i in range(0, len(sortedDict)):
if (sortedDict[i].name == tokens[1]):
self.channelTags.pop(sortedDict[i])
check = True
if (check):
self.bot.leave(tokens[1], True)
self.textOutput.tag_delete(tokens[1])
if (len(self.channelTags) > 0):
sortedDict = sorted(self.channelTags, key = lambda x: x.length)
self.bot.switch(sortedDict[len(self.channelTags)-1].name)
else:
self.bot.switch("", False)
elif (self.isUsingBouncer):
self.bot.s.send(BYTE("PART %s :%s" % (tokens[1], "I am leaving.")))
else:
self.print("Channel, %s, is not on the channel list." % tokens[1])
else:
self.print("Incorrect usage: /leave [channel]")
self.entry.delete(0, tkinter.END)
elif (tokens[0] == "/m" or tokens[0] == "/me"):
#Emote command
if (len(tokens) > 1):
message = getMessage(tokens, 1)
self.sendActionMessage(event, message)
else:
self.print("ACTION message is empty.")
elif (tokens[0] == "/a" or tokens[0] == "/active"):
#Gives a list of all channels the bot is active in, or has joined in.
tempList = ""
sortedDict = sorted(self.channelTags, key = lambda x: x.name)
if (len(sortedDict) <= 0):
self.print("Joined Channel List is empty.")
else:
for i in range(0, len(sortedDict)):
tempName = sortedDict[i].name
if (tempName == self.bot.focusedChannel):
tempName = "[[%s]]" % tempName
tempList += tempName
if (i < len(sortedDict)-1):
tempList += ", "
self.print("Joined Channel List: %s" % tempList)
elif (tokens[0] == "/n" or tokens[0] == "/nick"):
#Changing nicknames
if (len(tokens) > 1):
self.changeNickname(tokens[1])
elif (tokens[0] == "/?" or tokens[0] == "/help"):
#Help command.
self.print(" ")
self.print("Type anything in the input text area, then press ENTER to chat with others.")
self.print("CTRL+ALT+LEFT ARROW or CTRL+ALT+RIGHT ARROW to quickly iterate through joined channels.")
self.print("UP ARROW or DOWN ARROW to fetch the last 10 sent messages.")
self.print(" 1. /? or /help -- Bring up the bot commands.")
self.print(" 2. /a or /active -- Shows the joined channels list.")
self.print(" 3. /c or /clear -- Clear the text output screen.")
self.print(" 4. /e or /exit -- Quit the bot.")
self.print(" 5. /f or /focus -- Print currently focused channel.")
self.print(" 6. /j or /join -- Join a new channel. Channel focus will switch over.")
self.print(" 7. /l or /leave -- Leave channel. Channel focus will change.")
self.print(" 8. /m or /me -- Print ACTION message.")
self.print(" 9. /n or /nick -- Change nickname.")
self.print("10. /q or /quit -- Quit the bot.")
self.print("11. /r or /reload -- Reload all plugins. (Hotswapping is supported.)")
self.print("12. /u or /userlist -- Shows the users list.")
if (self.bot.focusedChannel == ""):
self.print(" ")
self.print("You are currently not joined in any channel.")
else:
#Send commands over.
self.sendMessage(event)
|
clientGUI.py
|
#!/usr/bin/env python3
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
msg = my_msg.get()
my_msg.set("") # Clears input field.
client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
client_socket.close()
top.quit()
def on_closing(event=None):
"""This function is to be called when the window is closed."""
my_msg.set("{quit}")
send()
top = tkinter.Tk()
top.title("Chatter")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not PORT:
PORT = 33000
else:
PORT = int(PORT)
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop() # Starts GUI execution.
|
bot.py
|
import asyncio
import dataclasses
import datetime
import logging
import math
import multiprocessing
import os
import random
import signal
import time
import traceback
import typing
import discord
import jsonpickle
import openai
from discord import state as discord_state
DISCORD_TOKEN = "XXXXXXXXXXXXXXXXXXXXXXXX.XXXXXX.XXXXXXXXXXXXXXX-XXXXXXXXXXX" # Get one here: https://discord.com/developers/applications/
OPENAI_API_KEY = "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" # https://beta.openai.com/account/api-keys
OPENAI_ORGANIZATION = "org-XXXXXXXXXXXXXXXXXXXXXXXX"
PREFIX = '.'
CLEANUP = 60
VERIFY_CHANNEL = 885760989608431636 # Yannic Kilcher "_verification"
VERIFIED_ROLE = 821375158295592961 # Yannic Kilcher "verified"
ALLOWED_CHANNEL = 800329398691430441 # Yannic Kilcher "gpt3"
MESSAGE_CHANNEL = 760062431858262066 # Yannic Kilcher "bot-chat"
ALLOWED_GUILD = 714501525455634453 # Yannic Kilcher
SHITPOSTING_CHANNEL = 736963923521175612
PRUNING_DAYS = 60
ADMIN_USER = [690665848876171274, 191929809444667394, 699606075023949884] # ClashLuke, XMaster, Yannic
ROLES = {'reinforcement-learning': 760062682693894144, 'computer-vision': 762042823666171955,
'natural-language-processing': 762042825260007446, 'meetup': 782362139087208478,
'verified': 821375158295592961, 'homebrew-nlp': 911661603190079528,
'world-modelz': 914229949873913877}
APPROVAL_EMOJI: typing.Union[str, discord.Emoji] = "yes"
DISAPPROVAL_EMOJI: typing.Union[str, discord.Emoji] = "noo"
THREADS = 16
LOG_LEVEL = logging.DEBUG
openai.api_key = OPENAI_API_KEY
openai.organization = OPENAI_ORGANIZATION
FALLBACKS = []
CHANNEL: typing.Optional[discord.TextChannel] = None
class ExitFunctionException(Exception):
pass
@dataclasses.dataclass
class Context:
client: discord.Client
message: discord.Message
sources: dict
settings: dict
fired_messages: typing.List[asyncio.Task]
def fire(ctx: Context, *coroutine: typing.Union[typing.Coroutine, typing.Iterable[typing.Coroutine]]) -> None:
    if len(coroutine) == 1:
        coroutine = coroutine[0]
if isinstance(coroutine, typing.Coroutine):
coroutine = [coroutine]
ctx.fired_messages.extend([asyncio.create_task(coro) for coro in coroutine])
def debug(message: typing.Any):
if LOG_LEVEL <= logging.DEBUG:
print(message)
async def discord_check(ctx: Context, check: bool, response: str):
if check:
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send(response, reference=ctx.message))
raise ExitFunctionException
def local_check(check: bool, message: str):
if check:
debug(message)
raise ExitFunctionException
def call_gpt(prompt, settings):
debug(settings)
return openai.Completion.create(prompt=prompt, **settings['gpt3'])['choices'][0]['text']
async def basic_check(ctx: Context, permission, dm=False):
channel: discord.TextChannel = ctx.message.channel
await discord_check(ctx, not dm and not hasattr(channel, "guild"), "This command can't be used in DM.")
    await discord_check(ctx, dm and hasattr(channel, "guild"), "This command can only be used in DM.")
if not dm:
guild: discord.Guild = channel.guild
await discord_check(ctx, not channel.id == MESSAGE_CHANNEL or not guild.id == ALLOWED_GUILD,
"Insufficient permission. This bot can only be used in its dedicated channel on the "
"\"Yannic Kilcher\" discord server.")
if permission:
author: discord.User = ctx.message.author
await discord_check(ctx, author.id not in ADMIN_USER,
"Insufficient permission. Only the owners of this bot are allowed to run this command. "
"Try .add instead")
async def prune(ctx: Context):
channel: discord.TextChannel = ctx.message.guild.get_channel(SHITPOSTING_CHANNEL)
async for msg in channel.history(limit=None,
before=datetime.datetime.now() - datetime.timedelta(days=PRUNING_DAYS)):
try:
fire(ctx, msg.delete())
except discord.errors.NotFound:
break
async def complete(ctx: Context):
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send("This command is temporarily gone, but will be back in the future! Use .add instead.",
reference=ctx.message))
# await basic_check(message, True)
# await channel.send(call_gpt(message.content[len('.complete '):], settings))
async def verify(ctx: Context):
if ctx.message.channel.id == VERIFY_CHANNEL:
fire(ctx,
ctx.message.author.add_roles(discord.utils.get(ctx.message.guild.roles, id=VERIFIED_ROLE)),
ctx.message.delete(delay=1))
async def add(ctx: Context):
await basic_check(ctx, False, True)
query = ctx.message.content[len('.add '):]
author_id = ctx.message.author.id
reply = await CHANNEL.send(f"<@{author_id}> added ```\n{query}``` to the queue. You can vote on it by clicking the "
f":{APPROVAL_EMOJI.name}: or :{DISAPPROVAL_EMOJI.name}: reactions.\n\nTo add a query "
f"yourself, send me a message like `.add YOUR PROMPT HERE` via DM!")
fire(ctx, reply.add_reaction(APPROVAL_EMOJI), reply.add_reaction(DISAPPROVAL_EMOJI))
ctx.sources[reply.id] = (query, author_id)
async def delete(ctx: Context):
await basic_check(ctx, False, True)
channel: discord.TextChannel = ctx.message.channel
query = ctx.message.content[len('.delete '):]
author_id = ctx.message.author.id
deleted = False
for reply_id, (qry, qry_author_id) in ctx.sources.items():
if author_id == qry_author_id and qry == query:
del ctx.sources[reply_id]
fire(ctx, channel.send(f"Removed query.", reference=ctx.message),
(await CHANNEL.fetch_message(reply_id)).delete())
deleted = True
break
if not deleted:
fire(ctx, channel.send(f"Didn't find query.", reference=ctx.message))
async def role(ctx: Context):
await basic_check(ctx, False)
query = ctx.message.content[len('.role '):]
channel: discord.TextChannel = ctx.message.channel
if query in ROLES:
author: discord.Member = ctx.message.author
guild: discord.Guild = ctx.message.guild
queried_role: discord.Role = guild.get_role(ROLES[query])
for role in author.roles:
role: discord.Role = role
if role == queried_role:
fire(ctx, author.remove_roles(role), channel.send(f"Removed role", reference=ctx.message))
return
fire(ctx, author.add_roles(queried_role), channel.send(f"Added role", reference=ctx.message))
else:
fire(ctx, channel.send(f"Couldn't find role", reference=ctx.message))
async def add_fallback(ctx: Context):
channel: discord.TextChannel = ctx.message.channel
await basic_check(ctx, True)
query = ctx.message.content[len('.add_fallback '):]
FALLBACKS.append(query)
fire(ctx, channel.send(f"Added query to the fallback list. There are now {len(FALLBACKS)} queries in said list.",
reference=ctx.message))
async def await_ctx(ctx: Context):
for msg in ctx.fired_messages:
await msg
async def restart(ctx: Context):
channel: discord.TextChannel = ctx.message.channel
await basic_check(ctx, True)
fire(ctx, channel.send(f"Restarting", reference=ctx.message), dump_queue(ctx))
await await_ctx(ctx)
os.system("python3 bot.py")
os.kill(os.getppid(), signal.SIGTERM)
async def settings(ctx: Context):
channel: discord.TextChannel = ctx.message.channel
    fire(ctx, channel.send(''.join(["gpt3:\n\t", '\n\t'.join(sorted([f"{k}={v}" for k, v in ctx.settings['gpt3'].items()])),
                                    '\n'
                                    'bot:\n\t', '\n\t'.join(sorted([f"{k}={v}" for k, v in ctx.settings['bot'].items()]))]),
reference=ctx.message))
async def dump_queue(ctx: Context):
await basic_check(ctx, True)
with open("queue_dump.json", 'w') as f:
f.write(jsonpickle.dumps(dict(ctx.sources), indent=4))
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send("Dumped queue.", reference=ctx.message))
async def dump_settings(ctx: Context):
await basic_check(ctx, True)
with open("setting_dump.json", 'w') as f:
        f.write(jsonpickle.dumps({key: dict(val) for key, val in ctx.settings.items()}, indent=4))
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send("Dumped settings.", reference=ctx.message))
async def dump_fallbacks(ctx: Context):
await basic_check(ctx, True)
with open("fallbacks.json", 'w') as f:
f.write(jsonpickle.dumps(FALLBACKS, indent=4))
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send("Dumped fallbacks.", reference=ctx.message))
async def load_fallbacks(ctx: Context):
await basic_check(ctx, True)
with open("fallbacks.json", 'w') as f:
fallbacks = jsonpickle.loads(f.read())
FALLBACKS.clear()
FALLBACKS.extend(fallbacks)
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send("Loaded fallbacks.", reference=ctx.message))
async def load_settings(ctx: Context):
with open("setting_dump.json", 'r') as f:
tmp = jsonpickle.loads(f.read())
for top_key, top_val in tmp.items():
for key, val in top_val.items():
ctx.settings[top_key][key] = val
await basic_check(ctx, True)
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send("Loaded settings.", reference=ctx.message))
async def load_queue(ctx: Context):
with open("queue_dump.json", 'r') as f:
tmp = jsonpickle.loads(f.read())
for key, val in tmp.items():
ctx.sources[key] = val
await basic_check(ctx, True)
channel: discord.TextChannel = ctx.message.channel
fire(ctx, channel.send("Loaded queue.", reference=ctx.message))
async def eval_queue(ctx: Context):
proposals = {}
for answer_id, (prompt, user_id) in ctx.sources.items():
message: discord.Message = await CHANNEL.fetch_message(answer_id)
proposals[answer_id] = [0, user_id, prompt]
for r in message.reactions:
r: discord.Reaction = r
e: discord.Emoji = r.emoji
if e.name == DISAPPROVAL_EMOJI.name:
proposals[answer_id][0] -= r.count
elif e.name == APPROVAL_EMOJI.name:
proposals[answer_id][0] += r.count
return proposals
async def queue(ctx: Context):
channel: discord.TextChannel = ctx.message.channel
proposals = await eval_queue(ctx)
proposals = sorted([(count, prompt) for _, (count, _, prompt) in proposals.items()], reverse=True)
if len(proposals) == 0:
fire(ctx, channel.send("Queue is empty", reference=ctx.message))
return
fire(ctx, channel.send('\n\n\n'.join([f'PROMPT: ```\n{prompt[:40]}```Score: {count}'
for count, prompt in proposals[:10]])
+ f'..and {len(proposals) - 10} more' * (len(proposals) > 10), reference=ctx.message))
async def start(ctx: Context):
channel: discord.TextChannel = ctx.message.channel
await discord_check(ctx, ctx.settings['bot']['started'], "Not starting another thread.")
await discord_check(ctx, not hasattr(channel, "guild"), "The bot can't be used in DM.")
guild: discord.Guild = channel.guild
author: discord.User = ctx.message.author
await discord_check(ctx, not channel.id == ALLOWED_CHANNEL or not guild.id == ALLOWED_GUILD,
"Insufficient permission. This bot can only be used in its dedicated channel on the "
"\"Yannic Kilcher\" discord server.")
await discord_check(ctx, author.id not in ADMIN_USER,
"Insufficient permission. Only the owner of this bot is allowed to run this command. "
"Try .add instead")
ctx.settings['bot']['started'] = 1
fire(ctx, channel.send("Starting the listener for this channel.", reference=ctx.message),
prune(ctx))
while True:
proposals = await eval_queue(ctx)
if proposals:
_, (count, _, _) = max(proposals.items(), key=lambda x: x[1][0])
best, message_id, author = random.choice([(prompt, message_id, author_id)
for message_id, (score, author_id, prompt)
in proposals.items()
if score == count])
if count < ctx.settings['bot']['min_score'] and ctx.settings['bot']['use_fallback']:
prompt = random.choice(FALLBACKS)
response = call_gpt(prompt, ctx.settings)
prompt: discord.Message = await channel.send(f"PROMPT:\n```\n{prompt}```")
fire(ctx, channel.send(f"RESPONSE:\n```\n{response}```", reference=prompt))
elif count < ctx.settings['bot']['min_score'] and ctx.settings['bot']['show_no_score']:
fire(ctx, channel.send("Nothing has any score, skipping this one."))
else:
response = call_gpt(best, ctx.settings)
fire(ctx, channel.send(f"<@{author}>\nRESPONSE:```\n{response}```",
reference=await channel.fetch_message(message_id)))
del ctx.sources[message_id]
elif ctx.settings['bot']['use_fallback']:
prompt = random.choice(FALLBACKS)
response = call_gpt(prompt, ctx.settings)
prompt: discord.Message = await channel.send(f"PROMPT:\n```\n{prompt}```")
fire(ctx, channel.send(f"RESPONSE:\n```\n{response}```", reference=prompt))
elif ctx.settings['bot']['show_empty']:
fire(ctx, channel.send("No prompts in queue, skipping this one."))
min_ln = math.log(ctx.settings['bot']['min_response_time'])
max_ln = math.log(ctx.settings['bot']['max_response_time'])
delay = math.e ** (random.random() * (max_ln - min_ln) + min_ln)
print(f"Next delay: {int(delay / 60):3d} minutes")
start_time = time.time()
time.sleep(delay + start_time - time.time()) # Ensure delay stays the same
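# A minimal sketch of the delay sampling used above (hypothetical helper, not
# called anywhere in this bot): drawing the exponent uniformly gives a
# log-uniform delay, so waits near min_response_time and near max_response_time
# are equally likely per order of magnitude.
def _log_uniform_delay(low_seconds: float, high_seconds: float) -> float:
    """Return a delay sampled uniformly in log-space between the two bounds."""
    low_ln = math.log(low_seconds)
    high_ln = math.log(high_seconds)
    return math.e ** (random.random() * (high_ln - low_ln) + low_ln)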
async def change_setting(ctx: Context):
channel: discord.TextChannel = ctx.message.channel
author: discord.User = ctx.message.author
arguments = ctx.message.content.split(' ')[1:]
await discord_check(ctx, len(arguments) != 3,
"Invalid number of arguments. Should be `group_name parameter_name value`")
    await discord_check(ctx, author.id not in ADMIN_USER,
                        "Insufficient permission. Only the owners of this bot are allowed to run this command.")
group_name, parameter_name, value = arguments
previous_value = ctx.settings[group_name][parameter_name]
ctx.settings[group_name][parameter_name] = type(previous_value)(value)
fire(ctx, channel.send(f"Changed {parameter_name} from {previous_value} to {value}", reference=ctx.message))
COMMANDS = {'change_setting': change_setting, 'settings': settings, 'add': add, 'complete': complete,
'queue': queue, 'start': start, 'dump_queue': dump_queue, 'load_queue': load_queue,
'dump_settings': dump_settings, 'load_settings': load_settings,
'dump_fallbacks': dump_fallbacks, 'load_fallbacks': load_fallbacks, 'add_fallback': add_fallback,
'delete': delete, 'role': role,
'restart': restart, 'verify': verify
}
async def bot_help(ctx: Context):
fire(ctx, ctx.message.channel.send(f'Available Commands: `{"` `".join(sorted(list(COMMANDS.keys())))}`',
reference=ctx.message))
COMMANDS['help'] = bot_help
def init(idx: int, available_workers: list, handled_messages: dict, sources: dict, settings: dict):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
client = discord.Client()
@client.event
async def on_message(message: discord.Message):
fn_name = message.content[1:]
if ' ' in fn_name:
fn_name = fn_name[:fn_name.find(' ')]
try:
local_check(fn_name not in COMMANDS, "Unknown command")
local_check(idx not in available_workers, "I'm already working. Skipping task.")
local_check(not message.content.startswith('.'), "Not a command")
local_check(message.id in handled_messages, "handled already")
local_check(message.id % len(available_workers) != available_workers.index(idx), f"Not mine {idx}")
except ExitFunctionException:
return
handled_messages[message.id] = time.time()
available_workers.remove(idx)
ctx: Context = Context(client, message, sources, settings, [])
try:
fire(ctx, COMMANDS[fn_name](ctx))
except ExitFunctionException:
pass
except Exception as exc:
if LOG_LEVEL <= logging.ERROR:
traceback.print_exc()
await await_ctx(ctx)
if idx not in available_workers:
available_workers.append(idx)
@client.event
async def on_ready():
global APPROVAL_EMOJI, DISAPPROVAL_EMOJI, CHANNEL
if isinstance(APPROVAL_EMOJI, str):
for emoji in client.emojis:
emoji: discord.Emoji = emoji
if emoji.name == APPROVAL_EMOJI:
APPROVAL_EMOJI = emoji
if emoji.name == DISAPPROVAL_EMOJI:
DISAPPROVAL_EMOJI = emoji
connection: discord_state.ConnectionState = client._connection
guild: discord.Guild = connection._get_guild(ALLOWED_GUILD)
CHANNEL = guild.get_channel(ALLOWED_CHANNEL)
if idx not in available_workers:
available_workers.append(idx)
debug(f"Instance {idx} ({len(available_workers)}/{THREADS}) logged in as {client.user.name}")
loop.create_task(client.start(DISCORD_TOKEN))
loop.run_forever()
loop.close()
def clean_handled_messages(handled_messages):
while True:
        for msg_id, timestamp in list(handled_messages.items()):
            if timestamp + CLEANUP < time.time():
                del handled_messages[msg_id]
time.sleep(CLEANUP)
def backup(sources):
while True:
with open("queue_dump.json", 'w') as f:
f.write(jsonpickle.dumps(dict(sources), indent=4))
time.sleep(600)
if __name__ == '__main__':
manager = multiprocessing.Manager()
_workers = manager.list([])
_handled_messages = manager.dict({})
_sources = manager.dict({})
_gpt3 = manager.dict({})
_bot = manager.dict({})
_settings = manager.dict({})
_gpt3.update({'temperature': 0.5,
'top_p': 1,
'max_tokens': 256,
'presence_penalty': 0.45,
'frequency_penalty': 0.65,
'best_of': 1,
'engine': "davinci"
})
_bot.update({'min_response_time': 60,
'max_response_time': 60 * 60 * 24,
"started": 0,
'min_score': 0,
'show_no_score': 0,
'show_empty': 0,
'use_fallback': 0,
'max_synchronisation_delay_ms': 2000,
})
_settings.update({'gpt3': _gpt3,
'bot': _bot
})
procs = [
multiprocessing.Process(target=init, args=(idx, _workers, _handled_messages, _sources, _settings), daemon=True)
for idx in range(THREADS)]
procs.append(multiprocessing.Process(target=clean_handled_messages, args=(_handled_messages,), daemon=True))
procs.append(multiprocessing.Process(target=backup, args=(_sources,), daemon=True))
for t in procs:
t.start()
for t in procs:
t.join()
|
test_threaded_import.py
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import _imp as imp
import os
import importlib
import sys
import time
import shutil
import unittest
from test.support import (
verbose, import_module, run_unittest, TESTFN, reap_threads,
forget, unlink, rmtree, start_threads)
threading = import_module('threading')
def task(N, done, done_tasks, errors):
try:
# We don't use modulefinder but still import it in order to stress
# importing of different modules from several threads.
if len(done_tasks) % 2:
import modulefinder
import random
else:
import random
import modulefinder
# This will fail if random is not completely initialized
x = random.randrange(1, 3)
except Exception as e:
errors.append(e.with_traceback(None))
finally:
done_tasks.append(threading.get_ident())
finished = len(done_tasks) == N
if finished:
done.set()
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
"""A dummy finder to detect concurrent access to its find_spec()
method."""
def __init__(self):
self.numcalls = 0
self.x = 0
self.lock = threading.Lock()
def find_spec(self, name, path=None, target=None):
# Simulate some thread-unsafe behaviour. If calls to find_spec()
# are properly serialized, `x` will end up the same as `numcalls`.
# Otherwise not.
assert imp.lock_held()
with self.lock:
self.numcalls += 1
x = self.x
time.sleep(0.01)
self.x = x + 1
class FlushingFinder:
"""A dummy finder which flushes sys.path_importer_cache when it gets
called."""
def find_spec(self, name, path=None, target=None):
sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
def setUp(self):
self.old_random = sys.modules.pop('random', None)
def tearDown(self):
# If the `random` module was already initialized, we restore the
# old module at the end so that pickling tests don't fail.
# See http://bugs.python.org/issue3657#msg110461
if self.old_random is not None:
sys.modules['random'] = self.old_random
def check_parallel_module_init(self):
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done = threading.Event()
for N in (20, 50) * 3:
if verbose:
print("Trying", N, "threads ...", end=' ')
# Make sure that random and modulefinder get reimported freshly
for modname in ['random', 'modulefinder']:
try:
del sys.modules[modname]
except KeyError:
pass
errors = []
done_tasks = []
done.clear()
t0 = time.monotonic()
with start_threads(threading.Thread(target=task,
args=(N, done, done_tasks, errors,))
for i in range(N)):
pass
completed = done.wait(10 * 60)
dt = time.monotonic() - t0
if verbose:
print("%.1f ms" % (dt*1e3), flush=True, end=" ")
dbg_info = 'done: %s/%s' % (len(done_tasks), N)
self.assertFalse(errors, dbg_info)
self.assertTrue(completed, dbg_info)
if verbose:
print("OK.")
def test_parallel_module_init(self):
self.check_parallel_module_init()
def test_parallel_meta_path(self):
finder = Finder()
sys.meta_path.insert(0, finder)
try:
self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(finder)
def test_parallel_path_hooks(self):
# Here the Finder instance is only used to check concurrent calls
# to path_hook().
finder = Finder()
# In order for our path hook to be called at each import, we need
# to flush the path_importer_cache, which we do by registering a
# dedicated meta_path entry.
flushing_finder = FlushingFinder()
def path_hook(path):
finder.find_spec('')
raise ImportError
sys.path_hooks.insert(0, path_hook)
sys.meta_path.append(flushing_finder)
try:
# Flush the cache a first time
flushing_finder.find_spec('')
numtests = self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(flushing_finder)
sys.path_hooks.remove(path_hook)
def test_import_hangers(self):
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
try:
del sys.modules['test.threaded_import_hangers']
except KeyError:
pass
import test.threaded_import_hangers
self.assertFalse(test.threaded_import_hangers.errors)
def test_circular_imports(self):
# The goal of this test is to exercise implementations of the import
# lock which use a per-module lock, rather than a global lock.
# In these implementations, there is a possible deadlock with
# circular imports, for example:
# - thread 1 imports A (grabbing the lock for A) which imports B
# - thread 2 imports B (grabbing the lock for B) which imports A
# Such implementations should be able to detect such situations and
# resolve them one way or the other, without freezing.
# NOTE: our test constructs a slightly less trivial import cycle,
# in order to better stress the deadlock avoidance mechanism.
delay = 0.5
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
for name, contents in circular_imports_modules.items():
contents = contents % {'delay': delay}
with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
f.write(contents.encode('utf-8'))
self.addCleanup(forget, name)
importlib.invalidate_caches()
results = []
def import_ab():
import A
results.append(getattr(A, 'x', None))
def import_ba():
import B
results.append(getattr(B, 'x', None))
t1 = threading.Thread(target=import_ab)
t2 = threading.Thread(target=import_ba)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(set(results), {'a', 'b'})
def test_side_effect_import(self):
code = """if 1:
import threading
def target():
import random
t = threading.Thread(target=target)
t.start()
t.join()
t = None"""
sys.path.insert(0, os.curdir)
self.addCleanup(sys.path.remove, os.curdir)
filename = TESTFN + ".py"
with open(filename, "wb") as f:
f.write(code.encode('utf-8'))
self.addCleanup(unlink, filename)
self.addCleanup(forget, TESTFN)
self.addCleanup(rmtree, '__pycache__')
importlib.invalidate_caches()
__import__(TESTFN)
del sys.modules[TESTFN]
@reap_threads
def test_main():
old_switchinterval = None
try:
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
except AttributeError:
pass
try:
run_unittest(ThreadedImportTests)
finally:
if old_switchinterval is not None:
sys.setswitchinterval(old_switchinterval)
if __name__ == "__main__":
test_main()
|
__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulate a MongoDB server, for use in unittests."""
from __future__ import print_function
__author__ = 'A. Jesse Jiryu Davis'
__email__ = 'jesse@mongodb.com'
__version__ = '1.8.0.dev0'
import atexit
import contextlib
import datetime
import errno
import functools
import inspect
import os
import random
import select
import ssl as _ssl
import socket
import struct
import traceback
import threading
import time
import weakref
import sys
from codecs import utf_8_decode as _utf_8_decode
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict # Python 2.6, "pip install ordereddict"
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
try:
from urllib.parse import quote_plus
except ImportError:
# Python 2
from urllib import quote_plus
import bson
from bson import codec_options, json_util
CODEC_OPTIONS = codec_options.CodecOptions(document_class=OrderedDict)
PY3 = sys.version_info[0] == 3
if PY3:
string_type = str
text_type = str
def reraise(exctype, value, trace=None):
raise exctype(str(value)).with_traceback(trace)
else:
string_type = basestring
text_type = unicode
# "raise x, y, z" raises SyntaxError in Python 3.
exec ("""def reraise(exctype, value, trace=None):
raise exctype, str(value), trace
""")
__all__ = [
'MockupDB', 'go', 'going', 'Future', 'wait_until', 'interactive_server',
'OP_REPLY', 'OP_UPDATE', 'OP_INSERT', 'OP_QUERY', 'OP_GET_MORE',
'OP_DELETE', 'OP_KILL_CURSORS', 'OP_MSG',
'QUERY_FLAGS', 'UPDATE_FLAGS', 'INSERT_FLAGS', 'DELETE_FLAGS',
'REPLY_FLAGS', 'OP_MSG_FLAGS',
'Request', 'Command', 'OpQuery', 'OpGetMore', 'OpKillCursors', 'OpInsert',
'OpUpdate', 'OpDelete', 'OpReply', 'OpMsg',
'Matcher', 'absent',
]
def go(fn, *args, **kwargs):
"""Launch an operation on a thread and get a handle to its future result.
>>> from time import sleep
>>> def print_sleep_print(duration):
... sleep(duration)
... print('hello from background thread')
... sleep(duration)
... print('goodbye from background thread')
... return 'return value'
...
>>> future = go(print_sleep_print, 0.1)
>>> sleep(0.15)
hello from background thread
>>> print('main thread')
main thread
>>> result = future()
goodbye from background thread
>>> result
'return value'
"""
if not callable(fn):
raise TypeError('go() requires a function, not %r' % (fn,))
result = [None]
error = []
def target():
try:
result[0] = fn(*args, **kwargs)
except Exception:
# Are we in interpreter shutdown?
if sys:
error.extend(sys.exc_info())
t = threading.Thread(target=target)
t.daemon = True
t.start()
def get_result(timeout=10):
t.join(timeout)
if t.is_alive():
raise AssertionError('timed out waiting for %r' % fn)
if error:
reraise(*error)
return result[0]
return get_result
@contextlib.contextmanager
def going(fn, *args, **kwargs):
"""Launch a thread and wait for its result before exiting the code block.
>>> with going(lambda: 'return value') as future:
... pass
>>> future() # Won't block, the future is ready by now.
'return value'
Or discard the result:
>>> with going(lambda: "don't care"):
... pass
If an exception is raised within the context, the result is lost:
>>> with going(lambda: 'return value') as future:
... assert 1 == 0
Traceback (most recent call last):
...
AssertionError
"""
future = go(fn, *args, **kwargs)
try:
yield future
except:
# We are raising an exception, just try to clean up the future.
exc_info = sys.exc_info()
try:
# Shorter than normal timeout.
future(timeout=1)
except:
log_message = ('\nerror in %s:\n'
% format_call(inspect.currentframe()))
sys.stderr.write(log_message)
traceback.print_exc()
# sys.stderr.write('exc in %s' % format_call(inspect.currentframe()))
reraise(*exc_info)
else:
# Raise exception or discard result.
future(timeout=10)
class Future(object):
def __init__(self):
self._result = None
self._event = threading.Event()
def result(self, timeout=None):
self._event.wait(timeout)
# wait() always returns None in Python 2.6.
if not self._event.is_set():
raise AssertionError('timed out waiting for Future')
return self._result
def set_result(self, result):
if self._event.is_set():
raise RuntimeError("Future is already resolved")
self._result = result
self._event.set()
def wait_until(predicate, success_description, timeout=10):
"""Wait up to 10 seconds (by default) for predicate to be true.
E.g.:
wait_until(lambda: client.primary == ('a', 1),
'connect to the primary')
If the lambda-expression isn't true after 10 seconds, we raise
AssertionError("Didn't ever connect to the primary").
Returns the predicate's first true value.
"""
start = time.time()
while True:
retval = predicate()
if retval:
return retval
if time.time() - start > timeout:
raise AssertionError("Didn't ever %s" % success_description)
time.sleep(0.1)
OP_REPLY = 1
OP_UPDATE = 2001
OP_INSERT = 2002
OP_QUERY = 2004
OP_GET_MORE = 2005
OP_DELETE = 2006
OP_KILL_CURSORS = 2007
OP_MSG = 2013
QUERY_FLAGS = OrderedDict([
('TailableCursor', 2),
('SlaveOkay', 4),
('OplogReplay', 8),
('NoTimeout', 16),
('AwaitData', 32),
('Exhaust', 64),
('Partial', 128)])
UPDATE_FLAGS = OrderedDict([
('Upsert', 1),
('MultiUpdate', 2)])
INSERT_FLAGS = OrderedDict([
('ContinueOnError', 1)])
DELETE_FLAGS = OrderedDict([
('SingleRemove', 1)])
REPLY_FLAGS = OrderedDict([
('CursorNotFound', 1),
('QueryFailure', 2)])
OP_MSG_FLAGS = OrderedDict([
('checksumPresent', 1),
('moreToCome', 2)])
_UNPACK_BYTE = struct.Struct("<b").unpack
_UNPACK_INT = struct.Struct("<i").unpack
_UNPACK_UINT = struct.Struct("<I").unpack
_UNPACK_LONG = struct.Struct("<q").unpack
def _get_c_string(data, position):
"""Decode a BSON 'C' string to python unicode string."""
end = data.index(b"\x00", position)
return _utf_8_decode(data[position:end], None, True)[0], end + 1
class _PeekableQueue(Queue):
"""Only safe from one consumer thread at a time."""
_NO_ITEM = object()
def __init__(self, *args, **kwargs):
Queue.__init__(self, *args, **kwargs)
self._item = _PeekableQueue._NO_ITEM
def peek(self, block=True, timeout=None):
if self._item is not _PeekableQueue._NO_ITEM:
return self._item
else:
self._item = self.get(block, timeout)
return self._item
def get(self, block=True, timeout=None):
if self._item is not _PeekableQueue._NO_ITEM:
item = self._item
self._item = _PeekableQueue._NO_ITEM
return item
else:
return Queue.get(self, block, timeout)
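# Sketch of the peek/get contract above (single consumer assumed): peek()
# buffers the next item without consuming it, and the following get() returns
# that same buffered item.
#
#     q = _PeekableQueue()
#     q.put('request')
#     q.peek()   # -> 'request' (still buffered)
#     q.get()    # -> 'request' (now consumed)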
def _ismap(obj):
return isinstance(obj, Mapping)
def _islist(obj):
return isinstance(obj, list)
def _dt_rounded(dt):
"""Python datetimes have microsecond precision, BSON only milliseconds."""
return dt.replace(microsecond=dt.microsecond - dt.microsecond % 1000)
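# For example (assuming the datetime import at the top of this module):
#
#     >>> _dt_rounded(datetime.datetime(2020, 1, 1, microsecond=123456)).microsecond
#     123000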
class Request(object):
"""Base class for `Command`, `OpMsg`, and so on.
Some useful asserts you can do in tests:
>>> {'_id': 0} in OpInsert({'_id': 0})
True
>>> {'_id': 1} in OpInsert({'_id': 0})
False
>>> {'_id': 1} in OpInsert([{'_id': 0}, {'_id': 1}])
True
>>> {'_id': 1} == OpInsert([{'_id': 0}, {'_id': 1}])[1]
True
>>> 'field' in OpMsg(field=1)
True
>>> 'field' in OpMsg()
False
>>> 'field' in OpMsg('ismaster')
False
>>> OpMsg(ismaster=False)['ismaster'] is False
True
"""
opcode = None
is_command = None
_non_matched_attrs = 'doc', 'docs'
_flags_map = None
def __init__(self, *args, **kwargs):
self._flags = kwargs.pop('flags', None)
self._namespace = kwargs.pop('namespace', None)
self._client = kwargs.pop('_client', None)
self._request_id = kwargs.pop('request_id', None)
self._server = kwargs.pop('_server', None)
self._verbose = self._server and self._server.verbose
self._server_port = kwargs.pop('server_port', None)
self._docs = make_docs(*args, **kwargs)
if not all(_ismap(doc) for doc in self._docs):
raise_args_err()
@property
def doc(self):
"""The request document, if there is exactly one.
Use this for queries, commands, and legacy deletes. Legacy writes may
have many documents, OP_GET_MORE and OP_KILL_CURSORS have none.
"""
assert len(self.docs) == 1, '%r has more than one document' % self
return self.docs[0]
@property
def docs(self):
"""The request documents, if any."""
return self._docs
@property
def namespace(self):
"""The operation namespace or None."""
return self._namespace
@property
def flags(self):
"""The request flags or None."""
return self._flags
@property
def slave_ok(self):
"""True if the SlaveOkay wire protocol flag is set."""
return self._flags and bool(
self._flags & QUERY_FLAGS['SlaveOkay'])
slave_okay = slave_ok
"""Synonym for `.slave_ok`."""
@property
def request_id(self):
"""The request id or None."""
return self._request_id
@property
def client_port(self):
"""Client connection's TCP port."""
address = self._client.getpeername()
if isinstance(address, tuple):
return address[1]
# Maybe a Unix domain socket connection.
return 0
@property
def server(self):
"""The `.MockupDB` server."""
return self._server
def assert_matches(self, *args, **kwargs):
"""Assert this matches a :ref:`message spec <message spec>`.
Returns self.
"""
matcher = make_matcher(*args, **kwargs)
if not matcher.matches(self):
raise AssertionError('%r does not match %r' % (self, matcher))
return self
def matches(self, *args, **kwargs):
"""True if this matches a :ref:`message spec <message spec>`."""
return make_matcher(*args, **kwargs).matches(self)
def replies(self, *args, **kwargs):
"""Send an `OpReply` to the client.
The default reply to a command is ``{'ok': 1}``, otherwise the default
is empty (no documents).
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
self._replies(*args, **kwargs)
return True
ok = send = sends = reply = replies
"""Synonym for `.replies`."""
def fail(self, err='MockupDB query failure', *args, **kwargs):
"""Reply to a query with the QueryFailure flag and an '$err' key.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
kwargs.setdefault('flags', 0)
kwargs['flags'] |= REPLY_FLAGS['QueryFailure']
kwargs['$err'] = err
self.replies(*args, **kwargs)
return True
def command_err(self, code=1, errmsg='MockupDB command failure',
*args, **kwargs):
"""Error reply to a command.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
kwargs.setdefault('ok', 0)
kwargs['code'] = code
kwargs['errmsg'] = errmsg
self.replies(*args, **kwargs)
return True
def hangup(self):
"""Close the connection.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
if self._server:
self._server._log('\t%d\thangup' % self.client_port)
self._client.shutdown(socket.SHUT_RDWR)
return True
hangs_up = hangup
"""Synonym for `.hangup`."""
def _matches_docs(self, docs, other_docs):
"""Overridable method."""
for doc, other_doc in zip(docs, other_docs):
if not self._match_map(doc, other_doc):
return False
return True
def _match_map(self, doc, other_doc):
for key, val in doc.items():
if val is absent:
if key in other_doc:
return False
elif not self._match_val(val, other_doc.get(key, None)):
return False
if isinstance(doc, (OrderedDict, bson.SON)):
if not isinstance(other_doc, (OrderedDict, bson.SON)):
raise TypeError(
"Can't compare ordered and unordered document types:"
" %r, %r" % (doc, other_doc))
keys = [key for key, val in doc.items()
if val is not absent]
if not seq_match(keys, list(other_doc.keys())):
return False
return True
def _match_list(self, lst, other_lst):
if len(lst) != len(other_lst):
return False
for val, other_val in zip(lst, other_lst):
if not self._match_val(val, other_val):
return False
return True
def _match_val(self, val, other_val):
if _ismap(val) and _ismap(other_val):
if not self._match_map(val, other_val):
return False
elif _islist(val) and _islist(other_val):
if not self._match_list(val, other_val):
return False
elif (isinstance(val, datetime.datetime)
and isinstance(other_val, datetime.datetime)):
if _dt_rounded(val) != _dt_rounded(other_val):
return False
elif val != other_val:
return False
return True
def _replies(self, *args, **kwargs):
"""Overridable method."""
reply_msg = make_reply(*args, **kwargs)
if self._server:
self._server._log('\t%d\t<-- %r' % (self.client_port, reply_msg))
reply_bytes = reply_msg.reply_bytes(self)
self._client.sendall(reply_bytes)
def __contains__(self, item):
if item in self.docs:
return True
if len(self.docs) == 1 and isinstance(item, (string_type, text_type)):
return item in self.doc
return False
def __getitem__(self, item):
return self.doc[item] if len(self.docs) == 1 else self.docs[item]
def __str__(self):
return docs_repr(*self.docs)
def __repr__(self):
name = self.__class__.__name__
parts = []
if self.docs:
parts.append(docs_repr(*self.docs))
if self._flags:
if self._flags_map:
parts.append('flags=%s' % (
'|'.join(name for name, value in self._flags_map.items()
if self._flags & value)))
else:
parts.append('flags=%d' % self._flags)
if self._namespace:
parts.append('namespace="%s"' % self._namespace)
return '%s(%s)' % (name, ', '.join(str(part) for part in parts))
class CommandBase(Request):
"""A command the client executes on the server."""
is_command = True
# Check command name case-insensitively.
_non_matched_attrs = Request._non_matched_attrs + ('command_name',)
@property
def command_name(self):
"""The command name or None.
>>> OpMsg({'count': 'collection'}).command_name
'count'
>>> OpMsg('aggregate', 'collection', cursor=absent).command_name
'aggregate'
"""
if self.docs and self.docs[0]:
return list(self.docs[0])[0]
def _matches_docs(self, docs, other_docs):
assert len(docs) == len(other_docs) == 1
doc, = docs
other_doc, = other_docs
items = list(doc.items())
other_items = list(other_doc.items())
# Compare command name case-insensitively.
if items and other_items:
if items[0][0].lower() != other_items[0][0].lower():
return False
if items[0][1] != other_items[0][1]:
return False
return super(CommandBase, self)._matches_docs(
[OrderedDict(items[1:])],
[OrderedDict(other_items[1:])])
class OpMsg(CommandBase):
"""An OP_MSG request the client executes on the server."""
opcode = OP_MSG
is_command = True
_flags_map = OP_MSG_FLAGS
@classmethod
def unpack(cls, msg, client, server, request_id):
"""Parse message and return an `OpMsg`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
flags, = _UNPACK_UINT(msg[:4])
pos = 4
first_payload_type, = _UNPACK_BYTE(msg[pos:pos + 1])
pos += 1
first_payload_size, = _UNPACK_INT(msg[pos:pos + 4])
if flags != 0 and flags != 2:
raise ValueError('OP_MSG flag must be 0 or 2 not %r' % (flags,))
if first_payload_type != 0:
raise ValueError('First OP_MSG payload type must be 0 not %r' % (
first_payload_type,))
# Parse the initial document and add the optional payload type 1.
payload_document = bson.decode_all(msg[pos:pos + first_payload_size],
CODEC_OPTIONS)[0]
pos += first_payload_size
if len(msg) != pos:
payload_type, = _UNPACK_BYTE(msg[pos:pos + 1])
pos += 1
if payload_type != 1:
raise ValueError('Second OP_MSG payload type must be 1 not %r'
% (payload_type,))
section_size, = _UNPACK_INT(msg[pos:pos + 4])
if len(msg) != pos + section_size:
raise ValueError('More than two OP_MSG sections unsupported')
pos += 4
identifier, pos = _get_c_string(msg, pos)
documents = bson.decode_all(msg[pos:], CODEC_OPTIONS)
payload_document[identifier] = documents
database = payload_document['$db']
return OpMsg(payload_document, namespace=database, flags=flags,
_client=client, request_id=request_id,
_server=server)
def __init__(self, *args, **kwargs):
super(OpMsg, self).__init__(*args, **kwargs)
if len(self._docs) > 1:
raise_args_err('OpMsg too many documents', ValueError)
@property
def slave_ok(self):
"""True if this OpMsg can read from a secondary."""
read_preference = self.doc.get('$readPreference')
return read_preference and read_preference.get('mode') != 'primary'
slave_okay = slave_ok
"""Synonym for `.slave_ok`."""
@property
def command_name(self):
"""The command name or None.
>>> OpMsg({'count': 'collection'}).command_name
'count'
>>> OpMsg('aggregate', 'collection', cursor=absent).command_name
'aggregate'
"""
if self.docs and self.docs[0]:
return list(self.docs[0])[0]
def _replies(self, *args, **kwargs):
if self.flags & OP_MSG_FLAGS['moreToCome']:
assert False, "Cannot reply to OpMsg with moreToCome: %r" % (self,)
reply = make_op_msg_reply(*args, **kwargs)
if not reply.docs:
reply.docs = [{'ok': 1}]
else:
if len(reply.docs) > 1:
raise ValueError('OP_MSG reply with multiple documents: %s'
% (reply.docs,))
reply.doc.setdefault('ok', 1)
super(OpMsg, self)._replies(reply)
class OpQuery(Request):
"""A query (besides a command) the client executes on the server.
>>> OpQuery({'i': {'$gt': 2}}, fields={'j': False})
OpQuery({"i": {"$gt": 2}}, fields={"j": false})
"""
opcode = OP_QUERY
is_command = False
_flags_map = QUERY_FLAGS
@classmethod
def unpack(cls, msg, client, server, request_id):
"""Parse message and return an `OpQuery` or `Command`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
flags, = _UNPACK_INT(msg[:4])
namespace, pos = _get_c_string(msg, 4)
is_command = namespace.endswith('.$cmd')
num_to_skip, = _UNPACK_INT(msg[pos:pos + 4])
pos += 4
num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
pos += 4
docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
if is_command:
assert len(docs) == 1
command_ns = namespace[:-len('.$cmd')]
return Command(docs, namespace=command_ns, flags=flags,
_client=client, request_id=request_id,
_server=server)
else:
if len(docs) == 1:
fields = None
else:
assert len(docs) == 2
fields = docs[1]
return OpQuery(docs[0], fields=fields, namespace=namespace,
flags=flags, num_to_skip=num_to_skip,
num_to_return=num_to_return, _client=client,
request_id=request_id, _server=server)
def __init__(self, *args, **kwargs):
fields = kwargs.pop('fields', None)
if fields is not None and not _ismap(fields):
raise_args_err()
self._fields = fields
self._num_to_skip = kwargs.pop('num_to_skip', None)
self._num_to_return = kwargs.pop('num_to_return', None)
super(OpQuery, self).__init__(*args, **kwargs)
if not self._docs:
self._docs = [{}] # Default query filter.
elif len(self._docs) > 1:
raise_args_err('OpQuery too many documents', ValueError)
@property
def num_to_skip(self):
"""Client query's numToSkip or None."""
return self._num_to_skip
@property
def num_to_return(self):
"""Client query's numToReturn or None."""
return self._num_to_return
@property
def fields(self):
"""Client query's fields selector or None."""
return self._fields
def __repr__(self):
rep = super(OpQuery, self).__repr__().rstrip(')')
if self._fields:
rep += ', fields=%s' % docs_repr(self._fields)
if self._num_to_skip is not None:
rep += ', numToSkip=%d' % self._num_to_skip
if self._num_to_return is not None:
rep += ', numToReturn=%d' % self._num_to_return
return rep + ')'
class Command(CommandBase, OpQuery):
"""A command the client executes on the server."""
def _replies(self, *args, **kwargs):
reply = make_reply(*args, **kwargs)
if not reply.docs:
reply.docs = [{'ok': 1}]
else:
if len(reply.docs) > 1:
raise ValueError('Command reply with multiple documents: %s'
% (reply.docs,))
reply.doc.setdefault('ok', 1)
super(Command, self)._replies(reply)
def replies_to_gle(self, **kwargs):
"""Send a getlasterror response.
Defaults to ``{ok: 1, err: null}``. Add or override values by passing
keyword arguments.
Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
"""
kwargs.setdefault('err', None)
return self.replies(**kwargs)
class OpGetMore(Request):
"""An OP_GET_MORE the client executes on the server."""
@classmethod
def unpack(cls, msg, client, server, request_id):
"""Parse message and return an `OpGetMore`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
flags, = _UNPACK_INT(msg[:4])
namespace, pos = _get_c_string(msg, 4)
num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
pos += 4
cursor_id, = _UNPACK_LONG(msg[pos:pos + 8])
return OpGetMore(namespace=namespace, flags=flags, _client=client,
num_to_return=num_to_return, cursor_id=cursor_id,
request_id=request_id, _server=server)
def __init__(self, **kwargs):
self._num_to_return = kwargs.pop('num_to_return', None)
self._cursor_id = kwargs.pop('cursor_id', None)
super(OpGetMore, self).__init__(**kwargs)
@property
def num_to_return(self):
"""The client message's numToReturn field."""
return self._num_to_return
@property
def cursor_id(self):
"""The client message's cursorId field."""
return self._cursor_id
class OpKillCursors(Request):
"""An OP_KILL_CURSORS the client executes on the server."""
@classmethod
def unpack(cls, msg, client, server, _):
"""Parse message and return an `OpKillCursors`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
# Leading 4 bytes are reserved.
num_of_cursor_ids, = _UNPACK_INT(msg[4:8])
cursor_ids = []
pos = 8
for _ in range(num_of_cursor_ids):
cursor_ids.append(_UNPACK_INT(msg[pos:pos + 4])[0])
pos += 4
return OpKillCursors(_client=client, cursor_ids=cursor_ids,
_server=server)
def __init__(self, **kwargs):
self._cursor_ids = kwargs.pop('cursor_ids', None)
super(OpKillCursors, self).__init__(**kwargs)
@property
def cursor_ids(self):
"""List of cursor ids the client wants to kill."""
return self._cursor_ids
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._cursor_ids)
class _LegacyWrite(Request):
is_command = False
class OpInsert(_LegacyWrite):
"""A legacy OP_INSERT the client executes on the server."""
opcode = OP_INSERT
_flags_map = INSERT_FLAGS
@classmethod
def unpack(cls, msg, client, server, request_id):
"""Parse message and return an `OpInsert`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
flags, = _UNPACK_INT(msg[:4])
namespace, pos = _get_c_string(msg, 4)
docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
return cls(*docs, namespace=namespace, flags=flags, _client=client,
request_id=request_id, _server=server)
class OpUpdate(_LegacyWrite):
"""A legacy OP_UPDATE the client executes on the server."""
opcode = OP_UPDATE
_flags_map = UPDATE_FLAGS
@classmethod
def unpack(cls, msg, client, server, request_id):
"""Parse message and return an `OpUpdate`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
# First 4 bytes of OP_UPDATE are "reserved".
namespace, pos = _get_c_string(msg, 4)
flags, = _UNPACK_INT(msg[pos:pos + 4])
docs = bson.decode_all(msg[pos + 4:], CODEC_OPTIONS)
return cls(*docs, namespace=namespace, flags=flags, _client=client,
request_id=request_id, _server=server)
class OpDelete(_LegacyWrite):
"""A legacy OP_DELETE the client executes on the server."""
opcode = OP_DELETE
_flags_map = DELETE_FLAGS
@classmethod
def unpack(cls, msg, client, server, request_id):
"""Parse message and return an `OpDelete`.
Takes the client message as bytes, the client and server socket objects,
and the client request id.
"""
# First 4 bytes of OP_DELETE are "reserved".
namespace, pos = _get_c_string(msg, 4)
flags, = _UNPACK_INT(msg[pos:pos + 4])
docs = bson.decode_all(msg[pos + 4:], CODEC_OPTIONS)
return cls(*docs, namespace=namespace, flags=flags, _client=client,
request_id=request_id, _server=server)
class Reply(object):
"""A reply from `MockupDB` to the client."""
def __init__(self, *args, **kwargs):
self._flags = kwargs.pop('flags', 0)
self._docs = make_docs(*args, **kwargs)
@property
def doc(self):
"""Contents of reply.
Useful for replies to commands; replies to other messages may have no
documents or multiple documents.
"""
assert len(self._docs) == 1, '%s has more than one document' % self
return self._docs[0]
def __str__(self):
return docs_repr(*self._docs)
def __repr__(self):
rep = '%s(%s' % (self.__class__.__name__, self)
if self._flags:
rep += ', flags=' + '|'.join(
name for name, value in REPLY_FLAGS.items()
if self._flags & value)
return rep + ')'
class OpReply(Reply):
"""An OP_REPLY reply from `MockupDB` to the client."""
def __init__(self, *args, **kwargs):
self._cursor_id = kwargs.pop('cursor_id', 0)
self._starting_from = kwargs.pop('starting_from', 0)
super(OpReply, self).__init__(*args, **kwargs)
@property
def docs(self):
"""The reply documents, if any."""
return self._docs
@docs.setter
def docs(self, docs):
self._docs = make_docs(docs)
def update(self, *args, **kwargs):
"""Update the document. Same as ``dict().update()``.
>>> reply = OpReply({'ismaster': True})
>>> reply.update(maxWireVersion=3)
>>> reply.doc['maxWireVersion']
3
>>> reply.update({'maxWriteBatchSize': 10, 'msg': 'isdbgrid'})
"""
self.doc.update(*args, **kwargs)
def reply_bytes(self, request):
"""Take a `Request` and return an OP_REPLY message as bytes."""
flags = struct.pack("<i", self._flags)
cursor_id = struct.pack("<q", self._cursor_id)
starting_from = struct.pack("<i", self._starting_from)
number_returned = struct.pack("<i", len(self._docs))
reply_id = random.randint(0, 1000000)
response_to = request.request_id
data = b''.join([flags, cursor_id, starting_from, number_returned])
data += b''.join([bson.BSON.encode(doc) for doc in self._docs])
message = struct.pack("<i", 16 + len(data))
message += struct.pack("<i", reply_id)
message += struct.pack("<i", response_to)
message += struct.pack("<i", OP_REPLY)
return message + data
class OpMsgReply(Reply):
"""A OP_MSG reply from `MockupDB` to the client."""
def __init__(self, *args, **kwargs):
super(OpMsgReply, self).__init__(*args, **kwargs)
assert len(self._docs) <= 1, 'OpMsgReply can only have one document'
@property
def docs(self):
"""The reply documents, if any."""
return self._docs
@docs.setter
def docs(self, docs):
self._docs = make_docs(docs)
assert len(self._docs) == 1, 'OpMsgReply must have one document'
def update(self, *args, **kwargs):
"""Update the document. Same as ``dict().update()``.
>>> reply = OpMsgReply({'ismaster': True})
>>> reply.update(maxWireVersion=3)
>>> reply.doc['maxWireVersion']
3
>>> reply.update({'maxWriteBatchSize': 10, 'msg': 'isdbgrid'})
"""
self.doc.update(*args, **kwargs)
def reply_bytes(self, request):
"""Take a `Request` and return an OP_MSG message as bytes."""
flags = struct.pack("<I", self._flags)
payload_type = struct.pack("<b", 0)
payload_data = bson.BSON.encode(self.doc)
data = b''.join([flags, payload_type, payload_data])
reply_id = random.randint(0, 1000000)
response_to = request.request_id
header = struct.pack(
"<iiii", 16 + len(data), reply_id, response_to, OP_MSG)
return header + data
def __repr__(self):
rep = '%s(%s' % (self.__class__.__name__, self)
if self._flags:
rep += ', flags=' + '|'.join(
name for name, value in OP_MSG_FLAGS.items()
if self._flags & value)
return rep + ')'
absent = {'absent': 1}
class Matcher(object):
"""Matches a subset of `.Request` objects.
Initialized with a :ref:`message spec <message spec>`.
Used by `~MockupDB.receives` to assert the client sent the expected request,
and by `~MockupDB.got` to test if it did and return ``True`` or ``False``.
Used by `.autoresponds` to match requests with autoresponses.
"""
def __init__(self, *args, **kwargs):
self._kwargs = kwargs
self._prototype = make_prototype_request(*args, **kwargs)
def matches(self, *args, **kwargs):
"""Test if a request matches a :ref:`message spec <message spec>`.
Returns ``True`` or ``False``.
"""
request = make_prototype_request(*args, **kwargs)
if self._prototype.opcode not in (None, request.opcode):
return False
if self._prototype.is_command not in (None, request.is_command):
return False
for name in dir(self._prototype):
if name.startswith('_') or name in request._non_matched_attrs:
# Ignore privates, and handle documents specially.
continue
prototype_value = getattr(self._prototype, name, None)
if inspect.ismethod(prototype_value):
continue
actual_value = getattr(request, name, None)
if prototype_value not in (None, actual_value):
return False
if len(self._prototype.docs) not in (0, len(request.docs)):
return False
return self._prototype._matches_docs(self._prototype.docs, request.docs)
@property
def prototype(self):
"""The prototype `.Request` used to match actual requests with."""
return self._prototype
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._prototype)
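# Illustrative sketch of using Matcher directly (not from the library's own docs;
# the specs mirror the `got` doctest later in this module, which uses Matcher
# internally):
#
#   m = Matcher(OpMsg('find', 'collection'))
#   m.matches(OpMsg('find', 'collection', filter={'x': 1}))   # True: spec is a subset
#   m.matches(OpMsg('insert', 'collection'))                   # False: wrong command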
def _synchronized(meth):
"""Call method while holding a lock."""
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
with self._lock:
return meth(self, *args, **kwargs)
return wrapper
class _AutoResponder(object):
def __init__(self, server, matcher, *args, **kwargs):
self._server = server
if inspect.isfunction(matcher) or inspect.ismethod(matcher):
if args or kwargs:
raise_args_err()
self._matcher = Matcher() # Match anything.
self._handler = matcher
self._args = ()
self._kwargs = {}
else:
self._matcher = make_matcher(matcher)
if args and callable(args[0]):
self._handler = args[0]
if args[1:] or kwargs:
raise_args_err()
self._args = ()
self._kwargs = {}
else:
self._handler = None
self._args = args
self._kwargs = kwargs
def handle(self, request):
if self._matcher.matches(request):
if self._handler:
return self._handler(request)
else:
# Command.replies() overrides Request.replies() with special
# logic, which is why we saved args and kwargs until now to
# pass it into request.replies, instead of making an OpReply
# ourselves in __init__.
request.replies(*self._args, **self._kwargs)
return True
def cancel(self):
"""Stop autoresponding."""
self._server.cancel_responder(self)
def __repr__(self):
return '_AutoResponder(%r, %r, %r)' % (
self._matcher, self._args, self._kwargs)
_shutting_down = False
_global_threads = weakref.WeakKeyDictionary()
def _shut_down(threads):
global _shutting_down
_shutting_down = True
for t in threads:
try:
t.join(10)
except:
pass
atexit.register(_shut_down, _global_threads)
class MockupDB(object):
"""A simulated mongod or mongos.
Call `run` to start the server, and always `close` it to avoid exceptions
during interpreter shutdown.
See the tutorial for comprehensive examples.
:Optional parameters:
- `port`: listening port number. If not specified, choose
some unused port and return the port number from `run`.
- `verbose`: if ``True``, print requests and replies to stdout.
- `request_timeout`: seconds to wait for the next client request, or else
assert. Default 10 seconds. Pass int(1e6) to disable.
- `auto_ismaster`: pass ``True`` to autorespond ``{'ok': 1}`` to
ismaster requests, or pass a dict or `OpReply`.
- `ssl`: pass ``True`` to require SSL.
- `min_wire_version`: the minWireVersion to include in ismaster responses
if `auto_ismaster` is True, default 0.
- `max_wire_version`: the maxWireVersion to include in ismaster responses
if `auto_ismaster` is True, default 6.
- `uds_path`: a Unix domain socket path. MockupDB will attempt to delete
the path if it already exists.
"""
def __init__(self, port=None, verbose=False,
request_timeout=10, auto_ismaster=None,
ssl=False, min_wire_version=0, max_wire_version=6,
uds_path=None):
if port is not None and uds_path is not None:
raise TypeError(
("You can't pass port=%s and uds_path=%s,"
" pass only one or neither") % (port, uds_path))
self._uds_path = uds_path
if uds_path:
self._address = (uds_path, 0)
else:
self._address = ('localhost', port)
self._verbose = verbose
self._label = None
self._ssl = ssl
self._request_timeout = request_timeout
self._listening_sock = None
self._accept_thread = None
# Track sockets that we want to close in stop(). Keys are sockets,
# values are None (this could be a WeakSet but it's new in Python 2.7).
self._server_threads = weakref.WeakKeyDictionary()
self._server_socks = weakref.WeakKeyDictionary()
self._stopped = False
self._request_q = _PeekableQueue()
self._requests_count = 0
self._lock = threading.Lock()
# List of (request_matcher, args, kwargs), where args and kwargs are
# like those sent to request.reply().
self._autoresponders = []
if auto_ismaster is True:
self.autoresponds(CommandBase('ismaster'),
{'ismaster': True,
'minWireVersion': min_wire_version,
'maxWireVersion': max_wire_version})
elif auto_ismaster:
self.autoresponds(CommandBase('ismaster'), auto_ismaster)
@_synchronized
def run(self):
"""Begin serving. Returns the bound port, or 0 for domain socket."""
self._listening_sock, self._address = (
bind_domain_socket(self._address)
if self._uds_path
else bind_tcp_socket(self._address))
if self._ssl:
certfile = os.path.join(os.path.dirname(__file__), 'server.pem')
self._listening_sock = _ssl.wrap_socket(
self._listening_sock,
certfile=certfile,
server_side=True)
self._accept_thread = threading.Thread(target=self._accept_loop)
self._accept_thread.daemon = True
self._accept_thread.start()
return self.port
@_synchronized
def stop(self):
"""Stop serving. Always call this to clean up after yourself."""
self._stopped = True
threads = [self._accept_thread]
threads.extend(self._server_threads)
self._listening_sock.close()
for sock in list(self._server_socks):
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
try:
sock.close()
except socket.error:
pass
with self._unlock():
for thread in threads:
thread.join(10)
if self._uds_path:
try:
os.unlink(self._uds_path)
except OSError:
pass
def receives(self, *args, **kwargs):
"""Pop the next `Request` and assert it matches.
Returns None if the server is stopped.
Pass a `Request` or request pattern to specify what client request to
expect. See the tutorial for examples. Pass ``timeout`` as a keyword
argument to override this server's ``request_timeout``.
"""
timeout = kwargs.pop('timeout', self._request_timeout)
end = time.time() + timeout
matcher = Matcher(*args, **kwargs)
while not self._stopped:
try:
# Short timeout so we notice if the server is stopped.
request = self._request_q.get(timeout=0.05)
except Empty:
if time.time() > end:
raise AssertionError('expected to receive %r, got nothing'
% matcher.prototype)
else:
if matcher.matches(request):
return request
else:
raise AssertionError('expected to receive %r, got %r'
% (matcher.prototype, request))
gets = pop = receive = receives
"""Synonym for `receives`."""
def got(self, *args, **kwargs):
"""Does `.request` match the given :ref:`message spec <message spec>`?
>>> s = MockupDB(auto_ismaster=True)
>>> port = s.run()
>>> s.got(timeout=0) # No request enqueued.
False
>>> from pymongo import MongoClient
>>> client = MongoClient(s.uri)
>>> future = go(client.db.command, 'foo')
>>> s.got('foo')
True
>>> s.got(OpMsg('foo', namespace='db'))
True
>>> s.got(OpMsg('foo', key='value'))
False
>>> s.ok()
>>> future() == {'ok': 1}
True
>>> s.stop()
"""
timeout = kwargs.pop('timeout', self._request_timeout)
end = time.time() + timeout
matcher = make_matcher(*args, **kwargs)
while not self._stopped:
try:
# Short timeout so we notice if the server is stopped.
request = self._request_q.peek(timeout=timeout)
except Empty:
if time.time() > end:
return False
else:
return matcher.matches(request)
wait = got
"""Synonym for `got`."""
def replies(self, *args, **kwargs):
"""Call `~Request.reply` on the currently enqueued request."""
self.pop().replies(*args, **kwargs)
ok = send = sends = reply = replies
"""Synonym for `.replies`."""
def fail(self, *args, **kwargs):
"""Call `~Request.fail` on the currently enqueued request."""
self.pop().fail(*args, **kwargs)
def command_err(self, *args, **kwargs):
"""Call `~Request.command_err` on the currently enqueued request."""
self.pop().command_err(*args, **kwargs)
def hangup(self):
"""Call `~Request.hangup` on the currently enqueued request."""
self.pop().hangup()
hangs_up = hangup
"""Synonym for `.hangup`."""
@_synchronized
def autoresponds(self, matcher, *args, **kwargs):
"""Send a canned reply to all matching client requests.
``matcher`` is a `Matcher` or a command name, or an instance of
`OpInsert`, `OpQuery`, etc.
>>> s = MockupDB()
>>> port = s.run()
>>>
>>> from pymongo import MongoClient
>>> client = MongoClient(s.uri)
>>> responder = s.autoresponds('ismaster', maxWireVersion=6)
>>> client.admin.command('ismaster') == {'ok': 1, 'maxWireVersion': 6}
True
The remaining arguments are a :ref:`message spec <message spec>`:
>>> responder = s.autoresponds('bar', ok=0, errmsg='err')
>>> client.db.command('bar')
Traceback (most recent call last):
...
OperationFailure: command SON([('bar', 1)]) on namespace db.$cmd failed: err
>>> responder = s.autoresponds(OpMsg('find', 'collection'),
... {'cursor': {'id': 0, 'firstBatch': [{'_id': 1}, {'_id': 2}]}})
>>> list(client.db.collection.find()) == [{'_id': 1}, {'_id': 2}]
True
>>> responder = s.autoresponds(OpMsg('find', 'collection'),
... {'cursor': {'id': 0, 'firstBatch': [{'a': 1}, {'a': 2}]}})
>>> list(client.db.collection.find()) == [{'a': 1}, {'a': 2}]
True
Remove an autoresponder like:
>>> responder.cancel()
If the request currently at the head of the queue matches, it is popped
and replied to. Future matching requests skip the queue.
>>> future = go(client.db.command, 'baz')
>>> responder = s.autoresponds('baz', {'key': 'value'})
>>> future() == {'ok': 1, 'key': 'value'}
True
Responders are applied in order, most recently added first, until one
matches:
>>> responder = s.autoresponds('baz')
>>> client.db.command('baz') == {'ok': 1}
True
>>> responder.cancel()
>>> # The previous responder takes over again.
>>> client.db.command('baz') == {'ok': 1, 'key': 'value'}
True
You can pass a request handler in place of the message spec. Return
True if you handled the request:
>>> responder = s.autoresponds('baz', lambda r: r.ok(a=2))
The standard `Request.ok`, `~Request.replies`, `~Request.fail`,
`~Request.hangup` and so on all return True to make them suitable
as handler functions.
>>> client.db.command('baz') == {'ok': 1, 'a': 2}
True
If the request is not handled, it is checked against the remaining
responders, or enqueued if none match.
You can pass the handler as the only argument so it receives *all*
requests. For example you could log them, then return None to allow
other handlers to run:
>>> def logger(request):
... if not request.matches('ismaster'):
... print('logging: %r' % request)
>>> responder = s.autoresponds(logger)
>>> client.db.command('baz') == {'ok': 1, 'a': 2}
logging: OpMsg({"baz": 1, "$db": "db", "$readPreference": {"mode": "primaryPreferred"}}, namespace="db")
True
The synonym `subscribe` better expresses your intent if your handler
never returns True:
>>> subscriber = s.subscribe(logger)
.. doctest:
:hide:
>>> client.close()
>>> s.stop()
"""
responder = _AutoResponder(self, matcher, *args, **kwargs)
self._autoresponders.append(responder)
try:
request = self._request_q.peek(block=False)
except Empty:
pass
else:
if responder.handle(request):
self._request_q.get_nowait() # Pop it.
return responder
subscribe = autoresponds
"""Synonym for `.autoresponds`."""
@_synchronized
def cancel_responder(self, responder):
"""Cancel a responder that was registered with `autoresponds`."""
self._autoresponders.remove(responder)
@property
def address(self):
"""The listening (host, port)."""
return self._address
@property
def address_string(self):
"""The listening "host:port"."""
return format_addr(self._address)
@property
def host(self):
"""The listening hostname."""
return self._address[0]
@property
def port(self):
"""The listening port."""
return self._address[1]
@property
def uri(self):
"""Connection string to pass to `~pymongo.mongo_client.MongoClient`."""
if self._uds_path:
uri = 'mongodb://%s' % (quote_plus(self._uds_path),)
else:
uri = 'mongodb://%s' % (format_addr(self._address),)
return uri + '/?ssl=true' if self._ssl else uri
@property
def verbose(self):
"""If verbose logging is turned on."""
return self._verbose
@verbose.setter
def verbose(self, value):
if not isinstance(value, bool):
raise TypeError('value must be True or False, not %r' % value)
self._verbose = value
@property
def label(self):
"""Label for logging, or None."""
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def requests_count(self):
"""Number of requests this server has received.
Includes autoresponded requests.
"""
return self._requests_count
@property
def request(self):
"""The currently enqueued `Request`, or None.
.. warning:: This property is useful to check what the current request
is, but the pattern ``server.request.replies()`` is dangerous: you
must follow it with ``server.pop()`` or the current request remains
enqueued. Better to reply with ``server.pop().replies()`` than
``server.request.replies()`` or any variation on it.
"""
return self.got() or None
@property
@_synchronized
def running(self):
"""If this server is started and not stopped."""
return self._accept_thread and not self._stopped
def _accept_loop(self):
"""Accept client connections and spawn a thread for each."""
self._listening_sock.setblocking(0)
while not self._stopped and not _shutting_down:
try:
# Wait a short time to accept.
if select.select([self._listening_sock.fileno()], [], [], 1):
client, client_addr = self._listening_sock.accept()
client.setblocking(True)
self._log('connection from %s' % format_addr(client_addr))
server_thread = threading.Thread(
target=functools.partial(
self._server_loop, client, client_addr))
# Store weakrefs to the thread and socket, so we can
# dispose them in stop().
self._server_threads[server_thread] = None
self._server_socks[client] = None
server_thread.daemon = True
server_thread.start()
except socket.error as error:
if error.errno not in (
errno.EAGAIN, errno.EBADF, errno.EWOULDBLOCK):
raise
except select.error as error:
if error.args[0] == errno.EBADF:
# Closed.
break
else:
raise
@_synchronized
def _server_loop(self, client, client_addr):
"""Read requests from one client socket, 'client'."""
while not self._stopped and not _shutting_down:
try:
with self._unlock():
request = mock_server_receive_request(client, self)
self._requests_count += 1
self._log('%d\t%r' % (request.client_port, request))
# Give most recently added responders precedence.
for responder in reversed(self._autoresponders):
if responder.handle(request):
self._log('\t(autoresponse)')
break
else:
self._request_q.put(request)
except socket.error as error:
if error.errno in (errno.ECONNRESET, errno.EBADF):
# We hung up, or the client did.
break
raise
except select.error as error:
if error.args[0] == errno.EBADF:
# Closed.
break
else:
raise
except AssertionError:
traceback.print_exc()
break
self._log('disconnected: %s' % format_addr(client_addr))
client.close()
def _log(self, msg):
if self._verbose:
if self._label:
msg = '%s:\t%s' % (self._label, msg)
print(msg)
@contextlib.contextmanager
def _unlock(self):
"""Temporarily release the lock."""
self._lock.release()
try:
yield
finally:
self._lock.acquire()
def __iter__(self):
return self
def next(self):
request = self.receives()
if request is None:
# Server stopped.
raise StopIteration()
return request
__next__ = next
def __repr__(self):
if self._uds_path:
return 'MockupDB(uds_path=%s)' % (self._uds_path,)
return 'MockupDB(%s, %s)' % self._address
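# Typical request/reply flow, sketched in comments (it mirrors the `got` and
# `autoresponds` doctests above; `go` is the helper those doctests use to run the
# client call on a background thread):
#
#   server = MockupDB(auto_ismaster=True)
#   server.run()
#   client = MongoClient(server.uri)          # from pymongo import MongoClient
#   future = go(client.db.command, 'ping')    # client call blocks in background
#   request = server.receives('ping')         # assert the expected request arrived
#   request.ok()                              # reply {ok: 1}
#   assert future() == {'ok': 1}
#   server.stop()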
def format_addr(address):
"""Turn a TCP or Unix domain socket address into a string."""
if isinstance(address, tuple):
if address[1]:
return '%s:%d' % address
else:
return address[0]
return address
def bind_tcp_socket(address):
"""Takes (host, port) and returns (socket_object, (host, port)).
If the passed-in port is None, bind an unused port and return it.
"""
host, port = address
for res in set(socket.getaddrinfo(host, port, socket.AF_INET,
socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)):
family, socktype, proto, _, sock_addr = res
sock = socket.socket(family, socktype, proto)
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Automatic port allocation with port=None.
sock.bind(sock_addr)
sock.listen(128)
bound_port = sock.getsockname()[1]
return sock, (host, bound_port)
raise socket.error('could not bind socket')
def bind_domain_socket(address):
"""Takes (socket path, 0) and returns (socket_object, (path, 0))."""
path, _ = address
try:
os.unlink(path)
except OSError:
pass
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
sock.listen(128)
return sock, (path, 0)
OPCODES = {OP_MSG: OpMsg,
OP_QUERY: OpQuery,
OP_INSERT: OpInsert,
OP_UPDATE: OpUpdate,
OP_DELETE: OpDelete,
OP_GET_MORE: OpGetMore,
OP_KILL_CURSORS: OpKillCursors}
def mock_server_receive_request(client, server):
"""Take a client socket and return a Request."""
header = mock_server_receive(client, 16)
length = _UNPACK_INT(header[:4])[0]
request_id = _UNPACK_INT(header[4:8])[0]
opcode = _UNPACK_INT(header[12:])[0]
msg_bytes = mock_server_receive(client, length - 16)
if opcode not in OPCODES:
raise NotImplementedError("Don't know how to unpack opcode %d yet"
% opcode)
return OPCODES[opcode].unpack(msg_bytes, client, server, request_id)
def _errno_from_exception(exc):
if hasattr(exc, 'errno'):
return exc.errno
elif exc.args:
return exc.args[0]
else:
return None
def mock_server_receive(sock, length):
"""Receive `length` bytes from a socket object."""
msg = b''
while length:
chunk = sock.recv(length)
if chunk == b'':
raise socket.error(errno.ECONNRESET, 'closed')
length -= len(chunk)
msg += chunk
return msg
def make_docs(*args, **kwargs):
"""Make the documents for a `Request` or `Reply`.
Takes a variety of argument styles, returns a list of dicts.
Used by `make_prototype_request` and `make_reply`, which are in turn used by
`MockupDB.receives`, `Request.replies`, and so on. See examples in
tutorial.
"""
err_msg = "Can't interpret args: "
if not args and not kwargs:
return []
if not args:
# OpReply(ok=1, ismaster=True).
return [kwargs]
if isinstance(args[0], (int, float, bool)):
# server.receives().ok(0, err='uh oh').
if args[1:]:
raise_args_err(err_msg, ValueError)
doc = OrderedDict({'ok': args[0]})
doc.update(kwargs)
return [doc]
if isinstance(args[0], (list, tuple)):
# Send a batch: OpReply([{'a': 1}, {'a': 2}]).
if not all(isinstance(doc, (OpReply, Mapping))
for doc in args[0]):
raise_args_err('each doc must be a dict:')
if kwargs:
raise_args_err(err_msg, ValueError)
return list(args[0])
if isinstance(args[0], (string_type, text_type)):
if args[2:]:
raise_args_err(err_msg, ValueError)
if len(args) == 2:
# Command('aggregate', 'collection', {'cursor': {'batchSize': 1}}).
doc = OrderedDict({args[0]: args[1]})
else:
# OpReply('ismaster', me='a.com').
doc = OrderedDict({args[0]: 1})
doc.update(kwargs)
return [doc]
if kwargs:
raise_args_err(err_msg, ValueError)
# Send a batch as varargs: OpReply({'a': 1}, {'a': 2}).
if not all(isinstance(doc, (OpReply, Mapping)) for doc in args):
raise_args_err('each doc must be a dict')
return args
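# For illustration (derived from the branches above, not part of the library):
#   make_docs()                        -> []
#   make_docs(ok=1)                    -> [{'ok': 1}]
#   make_docs(0, err='uh oh')          -> [{'ok': 0, 'err': 'uh oh'}]
#   make_docs([{'a': 1}, {'a': 2}])    -> [{'a': 1}, {'a': 2}]
#   make_docs('ismaster', me='a.com')  -> [{'ismaster': 1, 'me': 'a.com'}]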
def make_matcher(*args, **kwargs):
"""Make a Matcher from a :ref:`message spec <message spec>`:
>>> make_matcher()
Matcher(Request())
>>> make_matcher({'ismaster': 1}, namespace='admin')
Matcher(Request({"ismaster": 1}, namespace="admin"))
>>> make_matcher({}, {'_id': 1})
Matcher(Request({}, {"_id": 1}))
See more examples in the tutorial section for :ref:`Message Specs`.
"""
if args and isinstance(args[0], Matcher):
if args[1:] or kwargs:
raise_args_err("can't interpret args")
return args[0]
return Matcher(*args, **kwargs)
def make_prototype_request(*args, **kwargs):
"""Make a prototype Request for a Matcher."""
if args and inspect.isclass(args[0]) and issubclass(args[0], Request):
request_cls, arg_list = args[0], args[1:]
return request_cls(*arg_list, **kwargs)
if args and isinstance(args[0], Request):
if args[1:] or kwargs:
raise_args_err("can't interpret args")
return args[0]
# Match any opcode.
return Request(*args, **kwargs)
def make_reply(*args, **kwargs):
# Error we might raise.
if args and isinstance(args[0], (OpReply, OpMsgReply)):
if args[1:] or kwargs:
raise_args_err("can't interpret args")
return args[0]
return OpReply(*args, **kwargs)
def make_op_msg_reply(*args, **kwargs):
# Error we might raise.
if args and isinstance(args[0], (OpReply, OpMsgReply)):
if args[1:] or kwargs:
raise_args_err("can't interpret args")
return args[0]
return OpMsgReply(*args, **kwargs)
def unprefixed(bson_str):
rep = unicode(repr(bson_str))
if rep.startswith(u'u"') or rep.startswith(u"u'"):
return rep[1:]
else:
return rep
def docs_repr(*args):
"""Stringify ordered dicts like a regular ones.
Preserve order, remove 'u'-prefix on unicodes in Python 2:
>>> print(docs_repr(OrderedDict([(u'_id', 2)])))
{"_id": 2}
>>> print(docs_repr(OrderedDict([(u'_id', 2), (u'a', u'b')]),
... OrderedDict([(u'a', 1)])))
{"_id": 2, "a": "b"}, {"a": 1}
>>>
>>> import datetime
>>> now = datetime.datetime.utcfromtimestamp(123456)
>>> print(docs_repr(OrderedDict([(u'ts', now)])))
{"ts": {"$date": 123456000}}
>>>
>>> oid = bson.ObjectId(b'123456781234567812345678')
>>> print(docs_repr(OrderedDict([(u'oid', oid)])))
{"oid": {"$oid": "123456781234567812345678"}}
"""
sio = StringIO()
for doc_idx, doc in enumerate(args):
if doc_idx > 0:
sio.write(u', ')
sio.write(text_type(json_util.dumps(doc)))
return sio.getvalue()
def seq_match(seq0, seq1):
"""True if seq0 is a subset of seq1 and their elements are in same order.
>>> seq_match([], [])
True
>>> seq_match([1], [1])
True
>>> seq_match([1, 1], [1])
False
>>> seq_match([1], [1, 2])
True
>>> seq_match([1, 1], [1, 1])
True
>>> seq_match([3], [1, 2, 3])
True
>>> seq_match([1, 3], [1, 2, 3])
True
>>> seq_match([2, 1], [1, 2, 3])
False
"""
len_seq1 = len(seq1)
if len_seq1 < len(seq0):
return False
seq1_idx = 0
for i, elem in enumerate(seq0):
while seq1_idx < len_seq1:
if seq1[seq1_idx] == elem:
break
seq1_idx += 1
if seq1_idx >= len_seq1 or seq1[seq1_idx] != elem:
return False
seq1_idx += 1
return True
def format_call(frame):
fn_name = inspect.getframeinfo(frame)[2]
arg_info = inspect.getargvalues(frame)
args = [repr(arg_info.locals[arg]) for arg in arg_info.args]
varargs = [repr(x) for x in arg_info.locals[arg_info.varargs]]
kwargs = [', '.join("%s=%r" % (key, value) for key, value in
arg_info.locals[arg_info.keywords].items())]
return '%s(%s)' % (fn_name, ', '.join(args + varargs + kwargs))
def raise_args_err(message='bad arguments', error_class=TypeError):
"""Throw an error with standard message, displaying function call.
>>> def f(a, *args, **kwargs):
... raise_args_err()
...
>>> f(1, 2, x='y')
Traceback (most recent call last):
...
TypeError: bad arguments: f(1, 2, x='y')
"""
frame = inspect.currentframe().f_back
raise error_class(message + ': ' + format_call(frame))
def interactive_server(port=27017, verbose=True, all_ok=False, name='MockupDB',
ssl=False, uds_path=None):
"""A `MockupDB` that the mongo shell can connect to.
Call `~.MockupDB.run` on the returned server, and clean it up with
`~.MockupDB.stop`.
If ``all_ok`` is True, replies {ok: 1} to anything unmatched by a specific
responder.
"""
if uds_path is not None:
port = None
server = MockupDB(port=port,
verbose=verbose,
request_timeout=int(1e6),
ssl=ssl,
auto_ismaster=True,
uds_path=uds_path)
if all_ok:
server.autoresponds({})
server.autoresponds('whatsmyuri', you='localhost:12345')
server.autoresponds({'getLog': 'startupWarnings'},
log=['hello from %s!' % name])
server.autoresponds(OpMsg('buildInfo'), version='MockupDB ' + __version__)
server.autoresponds(OpMsg('listCollections'))
server.autoresponds('replSetGetStatus', ok=0)
server.autoresponds('getFreeMonitoringStatus', ok=0)
return server
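# Usage sketch (illustrative only): serve something the mongo shell can poke at,
# then shut it down. `all_ok=True` makes otherwise-unmatched commands succeed.
#
#   server = interactive_server(all_ok=True)
#   server.run()
#   print('mongo shell can now connect to port %d' % server.port)
#   ...
#   server.stop()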
|
binaries.py
|
# Lint-as: python3
"""Utilities for locating and invoking compiler tool binaries."""
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import importlib
import io
import os
import platform
import subprocess
import sys
import textwrap
import threading
from typing import List, Optional, Union
__all__ = [
"find_tool",
"invoke_immediate",
"invoke_pipeline",
"get_tool_path",
"CompilerToolError",
]
_BUILTIN_TOOLS = [
"ireec",
"iree-translate",
]
# In normal distribution circumstances, each named tool is associated with
# a python module that provides a `get_tool` function for getting its absolute
# path. This dictionary maps the tool name to the module.
_TOOL_MODULE_MAP = {
"iree-import-tflite": "iree.tools.tflite",
"iree-import-xla": "iree.tools.xla",
"iree-import-tf": "iree.tools.tf",
}
# Map of tool module to package name as distributed to archives (used for
# error messages).
_TOOL_MODULE_PACKAGES = {
"iree.tools.tf": "iree-tools-tf",
"iree.tools.tflite": "iree-tools-tflite",
"iree.tools.xla": "iree-tools-xla",
}
# Environment variable holding directories to be searched for named tools.
# Delimited by os.pathsep.
_TOOL_PATH_ENVVAR = "IREE_TOOL_PATH"
class CompilerToolError(Exception):
"""Compiler exception that preserves the command line and error output."""
def __init__(self, process: subprocess.CompletedProcess):
try:
errs = process.stderr.decode("utf-8")
except:
errs = str(process.stderr) # Decode error or other: best we can do.
tool_name = os.path.basename(process.args[0])
super().__init__(f"Error invoking IREE compiler tool {tool_name}\n"
f"Diagnostics:\n{errs}\n\n"
f"Invoked with:\n {tool_name} {' '.join(process.args)}")
def get_tool_path() -> List[str]:
"""Returns list of paths to search for tools."""
list_str = os.environ.get(_TOOL_PATH_ENVVAR)
if not list_str:
return []
return list_str.split(os.pathsep)
def find_tool(exe_name: str) -> str:
"""Finds a tool by its (extension-less) executable name.
Args:
exe_name: The name of the executable (extension-less).
Returns:
An absolute path to the tool.
Raises:
ValueError: If the tool is not known or not found.
"""
is_builtin = exe_name in _BUILTIN_TOOLS
if not is_builtin and exe_name not in _TOOL_MODULE_MAP:
raise ValueError(f"IREE compiler tool '{exe_name}' is not a known tool")
# First search an explicit tool path.
tool_path = get_tool_path()
for path_entry in tool_path:
if not path_entry:
continue
candidate_exe = os.path.join(path_entry, exe_name)
if os.path.isfile(candidate_exe) and os.access(candidate_exe, os.X_OK):
return candidate_exe
if is_builtin:
# Get builtin tool.
candidate_exe = _get_builtin_tool(exe_name)
else:
# Attempt to load the tool module.
tool_module_name = _TOOL_MODULE_MAP[exe_name]
tool_module_package = _TOOL_MODULE_PACKAGES[tool_module_name]
try:
tool_module = importlib.import_module(tool_module_name)
except ModuleNotFoundError:
raise ValueError(
f"IREE compiler tool '{exe_name}' is not installed (it should have been "
f"found in the python module '{tool_module_name}', typically installed "
f"via the package {tool_module_package}).\n\n"
f"Either install the package or set the {_TOOL_PATH_ENVVAR} environment "
f"variable to contain the path of the tool executable "
f"(current {_TOOL_PATH_ENVVAR} = {repr(tool_path)})") from None
# Ask the module for its tool.
candidate_exe = tool_module.get_tool(exe_name)
if (not candidate_exe or not os.path.isfile(candidate_exe) or
not os.access(candidate_exe, os.X_OK)):
raise ValueError(
f"IREE compiler tool '{exe_name}' was located in module "
f"'{tool_module_name}' but the file was not found or not executable: "
f"{candidate_exe}")
return candidate_exe
def _get_builtin_tool(exe_name: str) -> Optional[str]:
# Transitional note: iree-translate is allowed and resolves to "ireec".
if exe_name == "iree-translate":
exe_name = "ireec"
if platform.system() == "Windows":
exe_name = exe_name + ".exe"
this_path = os.path.dirname(__file__)
tool_path = os.path.join(this_path, "..", "_mlir_libs", exe_name)
return tool_path
def invoke_immediate(command_line: List[str],
*,
input_file: Optional[bytes] = None,
immediate_input=None):
"""Invokes an immediate command.
This is separate from invoke_pipeline as it is simpler and supports more
complex input redirection, using recommended facilities for sub-processes
(less magic).
Note that this differs from the usual way of using subprocess.run or
subprocess.Popen().communicate() because we need to pump all of the error
streams individually and only pump pipes not connected to a different stage.
Uses threads to pump everything that is required.
"""
run_args = {}
input_file_handle = None
stderr_handle = sys.stderr
try:
# Redirect input.
if input_file is not None:
input_file_handle = open(input_file, "rb")
run_args["stdin"] = input_file_handle
elif immediate_input is not None:
run_args["input"] = immediate_input
# Capture output.
# TODO(#4131) python>=3.7: Use capture_output=True.
run_args["stdout"] = subprocess.PIPE
run_args["stderr"] = subprocess.PIPE
process = subprocess.run(command_line, **run_args)
if process.returncode != 0:
raise CompilerToolError(process)
# Emit stderr contents.
_write_binary_stderr(stderr_handle, process.stderr)
return process.stdout
finally:
if input_file_handle:
input_file_handle.close()
def invoke_pipeline(command_lines: List[List[str]], immediate_input=None):
"""Invoke a pipeline of commands.
The first stage of the pipeline will have its stdin set to DEVNULL and each
subsequent stdin will derive from the prior stdout. The final stdout will
be accumulated and returned. All stderr contents are accumulated and printed
to stderr on completion or the first failing stage of the pipeline will have
an exception raised with its stderr output.
"""
stages = []
pipeline_input = (subprocess.DEVNULL
if immediate_input is None else subprocess.PIPE)
prev_out = pipeline_input
stderr_handle = sys.stderr
# Create all stages.
for i in range(len(command_lines)):
command_line = command_lines[i]
popen_args = {
"stdin": prev_out,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
}
process = subprocess.Popen(command_line, **popen_args)
prev_out = process.stdout
capture_output = (i == (len(command_lines) - 1))
stages.append(_PipelineStage(process, capture_output))
# Start stages.
for stage in stages:
stage.start()
# Pump input.
pipe_success = True
if immediate_input is not None:
try:
pipe_success = False
stages[0].process.stdin.write(immediate_input)
pipe_success = True
finally:
stages[0].process.stdin.close()
# Join.
for stage in stages:
stage.join()
# Check for errors.
for stage in stages:
assert stage.completed
if stage.completed.returncode != 0:
raise CompilerToolError(stage.completed)
# Broken pipe.
if not pipe_success:
raise CompilerToolError(stages[0].completed)
# Print any stderr output.
for stage in stages:
_write_binary_stderr(stderr_handle, stage.errs)
return stages[-1].outs
class _PipelineStage(threading.Thread):
"""Wraps a process and pumps its handles, waiting for completion."""
def __init__(self, process, capture_output):
super().__init__()
self.process = process
self.capture_output = capture_output
self.completed: Optional[subprocess.CompletedProcess] = None
self.outs = None
self.errs = None
def pump_stderr(self):
self.errs = self.process.stderr.read()
def pump_stdout(self):
self.outs = self.process.stdout.read()
def run(self):
stderr_thread = threading.Thread(target=self.pump_stderr)
stderr_thread.start()
if self.capture_output:
stdout_thread = threading.Thread(target=self.pump_stdout)
stdout_thread.start()
self.process.wait()
stderr_thread.join()
if self.capture_output:
stdout_thread.join()
self.completed = subprocess.CompletedProcess(self.process.args,
self.process.returncode,
self.outs, self.errs)
self.process.stderr.close()
self.process.stdout.close()
def _write_binary_stderr(out_handle, contents):
# Fast-paths buffered text-io (which stderr is by default) while allowing
# full decode for non buffered and binary io.
if hasattr(out_handle, "buffer"):
out_handle.buffer.write(contents)
elif isinstance(out_handle, io.TextIOBase):
out_handle.write(contents.decode("utf-8"))
else:
out_handle.write(contents)
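# Minimal usage sketch (illustrative; which tools resolve depends on the installed
# IREE packages or on the IREE_TOOL_PATH environment variable, and "--help" is
# only assumed here to be a flag the compiler accepts):
#
#   exe = find_tool("ireec")                 # resolve the compiler binary
#   out = invoke_immediate([exe, "--help"])  # run it and capture stdout as bytes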
|
tcp_server_demo1.py
|
# -*- coding: utf-8 -*-
# @Time : 2018/8/1 10:58 AM
# @Author : yidxue
import socket
import threading
import time
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send('Welcome!')
while True:
data = sock.recv(1024)
time.sleep(5)
if data == 'exit' or not data:
break
sock.send('Hello: %s!' % data)
sock.close()
print('Connection from %s:%s closed.' % addr)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 9999))
s.listen(5)
print('Waiting for connection...')
while True:
# Accept a new connection:
sock, addr = s.accept()
# Create a new thread to handle the TCP connection:
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
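# A matching client sketch (illustrative, not part of the original demo; the
# server above sends and expects plain str payloads as written):
#
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 9999))
#   print(c.recv(1024))      # 'Welcome!'
#   c.send('Alice')
#   print(c.recv(1024))      # 'Hello: Alice!' (after the 5 second sleep)
#   c.send('exit')
#   c.close()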
|
pranjan77ContigFilterServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'pranjan77ContigFilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from pranjan77ContigFilter.pranjan77ContigFilterImpl import pranjan77ContigFilter
impl_pranjan77ContigFilter = pranjan77ContigFilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
raise ValueError('Neither \'job-service-url\' parameter is defined in '+
'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'pranjan77ContigFilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system-assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
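# Usage sketch (illustrative): run the service in a child process so that
# stop_server() can be called afterwards. A valid KB_DEPLOYMENT_CONFIG is assumed.
#
#   port = start_server(host='localhost', port=0, newprocess=True)
#   ... issue JSON-RPC POSTs against http://localhost:<port> ...
#   stop_server()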
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
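# Illustrative note (not part of the original file): the input file given to
# process_async_cli above is a JSON-RPC request document. The module, method
# and params below are hypothetical placeholders, not real service methods:
#
# {
#     "version": "1.1",
#     "id": "12345",
#     "method": "MyModule.my_method",
#     "params": [{"some": "input"}],
#     "context": {}
# }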
if __name__ == "__main__":
requests.packages.urllib3.disable_warnings()
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
view_tester.py
|
# Copyright 2017-2022 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import json
import logging
import sys
import threading
import time
try:
from urllib.request import urlopen
except ImportError:
# pylint: disable=import-error
from urllib2 import urlopen
from guild import util
log = logging.getLogger("guild")
def start_tester(host, port, exit=None):
if exit is None:
exit = lambda _code: None
tester = threading.Thread(target=_test_view, args=(host, port, exit))
tester.start()
def _test_view(host, port, exit_f):
view_url = util.local_server_url(host, port)
try:
_wait_for(view_url)
_test_runs(view_url)
_test_tensorboard(view_url)
except Exception:
log.exception("testing %s", view_url)
exit_f(1)
else:
exit_f(0)
def _wait_for(url):
_urlread(url)
def _test_runs(view_url):
runs_url = "{}/runs".format(view_url)
sys.stdout.write("Testing %s\n" % runs_url)
runs_str = _urlread(runs_url)
runs = json.loads(runs_str.decode())
sys.stdout.write(" - Got %i Guild run(s)\n" % len(runs))
sys.stdout.flush()
def _test_tensorboard(view_url):
tb_init_url = "{}/tb/0/".format(view_url)
sys.stdout.write("Initializing TensorBoard at %s\n" % tb_init_url)
_urlread(tb_init_url)
runs_url = "{}/tb/0/data/runs".format(view_url)
sys.stdout.write("Testing %s\n" % runs_url)
runs_str = _urlread(runs_url)
runs = json.loads(runs_str.decode())
sys.stdout.write(" - Got %i TensorBoard run(s)\n" % len(runs))
sys.stdout.flush()
def _urlread(url):
timeout = time.time() + 5 # 5 seconds to connect
while time.time() < timeout:
try:
f = urlopen(url)
except Exception as e:
if 'refused' not in str(e):
raise
time.sleep(1)
else:
return f.read()
raise RuntimeError("connect timeout")
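# Illustrative sketch (not part of the original module): invoking the tester
# directly against a local Guild View instance. The host and port values here
# are assumptions about where such a server would be listening.
if __name__ == "__main__":
    start_tester("localhost", 8080,
                 exit=lambda code: sys.stdout.write("tester finished with code %s\n" % code))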
|
PiVideoStream.py
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import cv2
class PiVideoStream:
def __init__(self, resolution=(320, 240), framerate=32):
# initialize the camera and stream
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr", use_video_port=True)
# initialize the frame and the variable used to indicate
# if the thread should be stopped
self.frame = None
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
for f in self.stream:
# grab the frame from the stream and clear the stream in
# preparation for the next frame
self.frame = f.array
self.rawCapture.truncate(0)
# if the thread indicator variable is set, stop the thread
# and release camera resources
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
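# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a Raspberry Pi camera is attached and a display is available for
# cv2.imshow; the loop shows frames until the 'q' key is pressed.
if __name__ == "__main__":
    import time
    vs = PiVideoStream(resolution=(320, 240), framerate=32).start()
    time.sleep(2.0)  # give the camera sensor time to warm up
    while True:
        frame = vs.read()
        if frame is None:
            continue
        cv2.imshow("Frame", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    vs.stop()
    cv2.destroyAllWindows()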
|
unittableview.py
|
import vdomr as vd
import time
import multiprocessing
import sys
from .stdoutsender import StdoutSender
import mtlogging
import numpy as np
import spikeforest_analysis as sa
from spikeforest import EfficientAccessRecordingExtractor
import json
class UnitTableView(vd.Component):
def __init__(self, context):
vd.Component.__init__(self)
self._context = context
self._size = (100, 100)
self._unit_table_widget = None
self._connection_to_init, connection_to_parent = multiprocessing.Pipe()
self._init_process = multiprocessing.Process(target=_initialize, args=(context, connection_to_parent))
self._init_process.start()
self._init_log_text = ''
vd.set_timeout(self._check_init, 0.5)
def _on_init_completed(self, result):
self._unit_table_widget = UnitTableWidget(context=self._context, units_info=result)
self._unit_table_widget.setSize(self._size)
self.refresh()
def setSize(self, size):
self._size = size
if self._unit_table_widget:
self._unit_table_widget.setSize(size)
def size(self):
return self._size
def tabLabel(self):
return 'Unit table'
def render(self):
if self._unit_table_widget:
return vd.div(
self._unit_table_widget
)
else:
return vd.div(
vd.h3('Initializing...'),
vd.pre(self._init_log_text)
)
def _check_init(self):
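# Poll the pipe connected to the background init process: forward any log
# output to the UI, build the unit table widget once the result arrives,
# otherwise re-arm this check to run again in one second.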
if not self._unit_table_widget:
if self._connection_to_init.poll():
msg = self._connection_to_init.recv()
if msg['name'] == 'log':
self._init_log_text = self._init_log_text + msg['text']
self.refresh()
elif msg['name'] == 'result':
self._on_init_completed(msg['result'])
return
vd.set_timeout(self._check_init, 1)
class UnitTableWidget(vd.Component):
def __init__(self, *, context, units_info):
vd.Component.__init__(self)
self._size = (100, 100)
self._units_info = units_info
def setSize(self, size):
self._size = size
self.refresh()
def render(self):
return vd.pre(json.dumps(self._units_info, indent=2))
# Initialization in a worker process
@mtlogging.log(root=True)
def _initialize(context, connection_to_parent):
with StdoutSender(connection=connection_to_parent):
print('***** Preparing efficient access recording extractor...')
_ = EfficientAccessRecordingExtractor(recording=context.recordingExtractor())
print('***** computing units info...')
info0 = sa.compute_units_info(recording=context.recordingExtractor(), sorting=context.trueSortingExtractor())
print('*****')
connection_to_parent.send(dict(
name='result',
result=info0
))
|
main_console.py
|
import threading
import time
# global variables
hero_health = 40
orc_health = 7
dragon_health = 20
def thread_orc():
global hero_health
global orc_health
while orc_health > 0 and hero_health > 0:
time.sleep(1.5)
hero_health = hero_health - 1
print("Orc attacked... Hero health: ", hero_health)
def thread_dragon():
global hero_health
global dragon_health
while dragon_health > 0 and hero_health > 0:
time.sleep(2.0)
hero_health = hero_health - 3
print("Dragon attacked... Hero health: ", hero_health)
# making threads for orc and dragon
orc = threading.Thread(target=thread_orc)
dragon = threading.Thread(target=thread_dragon)
# to start the thread
orc.start()
dragon.start()
# main user loop
while hero_health > 0 and orc_health > 0 and dragon_health > 0:
var = input("attack ")
if var == "orc":
orc_health = orc_health - 2
print("Hero attack Orc ... Orc health is ", str(orc_health))
elif var == "dragon":
dragon_health = dragon_health - 2
print("Hero attack dragon ... Dragon health is ", str(dragon_health))
# Wait for threads to finish
orc.join()
dragon.join()
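# Illustrative sketch (not part of the original script): hero_health is updated
# from the orc and dragon threads as well as from the main loop without any
# synchronization, so concurrent updates can race. A lock-protected variant
# would look like this; health_lock and attack_hero are hypothetical additions.
health_lock = threading.Lock()
def attack_hero(damage):
    global hero_health
    with health_lock:
        hero_health = hero_health - damage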
|
test.py
|
# coding=utf-8
import socket # socket module
import json
import random
import numpy as np
from collections import deque
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Conv2D, Flatten, concatenate
from keras.optimizers import Adam
from math import floor, sqrt
import tensorflow as tf
import subprocess
import time
import psutil
import pyautogui
import os
from multiprocessing import Pool
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
class DQNAgent:
def __init__(self, state_height, state_width, action_size):
# self.state_size = state_size
self.state_height = state_height
self.state_width = state_width
self.action_size = action_size
self.memory1 = deque(maxlen=2000)
self.memory2 = deque(maxlen=1000)
self.gamma = 0.85 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.15
self.epsilon_decay = 0.999
self.learning_rate = 0.001
self.model = self._build_model()
def _build_model(self):
# Neural Net for Deep-Q learning Model
input1 = Input(shape=(1, self.state_height, self.state_width))
conv1 = Conv2D(64, (4, 2), strides=1, activation='relu', padding='valid', data_format='channels_first',
input_shape=(1, self.state_height, self.state_width))(input1)
conv2 = Conv2D(64, (4, 2), strides=1, activation='relu', padding='valid')(conv1)
conv3 = Conv2D(3, 1, strides=1, activation='relu', padding='valid')(conv2)
state1 = Flatten()(conv3)
input2 = Input(shape=(3,))
state2 = concatenate([input2, state1])
state2 = Dense(256, activation='relu')(state2)
state2 = Dense(64, activation='relu')(state2)
out_put = Dense(self.action_size, activation='linear')(state2)
model = Model(inputs=[input1, input2], outputs=out_put)
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
# model = Sequential()
# model.add(Conv2D(64, (4, 2), strides=1, activation='relu', padding='valid', data_format='channels_first',
# input_shape=(1, self.state_height, self.state_width)))
# model.add(Conv2D(64, (4, 2), strides=1, activation='relu', padding='valid'))
# model.add(Conv2D(3, 1, strides=1, activation='relu', padding='valid'))
# model.add(Flatten())
# model.add(Dense(256, activation='relu'))
# model.add(Dense(self.action_size, activation='linear'))
# model.compile(loss='mse',
# optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state):
# store the transition in the primary replay buffer
self.memory1.append((state, action, reward, next_state))
def act(self, state):
if np.random.rand() <= self.epsilon:
print('random')
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory1, batch_size)
for state, action, reward, next_state in minibatch:
# target = reward
target = (reward + self.gamma *
np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
def connect(ser):
conn, addr = ser.accept() # accept a TCP connection and return a new socket and the client address
print('Connected by', addr) # print the client's address
return conn
def open_ter(loc):
os.system("gnome-terminal -e 'bash -c \"cd " + loc + " && ./path_planning; exec bash\"'")
time.sleep(1)
# return sim
def kill_terminal():
pids = psutil.pids()
for pid in pids:
p = psutil.Process(pid)
if p.name() == "gnome-terminal-server":
os.kill(pid, 9)
def close_all(sim):
if sim.poll() is None:
sim.terminate()
sim.wait()
time.sleep(2)
kill_terminal()
EPISODES = 20
location = "your_path_to/CarND-test/build"
HOST = '127.0.0.1'
PORT = 1234
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a TCP socket for network communication
server.bind((HOST, PORT)) # bind the socket to the host IP and port
server.listen(1) # start listening for TCP connections
state_height = 45
state_width = 3
action_size = 3
batch_size = 32
episode = 1
agent = DQNAgent(state_height, state_width, action_size)
agent.epsilon = 0.0
agent.epsilon_min = 0.0
agent.load("episode100.h5") # " + str(episode) + "
# p1 = Process(target = connect, args = (server,))
# p2 = Process(target = open_sim, args = (location,))
while episode <= EPISODES:
pool = Pool(processes=2)
result = []
result.append(pool.apply_async(connect, (server,)))
pool.apply_async(open_ter, (location,))
pool.close()
pool.join()
conn = result[0].get()
sim = subprocess.Popen('../../../term3_sim_linux/term3_sim.x86_64')
time.sleep(2)
pyautogui.click(x=1164, y=864, button='left')
time.sleep(6)
pyautogui.click(x=465, y=535, button='left')
try:
data = conn.recv(2000) # read the received data
except Exception as e:
# close_all(sim)
continue
while not data:
try:
data = conn.recv(2000) # read the received data
except Exception as e:
# close_all(sim)
continue
data = bytes.decode(data)
# print(data)
j = json.loads(data)
# Main car's localization Data
car_x = j[1]['x']
car_y = j[1]['y']
car_s = j[1]['s']
car_d = j[1]['d']
car_yaw = j[1]['yaw']
car_speed = j[1]['speed']
# Sensor Fusion Data, a list of all other cars on the same side of the road.
sensor_fusion = j[1]['sensor_fusion']
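# Build an occupancy grid of the road: 51 longitudinal cells by 3 lanes.
# Free cells stay at 1.0, the ego vehicle fills rows 31-34 of its lane with
# its scaled speed, and surrounding vehicles are written in below as
# negative scaled speeds.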
grid = np.ones((51, 3))
ego_car_lane = int(floor(car_d/4))
grid[31:35, ego_car_lane] = car_speed / 100.0
# sensor_fusion_array = np.array(sensor_fusion)
for i in range(len(sensor_fusion)):
vx = sensor_fusion[i][3]
vy = sensor_fusion[i][4]
s = sensor_fusion[i][5]
d = sensor_fusion[i][6]
check_speed = sqrt(vx * vx + vy * vy)
car_lane = int(floor(d / 4))
if 0 <= car_lane < 3:
s_dis = s - car_s
if -36 < s_dis < 66:
pers = - int(floor(s_dis / 2.0)) + 30
grid[pers:pers + 4, car_lane] = - check_speed / 100.0 * 2.237
state = np.zeros((state_height, state_width))
state[:, :] = grid[3:48, :]
state = np.reshape(state, [-1, 1, state_height, state_width])
pos = [car_speed / 50, 0, 0]
if ego_car_lane == 0:
pos = [car_speed / 50, 0, 1]
elif ego_car_lane == 1:
pos = [car_speed / 50, 1, 1]
elif ego_car_lane == 2:
pos = [car_speed / 50, 1, 0]
pos = np.reshape(pos, [1, 3])
# print(state)
action = 0
mess_out = str(action)
mess_out = str.encode(mess_out)
conn.sendall(mess_out)
count = 0
start = time.time()
while True:
now = time.time()
if (now - start) / 60 > 15:
# close_all(sim)
break
try:
data = conn.recv(2000) # read the received data
except Exception as e:
pass
while not data:
try:
data = conn.recv(2000) # read the received data
except Exception as e:
pass
data = bytes.decode(data)
if data == "over":
print("episode:{}".format(episode))
# close_all(sim)
conn.close() # 关闭连接
episode = episode + 1
break
j = json.loads(data)
last_state = state
last_pos = pos
last_act = action
last_lane = ego_car_lane
# Main car's localization Data
car_x = j[1]['x']
car_y = j[1]['y']
car_s = j[1]['s']
car_d = j[1]['d']
car_yaw = j[1]['yaw']
car_speed = j[1]['speed']
print(car_s)
if car_speed == 0:
mess_out = str(0)
mess_out = str.encode(mess_out)
conn.sendall(mess_out)
continue
# Sensor Fusion Data, a list of all other cars on the same side of the road.
sensor_fusion = j[1]['sensor_fusion']
ego_car_lane = int(floor(car_d / 4))
if last_act == 0:
last_reward = 2 * (2 * ((j[3] - 35.0) / 5.0)) # - abs(ego_car_lane - 1))
else:
last_reward = 2 * (2 * ((j[3] - 35.0) / 5.0)) - 6.0
if grid[3:31, last_lane].sum() > 27 and last_act != 0:
last_reward = -60.0
grid = np.ones((51, 3))
grid[31:35, ego_car_lane] = car_speed / 100.0
# sensor_fusion_array = np.array(sensor_fusion)
for i in range(len(sensor_fusion)):
vx = sensor_fusion[i][3]
vy = sensor_fusion[i][4]
s = sensor_fusion[i][5]
d = sensor_fusion[i][6]
check_speed = sqrt(vx * vx + vy * vy)
car_lane = int(floor(d / 4))
if 0 <= car_lane < 3:
s_dis = s - car_s
if -36 < s_dis < 66:
pers = - int(floor(s_dis / 2.0)) + 30
grid[pers:pers + 4, car_lane] = - check_speed / 100.0 * 2.237
if j[2] < -10:
last_reward = float(j[2]) # reward -50, -100
last_reward = last_reward / 20.0
state = np.zeros((state_height, state_width))
state[:, :] = grid[3:48, :]
state = np.reshape(state, [-1, 1, state_height, state_width])
# print(state)
pos = [car_speed / 50, 0, 0]
if ego_car_lane == 0:
pos = [car_speed / 50, 0, 1]
elif ego_car_lane == 1:
pos = [car_speed / 50, 1, 1]
elif ego_car_lane == 2:
pos = [car_speed / 50, 1, 0]
pos = np.reshape(pos, [1, 3])
# print(state)
action = agent.act([state, pos])
if action != 0:
print(action)
mess_out = str(action)
mess_out = str.encode(mess_out)
conn.sendall(mess_out)
|
test_stream_xep_0059.py
|
import threading
from sleekxmpp.test import *
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins.xep_0030 import DiscoItems
from sleekxmpp.plugins.xep_0059 import ResultIterator, Set
class TestStreamSet(SleekTest):
def setUp(self):
register_stanza_plugin(DiscoItems, Set)
def tearDown(self):
self.stream_close()
def iter(self, rev=False):
q = self.xmpp.Iq()
q['type'] = 'get'
it = ResultIterator(q, 'disco_items', amount='1', reverse=rev)
for i in it:
for j in i['disco_items']['items']:
self.items.append(j[0])
def testResultIterator(self):
self.items = []
self.stream_start(mode='client')
t = threading.Thread(target=self.iter)
t.start()
self.send("""
<iq type="get" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item1" />
<set xmlns="http://jabber.org/protocol/rsm">
<last>item1</last>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<after>item1</after>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item2" />
<set xmlns="http://jabber.org/protocol/rsm">
<last>item2</last>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<after>item2</after>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item2" />
<set xmlns="http://jabber.org/protocol/rsm">
</set>
</query>
</iq>
""")
t.join()
self.failUnless(self.items == ['item1', 'item2'])
def testResultIteratorReverse(self):
self.items = []
self.stream_start(mode='client')
t = threading.Thread(target=self.iter, args=(True,))
t.start()
self.send("""
<iq type="get" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<before />
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item2" />
<set xmlns="http://jabber.org/protocol/rsm">
<first>item2</first>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<before>item2</before>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item1" />
<set xmlns="http://jabber.org/protocol/rsm">
<first>item1</first>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<before>item1</before>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item1" />
<set xmlns="http://jabber.org/protocol/rsm">
</set>
</query>
</iq>
""")
t.join()
self.failUnless(self.items == ['item2', 'item1'])
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamSet)
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_test_file, parameterized, ensure_dir, disabled
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend so any test that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super(BrowserCore, self).setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future; remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp'), 0,
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4'])
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = path_from_root('tests/manual_download_data.cpp')
create_test_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file makes xhr.onload run with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
src = path_from_root('tests', 'sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
src = path_from_root('tests', 'sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
return ['-s', 'ASYNCIFY']
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP'] + self.get_async_args()
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([path_from_root('tests', 'sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
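    # Like test_sdl_mouse, but the events use raw page coordinates and the canvas
    # is offset via CSS in a custom page.html, so this appears to exercise SDL's
    # mapping of page coordinates onto the canvas (-DTEST_SDL_MOUSE_OFFSETS).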
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
        // Dispatch a 'gamepadconnected' event (required for the glfw joystick code path; the SDL tests do not use it)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
    # JavaScript code to check support for the WebGL context attributes we want to test
    # (request the attribute, create a context, then check its value in the resulting context attributes).
    # The tests still succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', 0, args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', path_from_root('tests', 'test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
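    # The first run preloads moar.txt containing the secret and persists it; the
    # later runs are expected to read the persisted secret back, even when a
    # different moar.txt is preloaded.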
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
    self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a (different) file preloaded, the persisted data is loaded over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
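    # Mount a WORKERFS containing one Blob and one File in the worker and verify
    # that their contents (the two secrets) can be read back.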
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
    # compress in emcc: -s LZ4=1 tells emcc to pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([path_from_root('tests', 'fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
    # see issue #6654 - we need to handle separate-metadata both when the packaged
    # files load before the main program and when they load later
create_test_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
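    # Run idbstore.c through a sequence of stages; each stage number is passed in
    # via -DSTAGE and is also the value the test is expected to report back.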
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([path_from_root('tests', 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([path_from_root('tests', 'test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([path_from_root('tests', 'test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
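    # The worker lazily reads /bigfile over HTTP from a local test server that
    # serves it in small chunks, and reports a line on stdout; the page forwards
    # that line to /report_result, where it must match the adler32 checksum of
    # the random data we hand the server (checksummer.c presumably computes it).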
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data) & 0xffffffff # force an unsigned 32-bit value (Python 2's adler32 can return a signed int)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
    for i in range(60):
      try:
        urlopen('http://localhost:11111')
        break
      except Exception:
        print('(sleep for server)')
        time.sleep(1)
    else:
      # the loop finished without break, i.e. the server never came up
      raise Exception('chunked XHR test server did not start up in time')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
      # Avoid a race condition on cleanup: wait a bit so that processes have released
      # their file locks and test tearDown won't attempt to rmdir() files still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
    # test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
with open('test.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by the dEQP test suite (we can remove this later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
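    # Build supp.cpp as a SIDE_MODULE (supp.wasm) and main.cpp as a MAIN_MODULE
    # that loads it at runtime via RUNTIME_LINKED_LIBS; the modules call back and
    # forth and the test exits with suppInt (76).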
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
create_test_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return suppInt;
}
''')
self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
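    # With WASM=0 the memory init file arrives asynchronously; post.js writes to
    # memory too early, which assertions detect (expected 9), while without
    # assertions the early write is simply overwritten (expected 3).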
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
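  # Test that ccall/cwrap/direct calls into compiled code abort while the runtime is not ready
  # (before startup or after exit), and succeed while it is alive.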
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
      ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
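  # Test using cwrap from a --pre-js (browser/cwrap_early.js), with cwrap exported via
  # EXTRA_EXPORTED_RUNTIME_METHODS.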
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
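  # Test the BUILD_AS_WORKER API: worker_api_worker.cpp is built as a worker exporting _one,
  # and is driven from worker_api_main.cpp.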
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=[_one,_two]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
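  # Test that a side module bundled with --preload-file and --use-preload-plugins appears in
  # Module['preloadedWasm'] and can then be dlopen()ed.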
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
os.rename('library.wasm', 'library.so')
create_test_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'],
expected='0')
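  # Test mmap()ing a file bundled with --preload-file.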
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
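  # Regression test: adding and immediately removing a run dependency in preRun must not make
  # the program start twice.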
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(path_from_root('tests', 'test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
    # test that a program can be compiled with pthreads and still render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest(path_from_root('tests', 'html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
    # tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
    # but context creation fails, we can then manually try to create a
    # WebGL1 context and succeed.
self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')
  # Tests that -s GL_ASSERTIONS=1 works with glVertexAttribPointer and packed types
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
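  # Test Module.locateFile: redirect the .wasm/.mem and packaged data files into a
  # subdirectory, both via a --pre-js and via the shell HTML.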
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
    # load an image file and get pixel data. Also gives -O2 coverage for --preload-file and memory init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
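  # Test SDL2 canvas rendering under --proxy-to-worker, reftesting the main-page canvas after
  # the worker has drawn.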
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([path_from_root('tests', 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(path_from_root('tests', 'third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2'])
@disabled('https://github.com/emscripten-core/emscripten/issues/13101')
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', path_from_root('tests', 'sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
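  # Test basic asynchronous execution (see get_async_args) across optimization levels.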
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
    # Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
    # than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_test_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure', '1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
          if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
          HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
          let promise = HelloWorld();
          if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = path_from_root('tests', 'browser_test_hello_world.c')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = path_from_root('tests', 'browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
    # amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_test_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory)
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
        // instantiate the Foo module with a custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
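  # Test dynamic linking: a MAIN_MODULE loads side.wasm (listed in Module.dynamicLibraries),
  # calls into it, and checks its output, both on the main thread and under --proxy-to-worker.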
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output):
      # XXX there is no infrastructure (yet?) to retrieve stdout from the browser in tests,
      # so do the assert about the expected output inside the browser.
      #
      # we have to put the hook into post.js because in main it is too late
      # (in main we won't be able to catch what static constructors inside
      # linked dynlibs printed), and in pre.js it is too early (out is not yet
      # set up by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_test_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), 0, args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_test_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_test_file('side1.c', r'''
int side1() { return 1; }
''')
create_test_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), '3',
args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
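  # Test that memory growth during startup (forced by preloading a large file with a small
  # INITIAL_MEMORY) works.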
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
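  # Create a shell HTML file that hides SharedArrayBuffer/Atomics, to test behavior when the
  # browser lacks shared memory support.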
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
  # Test that the emscripten_ atomics API functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
    # TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
  # Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
    self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
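  # For reference, the pattern being exercised looks roughly like this (illustrative sketch only,
  # not the actual test source):
  #   volatile int flag = 0;           // one thread flips the flag...
  #   while (!flag) { /* spin */ }     // ...another thread polls it
  # The test builds both with and without -DUSE_C_VOLATILE to cover the C `volatile` variant.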
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
  # Test that pthreads are able to do cout. Previously failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(path_from_root('tests', 'unistd', 'io.c'), 0, args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(path_from_root('tests', 'pthread', 'test_pthread_dispatch_after_exit.c'), 0, args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), that it's not a deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
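  # With ABORTING_MALLOC=0 a failed allocation returns NULL instead of aborting, so the test can
  # probe for available memory roughly like this (illustrative sketch only, not the actual test source):
  #   while (malloc(chunk_size) != NULL) { allocated += chunk_size; }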
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(path_from_root('tests', 'pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp'), expected=0, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([path_from_root('tests', 'in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest_exit('binaryen_async.c', expected=expect, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', expected=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
self.compile_btest([path_from_root('tests/manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5368), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
  @disabled('This test is disabled because the current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
  # For testing WebGL draft extensions like this, when using Chrome as the browser,
  # you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
  # For testing WebGL draft extensions like this, when using Chrome as the browser,
  # you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
  # If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
  # Also there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
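  # For reference, a pthread asks for a canvas to be transferred to it roughly like this
  # (illustrative sketch only, not the actual test source):
  #   pthread_attr_t attr;
  #   pthread_attr_init(&attr);
  #   emscripten_pthread_attr_settransferredcanvases(&attr, "#canvas");
  #   pthread_create(&thread, &attr, thread_main, NULL);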
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
  # Tests the feature that the shell html page can preallocate the typed array and place it
  # in Module.buffer before loading the script page.
  # In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
  # Preallocating the buffer in this way is asm.js-only (wasm needs a Memory).
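  # For reference, the shell page preallocates the heap roughly like this (illustrative sketch
  # only; see tests/test_preallocated_heap_shell.html for the real source):
  #   <script>var Module = { buffer: new ArrayBuffer(16 * 1024 * 1024) };</script>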
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', expected='0', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
    # Strategy: create a large 128MB file and stream it in, so that the file contents do not all
    # have to be resident in the heap at once. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
  # In that case, append is implicitly understood.
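  # For reference, such a request is set up roughly like this (illustrative sketch only; see
  # tests/fetch/example_synchronous_fetch.cpp for the real source):
  #   emscripten_fetch_attr_t attr;
  #   emscripten_fetch_attr_init(&attr);
  #   strcpy(attr.requestMethod, "GET");
  #   attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;  // none of append/replace/no_download
  #   emscripten_fetch_t *fetch = emscripten_fetch(&attr, "gears.png");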
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(path_from_root('tests', 'unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(path_from_root('tests', 'unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(path_from_root('tests', 'unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_test_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.compile_btest([path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
create_test_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
  # Tests that Emscripten-compiled applications can be run in the browser from a relative path that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but here we also
  # eval the initial code, so currentScript is not present. That prevents us
  # from finding the file in a subdir, but here we at least check we do not regress compared to the
  # normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind'] + self.get_async_args())
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM and try again. It should still work with the Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
# Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(path_from_root('tests', 'system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_emrun(self):
self.run_process([EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as its startup directory, and the browser will
# not close as part of the test, which pins down the cwd on Windows and would make it impossible
# to delete. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
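# Note (illustrative, not part of the original test): the argparse trick above strips any
# `-profile <path>` / `--profile <path>` pair that EMTEST_BROWSER may carry for Firefox, e.g.
#   argparse.ArgumentParser(add_help=False) with '-profile' and '--profile' registered, then
#   parser.parse_known_args(['-profile', '/tmp/foo', '-headless'])[1]  ->  ['-headless']
# so only the remaining flags are forwarded to emrun via --browser_args.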
|
timo.py
|
# -*- coding: utf-8 -*-
import os,sys
from urllib.request import Request,urlopen
import tinify
import threading
import time
import base64
from PIL import Image
import shutil
import json
keys = ["yourkey1","yourkey2","yourkey3"]
suffix = ["png","jpg","JPG","JPEG","jpeg","PNG"]
suffixMp3 = ["mp3","MP3"]
global timo_has_done
timo_has_done = 0
global base64_has_done
base64_has_done = 0
def get_image_filenames(input_dir):
image_name_list = []
for fn in os.listdir(input_dir):
if len(fn.split("."))>1:
if fn.split(".")[-1] in suffix:
image_name_list.append(fn)
return image_name_list
image_name_list = []
def get_all_images(input_dir):
for fn in os.listdir(input_dir):
filepath = os.path.join(input_dir, fn)
if os.path.isdir(filepath):
get_all_images(filepath)
if len(filepath.split("."))>1:
if filepath.split(".")[-1] in suffix:
image_name_list.append(filepath)
return image_name_list
audio_name_list = []
def get_all_audios(input_dir):
for fn in os.listdir(input_dir):
filepath = os.path.join(input_dir, fn)
if os.path.isdir(filepath):
get_all_audios(filepath)
if len(filepath.split("."))>1:
if filepath.split(".")[-1] in suffixMp3:
audio_name_list.append(filepath)
return audio_name_list
def removeDir(dirPath):
if not os.path.isdir(dirPath):
return
files = os.listdir(dirPath)
for file in files:
filePath = os.path.join(dirPath, file)
if os.path.isfile(filePath):
os.remove(filePath)
elif os.path.isdir(filePath):
removeDir(filePath)
os.rmdir(dirPath)
def dirlist(path, allfile,now_dir_name,new_dir_name):
filelist = os.listdir(path)
for filename in filelist:
filepath = os.path.join(path, filename)
if os.path.isdir(filepath):
dirlist(filepath, allfile,now_dir_name,new_dir_name)
allfile.append(filepath.replace(now_dir_name,new_dir_name,1))
return allfile
# mode 1: scale by percentage; mode 2: scale to a fixed width
def do_timo(filename,total,now_dir_name,new_dir_name,index,scale_num,mode):
out_name = filename.replace(now_dir_name,new_dir_name,1)
if mode == 2:
scaleWidth = scale_num
if scaleWidth is not None and scaleWidth != "":
source = tinify.from_file(filename)
resized = source.resize(
method="scale",
width = scaleWidth
)
resized.to_file(out_name)
else:
source = tinify.from_file(filename)
source.to_file(out_name)
else:
scale_percentage = scale_num
if scale_percentage < 1:
image_width = Image.open(filename).size[0]
image_height = Image.open(filename).size[1]
source = tinify.from_file(filename)
resized = source.resize(
method="fit",
width = int(image_width*scale_percentage),
height = int(image_height*scale_percentage)
)
resized.to_file(out_name)
else:
source = tinify.from_file(filename)
source.to_file(out_name)
global timo_has_done
timo_has_done += 1
print("{0} has done ,total {1},completed {2}".format(out_name,total,timo_has_done))
preimage = "data:image/png;base64,"
def do_base64(filename,total,now_dir_name,new_dir_name,index):
from_png = open(filename,'rb')
base_temp = base64.b64encode(from_png.read())
to_txt = open(filename.replace(now_dir_name,new_dir_name)+".txt",'w')
to_txt.write(preimage+str(base_temp)[2:-1])
from_png.close()
to_txt.close()
global base64_has_done
base64_has_done += 1
print("{0} has done ,total {1},completed {2}".format(filename.replace(now_dir_name,new_dir_name)+".txt",total,base64_has_done))
preaudio = "data:audio/mp3;base64,"
def do_base64_audio(filename,total,now_dir_name,new_dir_name,index):
from_png = open(filename,'rb')
base_temp = base64.b64encode(from_png.read())
to_txt = open(filename.replace(now_dir_name,new_dir_name)+".txt",'w')
to_txt.write(preaudio+str(base_temp)[2:-1])
from_png.close()
to_txt.close()
global base64_has_done
base64_has_done += 1
print("{0} has done ,total {1},completed {2}".format(filename.replace(now_dir_name,new_dir_name)+".txt",total,base64_has_done))
def do_tinify_list():
print("validate keys ...")
input_dir = sys.argv[1]
now_dir_name = input_dir.split("\\")[-1]
new_dir_name = "new_image_{0}".format(int(time.time()))
output_dir = "./"+new_dir_name
# create the new output directory and its subdirectories
dir_list = dirlist(input_dir,[],now_dir_name,new_dir_name)
os.mkdir(output_dir)
for dir_path in dir_list[::-1]:
os.mkdir(dir_path)
image_names = get_all_images(input_dir)
total = len(image_names)
# decide which key to use; each key only allows 500 compressions
key_effective = False
for key in keys:
tinify.key = key
tinify.validate()
remain = 500-tinify.compression_count
if remain>total:
key_effective = True
break
if key_effective:
print("now_key:"+tinify.key)
#get scale
mode = input("\nselect scale mode :\n[1]scale by percentage\n[2]scale by fixed width\nenter your choice (default=1):")
if mode is not "2" :
mode = 1
else:
mode = 2
print("Scale mode is {0}".format(mode))
if mode == 1:
scale_percentage = input("enter scale_percentage,0<scale_percentage<=100,default = 100 :")
try:
scale_percentage = float(scale_percentage);
if scale_percentage>=100.0 or scale_percentage<0:
scale_percentage = 100.0
except:
scale_percentage = 100.0
print("convert scale_percentage is "+str(scale_percentage)+"%")
scale_percentage = scale_percentage/100
print("convert start ...")
threads = []
for index,image_name in enumerate(image_names):
#threads.append(threading.Thread(target=do_timo,args=(image_name,total,now_dir_name,new_dir_name,index,scale_percentage,2)))
threads.append(threading.Thread(target=do_timo,args=(image_name,total,now_dir_name,new_dir_name,index,scale_percentage,1)))
for t in threads:
t.start()
elif mode == 2:
fixedWidth = input("enter fixed width(integer) ,default or wrong input cause no scale:")
try:
fixedWidth = int(fixedWidth)
print("fixed width is {0}".format(fixedWidth))
except:
fixedWidth = None
print("will be no scale")
print("convert start ...")
threads = []
for index,image_name in enumerate(image_names):
#threads.append(threading.Thread(target=do_timo,args=(image_name,total,now_dir_name,new_dir_name,index,scale_percentage,2)))
threads.append(threading.Thread(target=do_timo,args=(image_name,total,now_dir_name,new_dir_name,index,fixedWidth,2)))
for t in threads:
t.start()
else:
print("please check your key list,compression count may be full !!!")
def do_base64_list():
input_dir = sys.argv[1]
now_dir_name = input_dir.split("\\")[-1]
new_dir_name = "base64_"+now_dir_name
output_dir = "./"+new_dir_name
# create the new output directory and its subdirectories
dir_list = dirlist(input_dir,[],now_dir_name,new_dir_name)
if os.path.exists(output_dir):
removeDir(output_dir)
os.mkdir(output_dir)
for dir_path in dir_list[::-1]:
os.mkdir(dir_path)
image_names = get_all_images(input_dir)
total = len(image_names)
print("convert base64 start ...")
threads = []
for index,image_name in enumerate(image_names):
threads.append(threading.Thread(target=do_base64,args=(image_name,total,now_dir_name,new_dir_name,index)))
for t in threads:
t.start()
def do_base64_list_audio():
input_dir = sys.argv[1]
now_dir_name = input_dir.split("\\")[-1]
new_dir_name = "base64_"+now_dir_name
output_dir = "./"+new_dir_name
# create the new output directory and its subdirectories
dir_list = dirlist(input_dir,[],now_dir_name,new_dir_name)
if os.path.exists(output_dir):
removeDir(output_dir)
os.mkdir(output_dir)
for dir_path in dir_list[::-1]:
os.mkdir(dir_path)
audio_names = get_all_audios(input_dir)
total = len(audio_names)
print("convert base64 start ...")
threads = []
for index,audio_name in enumerate(audio_names):
threads.append(threading.Thread(target=do_base64_audio,args=(audio_name,total,now_dir_name,new_dir_name,index)))
for t in threads:
t.start()
def do_clip_rename(start_num):
input_dir = sys.argv[1]
temp_dir_name = "temp_{0}".format(int(time.time()))
os.mkdir(input_dir+"\\"+temp_dir_name)
image_names = get_image_filenames(input_dir)
try:
image_names.sort(key=lambda x:int(x.split(".")[-2]))
except:
image_names = get_image_filenames(input_dir)
print(image_names)
for i in image_names:
shutil.move(input_dir+"\\"+i,input_dir+"\\"+temp_dir_name+"\\"+i)
for i in image_names:
print(i);
shutil.move(input_dir+"\\"+temp_dir_name+"\\"+i,input_dir+"\\"+str(start_num)+"."+i.split(".")[-1])
start_num = start_num+1
shutil.rmtree(input_dir+"\\"+temp_dir_name)
def do_get_all_imagePath():
input_dir = sys.argv[1]
fo = open("foo.txt", "wb")
json_str = json.dumps(get_all_images(input_dir))
fo.write(str(json_str).replace("\\\\","\\").replace(input_dir,input_dir.split("\\")[-1]).replace("\\","/").encode(encoding="utf-8"));
fo.close()
def doOnceF():
filecount = 0
input_dir = sys.argv[1]
now_dir_name = input_dir.split("\\")[-1]
suffix64 = "png"
for root,dir,files in os.walk(input_dir):
filecount+=len(files)
if(os.path.exists(input_dir+"/0.png")):
suffix64 = "png"
if(os.path.exists(input_dir+"/0.jpg")):
suffix64 = "jpg"
if(os.path.exists(input_dir+"/0.JPG")):
suffix64 = "jpg"
if(os.path.exists(input_dir+"/0.PNG")):
suffix64 = "png"
to_txt = open("{0}.txt".format(now_dir_name),'w')
for i in range(0,filecount):
print("{0}\\{1}.{2}".format(input_dir,i,suffix64).replace("\\","/"))
from_png = open("{0}\\{1}.{2}".format(input_dir,i,suffix64).replace("\\","/"),'rb')
base_temp = base64.b64encode(from_png.read())
if i == 0 :
to_txt.write("[")
if i != filecount-1:
to_txt.write("'{0}{1}',".format(preimage,str(base_temp)[2:-1]))
else:
to_txt.write("'{0}{1}']".format(preimage,str(base_temp)[2:-1]))
from_png.close()
to_txt.close()
def doMakeSplitSheet():
input_dir = sys.argv[1]+"\\"
#0 vertical 1 horizontal
imageMode = 0
# extra margin (padding) between frames
ras = 20
if os.path.exists(input_dir+"sheet.png"):
os.remove(input_dir+"sheet.png")
if os.path.exists(input_dir+"sheetfoo.txt"):
os.remove(input_dir+"sheetfoo.txt")
for root,dirs,files in os.walk(input_dir):
imageNames = files
imageNames.sort(key= lambda x:int(x[:-4]))
firstImage = Image.open(input_dir+imageNames[0])
w,h = firstImage.size
if h > w:
imageMode = 1
if imageMode == 0:
sheetSize = (w,(h+ras*2)*len(imageNames))
else:
sheetSize = ((w+ras*2)*len(imageNames),h)
sheet = Image.new("RGBA",sheetSize,(0,0,0,0))
for index,fileName in enumerate(imageNames):
img = Image.open(input_dir+fileName)
if imageMode == 0:
box = (0,h*index+ras*(index+1)+ras*index,w,h+h*index+ras*(index+1)+ras*index)
else:
box = (w*index+ras*(index+1)+ras*index,0,w+w*index+ras*(index+1)+ras*index,h)
print(box)
sheet.paste(img,box)
sheet.save("sheet.png","PNG")
if imageMode == 1 :
css = ".image-sheet{\n width:"+str(w+2*ras)+"px;\n height:"+str(h)+"px;\nposition: absolute;\nbackground-size: "+str(len(imageNames))+"00% 100%;\nbackground-position: 0% 0%;\nbackground-image: url(sheet.png);\n-webkit-animation: imageSheet infinite 1s steps("+str(len(imageNames))+",start);\nanimation: imageSheet infinite 2s steps("+str(len(imageNames))+",start);\n}\n@keyframes imageSheet{\n0% {background-position: "+str(len(imageNames))+"00% 0%}\n100% {background-position: 0% 0%}\n}\n@-webkit-keyframes imageSheet{\n0% {background-position: "+str(len(imageNames))+"00% 0%}\n100% {background-position: 0% 0%}\n}"
else:
css = ".image-sheet{\n width:"+str(w)+"px;\n height:"+str(h+2*ras)+"px;\nposition: absolute;\nbackground-size: 100% "+str(len(imageNames))+"00%;\nbackground-position: 0% 0%;\nbackground-image: url(sheet.png);\n-webkit-animation: imageSheet infinite 1s steps("+str(len(imageNames))+",start);\nanimation: imageSheet infinite 2s steps("+str(len(imageNames))+",start);\n}\n@keyframes imageSheet{\n0% {background-position: 0% "+str(len(imageNames))+"00%}\n100% {background-position: 0% 0%}\n}\n@-webkit-keyframes imageSheet{\n0% {background-position: 0% "+str(len(imageNames))+"00%}\n100% {background-position: 0% 0%}\n}"
fo = open("sheetfoo.txt", "w")
fo.write(css)
fo.close()
def convertPng():
input_dir = "test3"+"\\"
new_dir_name = "jpg_image_{0}".format(int(time.time()))
output_dir = "./"+new_dir_name
os.mkdir(output_dir)
for root,dirs,files in os.walk(input_dir):
imageNames = files
for image in imageNames:
if image.split(".")[-1] in ["png","PNG"]:
im = Image.open(input_dir+image)
background = Image.new('RGB', im.size, (255, 255, 255))
background.paste(im, mask=im.split()[3])
saveName = image.split(".")
saveName[-1] = "jpg"
background.save(output_dir+"\\"+".".join(saveName))  # save the flattened RGB copy; the RGBA original cannot be written as JPEG
def justForHuige():
prejpeg = "data:image/jpeg;base64,"
input_dir = sys.argv[1]
now_dir_name = input_dir.split("\\")[-1]
new_dir_name = "sprit_base64_{0}".format(int(time.time()))
output_dir = "./"+new_dir_name
imagecount = 0
for root,dirs,files in os.walk(input_dir): # walk the directory tree to count the files
for each in files:
imagecount += 1
data = []
for i in range(0,imagecount):
from_jpg = open(input_dir+"\\"+str(i)+".jpg",'rb')
base_temp = base64.b64encode(from_jpg.read())
data.append(prejpeg+str(base_temp)[2:-1])
from_jpg.close()
to_txt = open(new_dir_name+".txt",'w')
to_txt.write(json.dumps(data,ensure_ascii=False))
to_txt.close()
def main():
if(len(sys.argv)>1):
feature = input("choose function: \n[1]tinify\n[2]ImageToBase64;\n[3]clip image rename\n[4]get all imagePath\n[5]AudioToBase64;\n[7]makeSpritSheet;\n[8]converPng;\n[9]sprit base64 (sequence's suffix must be .jpg;file name must be like 0.jpg ~ 222.jpg);\nenter your choice and press enter:")
#tinify
if feature == "1":
do_tinify_list()
#base64
elif feature == "2":
do_base64_list()
elif feature == "3":
start_num = input("enter start number(default=0): ")
try:
start_num = int(start_num)
except:
start_num = 0
print("start_num is {0}".format(start_num))
do_clip_rename(start_num)
elif feature == "4":
do_get_all_imagePath()
elif feature == "5":
do_base64_list_audio()
elif feature == "6":
doOnceF()
elif feature == "7":
doMakeSplitSheet()
elif feature == "8":
convertPng()
elif feature == "9":
justForHuige()
if __name__ == '__main__':
main()
|
email.py
|
# from threading import Thread
# from flask import render_template
# from flask_mail import Message
# from app import app, mail
# def send_async_email(app, msg):
# with app.app_context():
# mail.send(msg)
# def send_email(subject, sender, recipients, text_body, html_body):
# msg = Message(subject, sender=sender, recipients=recipients)
# msg.body = text_body
# msg.html = html_body
# Thread(target=send_async_email, args=(app, msg)).start()
# def send_password_reset_email(user):
# token = user.get_reset_password_token()
# send_email('[Microblog] Reset Your Password',
# sender=app.config['ADMINS'][0],
# recipients=[user.email],
# text_body=render_template('email/reset_password.txt',
# user=user, token=token),
# html_body=render_template('email/reset_password.html',
# user=user, token=token))
|
03.py
|
import time
import threading
def test():
time.sleep(10)
for i in range(1, 10):
print(i)
thread1 = threading.Thread(target=test, daemon=False)
# thread1 = threading.Thread(target=test, daemon=True)
thread1.start()
print('Main thread finished')
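# Note (illustrative, not part of the original snippet): with daemon=True the interpreter
# exits as soon as the main thread finishes, so the numbers above would never be printed.
# Joining the thread waits for it explicitly, e.g.:
#   thread2 = threading.Thread(target=test, daemon=True)
#   thread2.start()
#   thread2.join()  # block until test() completes, even for a daemon thread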
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except Exception:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
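# Illustrative sketch (not part of the original module): how the worker count above is
# derived. With 16 CPUs, world_size=2, batch_size=4 and workers=8 the result is
# min(16 // 2, 4, 8) = 4 dataloader workers per process.
def _example_num_workers(cpus=16, world_size=2, batch_size=4, workers=8):
    return min([cpus // world_size, batch_size if batch_size > 1 else 0, workers])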
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
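# Illustrative sketch (not part of the original module): _RepeatSampler simply re-yields
# the wrapped sampler forever, which is what lets InfiniteDataLoader reuse its workers
# across epochs instead of recreating them.
def _example_repeat_sampler(n_batches=5):
    sampler = _RepeatSampler([0, 1, 2])  # any iterable works as the inner sampler
    it = iter(sampler)
    return [next(it) for _ in range(n_batches)]  # -> [0, 1, 2, 0, 1]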
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640):
self.img_size = img_size
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'stream'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
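# Illustrative sketch (not part of the original module): the label path is obtained by
# swapping the /images/ directory for /labels/ and the image suffix for .txt.
def _example_label_path():
    # expected result on a POSIX system: 'coco/labels/train/0001.txt'
    return img2label_paths(['coco/images/train/0001.jpg'])[0]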
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
if cache_path.is_file():
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
cache = self.cache_labels(cache_path, prefix) # re-cache
else:
cache = self.cache_labels(cache_path, prefix) # cache
# Display cache
[nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + desc, total=n, initial=n)
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
labels, shapes = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = [nf, nm, ne, nc, i + 1]
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
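# Illustrative sketch (not part of the original module): augment_hsv modifies the image in
# place via per-channel lookup tables. On a dummy BGR image the call below leaves shape and
# dtype unchanged while randomly jittering hue, saturation and value.
def _example_augment_hsv():
    dummy = np.full((32, 32, 3), 128, dtype=np.uint8)  # uniform grey BGR image
    augment_hsv(dummy, hgain=0.015, sgain=0.7, vgain=0.4)
    return dummy.shape, dummy.dtype  # -> ((32, 32, 3), dtype('uint8'))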
def hist_equalize(img, clahe=True, bgr=False):
# Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels = self.labels[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
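# Worked example (illustrative, not part of the original function): placement arithmetic for
# the first (top-left) mosaic tile. With img_size s=640, a mosaic centre at (xc, yc)=(700, 500)
# and a 640x480 source image (w=640, h=480), the tile is pasted into img4[20:500, 60:700] and
# the matching source crop is img[0:480, 0:640]; padw = 60 - 0 = 60 and padh = 20 - 0 = 20 are
# the offsets later added to the label coordinates by xywhn2xyxy.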
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9 = []
s = self.img_size
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels = self.labels[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
labels9.append(labels)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
if len(labels9):
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
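# Illustrative sketch (not part of the original module): letterboxing a 480x640 (HxW) image to
# new_shape=640. The scale ratio is r = min(640/480, 640/640) = 1.0, so new_unpad = (640, 480)
# and dh = 160. With auto=True the padding is reduced modulo 32 (here to 0), while with
# auto=False the full 160 px is split into 80 px of grey border on top and bottom.
def _example_letterbox():
    dummy = np.zeros((480, 640, 3), dtype=np.uint8)
    img, ratio, (dw, dh) = letterbox(dummy, new_shape=640, auto=False)
    return img.shape, ratio, (dw, dh)  # -> ((640, 640, 3), (1.0, 1.0), (0.0, 80.0))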
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
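# i.e. 31 random masks in total: one up to half the image size, two up to a
# quarter, four up to an eighth, eight up to a sixteenth and sixteen up to a
# thirty-second of each image dimension.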
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
# Arguments
path: Path to images directory
weights: Train, val, test weights (list)
"""
path = Path(path) # images dir
files = list(path.rglob('*.*'))
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
for i, img in tqdm(zip(indices, files), total=n):
if img.suffix[1:] in img_formats:
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
|
audio.py
|
from pydub import AudioSegment
import os
import sys
import csv
import threading, queue
import time
from log import Log
from error import Error
import pygame
class Audio(Log, Error):
'''
The Audio object takes the number of handbells and their tuning from the
current ReBel configuration and compares them to the bell spec file of the
previously generated bells, if it exists. If the two sets of parameters
match, the previously generated handbell sounds are read in; if they do not
match, or the bell spec file doesn't exist, the bell sounds are generated
from the ReBel configuration using the provided handbell tenor sound and
pitch shifting.
Four different scales are provided to which the handbells can be tuned:
major, natural minor, harmonic minor, and melodic minor. The bells can also
be shifted in pitch by a user-specified number of octaves and semitones.
Parameters
----------
numberOfBells : int
The total number of bells being rung.
config : Config
The ReBel config instance.
logFile : string
The name and location of the log file to write to.
'''
def __init__(self, numberOfBells, config, logFile):
# Set the working directory based on if ReBel is being run from an
# executable or the Python source code.
if getattr(sys, 'frozen', False):
# In a bundle
self.exeDir = os.path.dirname(sys.executable)
else:
# In normal python
self.exeDir = ""
# Initialise the inherited Log instance.
Log.__init__(self, logFile=logFile)
# Initialise the inherited Error instance.
Error.__init__(self)
self.numberOfBells = numberOfBells
self.config = config
#self.mixer = mixer
self.bellSemitones = []
self.bells = {}
# Define the semitone steps between the notes of the different
# available scales. In the comments below t = tone and s = semitone.
# Major scale formula: t t s t t t s
self.majorScale = [0, 2, 4, 5, 7, 9, 11, 12]
# Natural minor scale: t s t t s t t
self.naturalMinorScale = [0, 2, 3, 5, 7, 8, 10, 12]
# Harmonic minor scale: t s t t s 1.5t s
self.harmonicMinorScale = [0, 2, 3, 5, 7, 8, 11, 12]
# Melodic minor scale: t s t t t t s
self.melodicMinorScale = [0, 2, 3, 5, 7, 9, 11, 12]
# Set the scale being used to that given in the config.
if self.config.get('scale') == "major":
self.scale = self.majorScale
elif self.config.get('scale') == "naturalMinor":
self.scale = self.naturalMinorScale
elif self.config.get('scale') == "harmonicMinor":
self.scale = self.harmonicMinorScale
elif self.config.get('scale') == "melodicMinor":
self.scale = self.melodicMinorScale
# Check if the bell sounds already exist and match the given number of
# bells and tuning, if they don't exist or match then generate them.
self.regenerateBells = True
self.checkGeneratedBells()
if self.regenerateBells == True:
self.generateBells()
elif self.config.get('regenerateBells') == True: # avoid regenerating twice when both conditions hold
self.log("[INFO] Config regenerate bells option is True")
self.generateBells()
# Load in the bell sounds.
#self.loadBells()
self.frameRate = 500
self.running = True
self.playBellQueue = queue.Queue()
self.playBellThread = threading.Thread(target=self.playBell, args=(), daemon=True)
self.playBellThread.start()
def checkGeneratedBells(self):
'''
Check whether the bell spec file exists and if it does then check that
the parameters of the number of bells and their tunings as given in the
ReBel config match those in the bell spec file.
'''
# Parameter for check as to whether the bells match or not and so need
# to be read in or not.
self.regenerateBells = True
# Define the file location of the bell spec file.
self.bellSpecFileLocation = os.path.join(self.exeDir, "..", "audio", "bsf")
# Check bell spec file exists.
if os.path.isfile(self.bellSpecFileLocation):
bellSpec = {}
# Read in bell spec file and temporarily save parameters.
with open(self.bellSpecFileLocation, 'r') as bellSpecFile:
bellSpecFile_reader = csv.reader(bellSpecFile, delimiter=":")
for bellSpecLine in bellSpecFile_reader:
bellSpec[bellSpecLine[0]] = bellSpecLine[1]
bellSpec['scale'] = bellSpec['scale'].split(",")
bellSpec['scale'] = [int(b) for b in bellSpec['scale']]
bellSpec['numberOfBells'] = int(bellSpec['numberOfBells'])
bellSpec['octaveShift'] = int(bellSpec['octaveShift'])
bellSpec['pitchShift'] = int(bellSpec['pitchShift'])
# Compare the bell spec file parameters to the ReBel config
# parameters.
if bellSpec['scaleName'] == self.config.get('scale') \
and bellSpec['scale'] == self.scale \
and bellSpec['numberOfBells'] == self.config.get('numberOfBells') \
and bellSpec['octaveShift'] == self.config.get('octaveShift') \
and bellSpec['pitchShift'] == self.config.get('pitchShift') \
and bellSpec['handbellSource'] == self.config.get('handbellSource'):
# If all the parameters match then the bells do not need to be
# generated/regenerated.
self.regenerateBells = False
self.log("[INFO] Config file bell options match bell spec file")
else:
self.log("[INFO] Config file bell options do not match bell spec file, regenerating bells")
def writeBellSpecFile(self):
'''
Write the current number of bells and bell tunings as given in the
ReBel config to the bell spec file.
'''
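# The resulting file is a plain colon-separated key:value list, one entry per
# line, e.g. (illustrative values):
#   scaleName:major
#   scale:0,2,4,5,7,9,11,12
#   numberOfBells:8
#   octaveShift:0
#   pitchShift:0
#   handbellSource:rebel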
self.log("[INFO] Writing bell spec file")
# Define the location of the bell spec file.
self.bellSpecFileLocation = os.path.join(self.exeDir, "..", "audio", "bsf")
# Open the file and write the bell parameters of the ReBel config to it.
with open(self.bellSpecFileLocation, 'w') as bellSpecFile:
bellSpecFile.write("{}:{}\n".format("scaleName", self.config.get('scale')))
bellSpecFile.write("{}:".format("scale"))
for i, _ in enumerate(self.scale):
if i > 0:
bellSpecFile.write(",")
bellSpecFile.write("{}".format(self.scale[i]))
bellSpecFile.write("\n")
bellSpecFile.write("{}:{}\n".format("numberOfBells", self.config.get('numberOfBells')))
bellSpecFile.write("{}:{}\n".format("octaveShift", self.config.get('octaveShift')))
bellSpecFile.write("{}:{}\n".format("pitchShift", self.config.get('pitchShift')))
bellSpecFile.write("{}:{}\n".format("handbellSource", self.config.get('handbellSource')))
def generateBells(self):
'''
Generate the bell sounds using the source handbell tenor sound, pitch
shifting, and the bell parameters of the ReBel config.
'''
self.log("[INFO] Generating bells")
# Starting note is always the root note of the scale and so is zero
# semitone steps away from the root note.
self.bellSemitones = [0]
# Append the bell semitone list with the number of semitones between
# the current note of the scale and the root note, doing this until
# the bell semitone list has a length equal to the total number of
# bells being rung.
# The for loop sets the note semitones for bells in whole octaves.
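# For example, with 12 bells and the major scale the code below works out to
# [0, 2, 4, 5, 7, 9, 11, 12, 14, 16, 17, 19].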
j = 0
for j in range(int(self.numberOfBells/8)):
self.bellSemitones.append(self.scale[1]+j*12)
for i in range(2, 8):
if j*8 + i < self.numberOfBells:
self.bellSemitones.append(self.scale[i]+j*12)
# If the total number of bells is less than one octave then fill the
# semitone list up to that partial octave. Else if the total number of
# bells lies between two whole octaves then append the semitone list
# with that remaining partial octave.
if self.numberOfBells < 8:
for i in range(self.numberOfBells):
if len(self.bellSemitones) < self.numberOfBells:
self.bellSemitones.append(self.scale[i+1])
else:
for i in range(8):
if len(self.bellSemitones) < self.numberOfBells:
self.bellSemitones.append(self.scale[i+1]+(j+1)*12)
# Try to read in the tenor handbell sound from the handbell source
# location. If the handbell source does not exist then try the default
# ReBel handbell source location. If that does not exist too then throw
# an error and quit ReBel.
if self.config.get('handbellSource') == 'abel':
try:
sound = AudioSegment.from_file(self.config.get('abelBellFileLocation'), format="wav")
except Exception:
self.log("[WARNING] Abel handbell source file not found, defaulting to ReBel handbell source file")
try:
sound = AudioSegment.from_file(self.config.get('rebelBellFileLocation'), format="wav")
except Exception:
self.log("ReBel handbell source file not found, terminating program...", printMessage=False)
self.error("ReBel handbell source file not found, terminating program...", 1)
elif self.config.get('handbellSource') == 'rebel':
try:
sound = AudioSegment.from_file(self.config.get('rebelBellFileLocation'), format="wav")
except Exception:
self.log("ReBel handbell source file not found, terminating program...", printMessage=False)
self.error("ReBel handbell source file not found, terminating program...", 1)
else:
self.log("[WARNING] Handbell source not set, defaulting to ReBel handbell source file")
try:
sound = AudioSegment.from_file(self.config.get('rebelBellFileLocation'), format="wav")
except Exception:
self.log("ReBel handbell source file not found, terminating program...", printMessage=False)
self.error("ReBel handbell source file not found, terminating program...", 1)
# Apply high and low spectral filters to the tenor handbell sound to
# improve the sound quality of the bell sounds generated through the
# pitch shifting. Cutoff frequencies were determined by inspecting
# spectrograms of the tenor handbell sounds.
if self.config.get('handbellSource') == 'abel':
sound = sound.high_pass_filter(cutoff=500)
sound = sound.high_pass_filter(cutoff=500)
sound = sound.high_pass_filter(cutoff=500)
elif self.config.get('handbellSource') == 'rebel':
sound = sound.high_pass_filter(cutoff=400)
sound = sound.high_pass_filter(cutoff=400)
sound = sound.high_pass_filter(cutoff=400)
sound = sound.low_pass_filter(cutoff=7750)
sound = sound.low_pass_filter(cutoff=7750)
sound = sound.low_pass_filter(cutoff=7750)
# Generate the handbell sounds via pitch shifting done by changing
# sampling rates, with the equation to determine the new sampling rate being
# newSamplingRate = oldSamplingRate * 2 ^ (O + (S + s) / 12).
# Here O is the number of octaves to shift by, S is the number of
# semitones to shift by and s is the semitone difference between the
# desired note and the root note of the scale, 12 being the number of
# semitones in an octave and converts the semitones to partial octaves.
# Simply copy the tenor handbell sound whilst overriding the original
# sampling rate with the new one to change the pitch to the desired
# note.
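# As a hypothetical worked example: with octaveShift = 0, pitchShift = 0 and
# a bell 7 semitones above the root, a 44100 Hz source is resampled at
# 44100 * 2 ** (7 / 12) ~= 66075 Hz, raising the pitch by a perfect fifth.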
for i, semitone in enumerate(self.bellSemitones):
octave = 12
new_sample_rate = int(sound.frame_rate * (2.0 ** (self.config.get('octaveShift') + (self.config.get('pitchShift')+semitone)/octave)))
pitchShifted_sound = sound._spawn(sound.raw_data, overrides={'frame_rate': new_sample_rate})
# The pitch shifting via changing sampling rate inherently changes
# the frame rate of the sound, therefore set all the bell sounds to
# the same frame rate, here chosen to be 44100 Hz.
pitchShifted_sound = pitchShifted_sound.set_frame_rate(44100)
# Apply fade outs to the bell sounds so that they don't ring on for
# too long.
if self.config.get('handbellSource') == 'abel':
fadeTime = int(len(pitchShifted_sound)*0.95)
pitchShifted_sound = pitchShifted_sound.fade_out(fadeTime)
pitchShifted_sound = pitchShifted_sound.fade_out(fadeTime)
pitchShifted_sound = pitchShifted_sound.fade_out(fadeTime)
pitchShifted_sound = pitchShifted_sound.fade_out(fadeTime)
elif self.config.get('handbellSource') == 'rebel':
fadeTime = int(len(pitchShifted_sound)*0.95)
#pitchShifted_sound = pitchShifted_sound.fade_out(fadeTime)
#pitchShifted_sound = pitchShifted_sound.fade_out(fadeTime)
#pitchShifted_sound = pitchShifted_sound.fade_out(fadeTime)
# Save the generated bell sound, with the file name being equal to
# the bell number.
pitchShifted_sound.export(os.path.join(self.exeDir, "..", "audio", "{}.wav".format(self.numberOfBells - i)), format='wav')
# Write the new bell spec file.
self.writeBellSpecFile()
def loadBells(self):
'''
Read in the bell sounds and save them to an internal Audio variable.
'''
self.log("[INFO] Loading in bells")
# Read in the bell sounds using the convention that the file names
# equal the bell numbers.
for i in range(self.numberOfBells):
self.bells[i+1] = self.mixer.Sound(os.path.join(self.exeDir, "..", "audio", "{}.wav".format(i+1)))
def playBell(self):
self.mixer = pygame.mixer
self.mixer.set_num_channels(self.config.get('numberOfBells'))
self.loadBells()
while self.running:
start = time.time()
try:
bellNumber = self.playBellQueue.get_nowait()
except queue.Empty:
pass
else:
self.mixer.Channel(bellNumber-1).play(self.bells[bellNumber])
time.sleep(max(1./self.frameRate - (time.time() - start), 0))
def play(self, bellNumber):
self.playBellQueue.put(bellNumber)
|
pronterface_serial.py
|
import threading
import time
import traceback
from serial import SerialException
from serial.tools import list_ports
# Note: the _() translation helper used below is provided elsewhere by the
# application's locale setup.
# --------------------------------------------------------------
# Printer connection handling
# --------------------------------------------------------------
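# Hypothetical usage from the console prompt: "connect /dev/ttyUSB0 250000"
# overrides the saved port and baud rate, while a bare "connect" falls back
# to the first detected port at the configured (or default 115200) baud rate.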
def connect_to_printer(self, port, baud, dtr):
try:
self.p.connect(port, baud, dtr)
except SerialException as e:
# Currently, there is no errno, but it should be there in the future
if e.errno == 2:
self.logError(_("Error: You are trying to connect to a non-existing port."))
elif e.errno == 8:
self.logError(_("Error: You don't have permission to open %s.") % port)
self.logError(_("You might need to add yourself to the dialout group."))
else:
self.logError(traceback.format_exc())
# Kill the scope anyway
return False
except OSError as e:
if e.errno == 2:
self.logError(_("Error: You are trying to connect to a non-existing port."))
else:
self.logError(traceback.format_exc())
return False
self.statuscheck = True
self.status_thread = threading.Thread(target = self.statuschecker)
self.status_thread.start()
return True
def do_connect(self, l):
a = l.split()
p = self.scanserial()
port = self.settings.port
if (port == "" or port not in p) and len(p) > 0:
port = p[0]
baud = self.settings.baudrate or 115200
if len(a) > 0:
port = a[0]
if len(a) > 1:
try:
baud = int(a[1])
except:
self.log("Bad baud value '" + a[1] + "' ignored")
if len(p) == 0 and not port:
self.log("No serial ports detected - please specify a port")
return
if len(a) == 0:
self.log("No port specified - connecting to %s at %dbps" % (port, baud))
if port != self.settings.port:
self.settings.port = port
self.save_in_rc("set port", "set port %s" % port)
if baud != self.settings.baudrate:
self.settings.baudrate = baud
self.save_in_rc("set baudrate", "set baudrate %d" % baud)
self.connect_to_printer(port, baud, self.settings.dtr)
def scanserial(self):
"""scan for available ports. return a list of device names."""
return [port.device for port in list_ports.comports()]
def online(self):
self.log("\rPrinter is now online")
self.write_prompt()
def do_disconnect(self, l):
self.p.disconnect()
def help_disconnect(self):
self.log("Disconnects from the printer")
def do_block_until_online(self, l):
while not self.p.online:
time.sleep(0.1)
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# test suite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
with self.assertWarnsRegex(DeprecationWarning, 'use is_alive()'):
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@requires_type_collecting
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_daemon_thread(self):
r, w = self.pipe()
code = textwrap.dedent(f"""
import threading
import sys
channel = open({w}, "w", closefd=False)
def func():
pass
thread = threading.Thread(target=func, daemon=True)
try:
thread.start()
except RuntimeError as exc:
print("ok: %s" % exc, file=channel, flush=True)
else:
thread.join()
print("fail: RuntimeError not raised", file=channel, flush=True)
""")
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
msg = os.read(r, 100).decode().rstrip()
self.assertEqual("ok: daemon thread are not supported "
"in subinterpreters", msg)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
if __name__ == "__main__":
unittest.main()
|
ThreadLocal.py
|
'''
ThreadLocal
Each thread has its own data; passing it around as plain local variables
through every function call quickly becomes cumbersome.
Summary:
A ThreadLocal is a global variable, but each thread can only read and write
its own independent copy, so threads never interfere with one another. It
solves the problem of passing per-thread state between the functions called
within a single thread.
Multi-tasking options:
multiprocessing
multithreading
asynchronous IO (event-driven programming)
single-threaded asynchronous programming: coroutines
Task types:
CPU-bound: heavy computation such as audio/video processing ---> best handled in C or with multiple processes
IO-bound: network and disk IO ---> multithreading
'''
import threading
local_user = threading.local()
def process_pepo():
std = local_user.pepo
print('Hello, %s (in %s)' % (std, threading.current_thread().name))
def process_thread(name):
local_user.pepo = name
process_pepo()
t1 = threading.Thread(target=process_thread, args=('li',), name="Thread-A")
t2 = threading.Thread(target=process_thread, args=('zhang',), name="Thread-B")
t1.start()
t2.start()
t1.join()
t2.join()
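# Illustrative contrast (a minimal sketch, not part of the original demo): without
# threading.local the per-thread value has to be passed explicitly through every
# function in the call chain, which is exactly the bookkeeping the docstring above
# calls cumbersome.
def process_pepo_explicit(pepo):
    print('Hello, %s (in %s)' % (pepo, threading.current_thread().name))
def process_thread_explicit(name):
    # every function along the way must accept and forward `name`
    process_pepo_explicit(name)
t3 = threading.Thread(target=process_thread_explicit, args=('wang',), name="Thread-C")
t3.start()
t3.join()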
|
model.py
|
from __future__ import absolute_import
from __future__ import print_function
import copy
import random
from itertools import chain, product
from multiprocessing import Process
from flask import Flask, request, jsonify
from flask_cors import CORS
import numpy as np
from . import scoring
from .dataloader.goboard import GoBoard
from .processor import ThreePlaneProcessor
from six.moves import range
class HTTPFrontend(object):
'''
HTTPFrontend is a simple Flask app served on localhost:8080, exposing a REST API to predict
go moves.
'''
def __init__(self, bot, port=8080):
self.bot = bot
self.port = port
def start_server(self):
''' Start Go model server '''
        self.server = Process(target=self.run)
self.server.start()
def stop_server(self):
''' Terminate Go model server '''
self.server.terminate()
self.server.join()
def run(self):
''' Run flask app'''
app = Flask(__name__)
CORS(app, resources={r"/prediction/*": {"origins": "*"}})
self.app = app
@app.route('/dist/<path:path>')
def static_file_dist(path):
return open("ui/dist/" + path).read()
@app.route('/large/<path:path>')
def static_file_large(path):
return open("ui/large/" + path).read()
@app.route('/')
def home():
# Inject game data into HTML
board_init = 'initialBoard = ""' # backup variable
board = {}
for row in range(19):
board_row = {}
for col in range(19):
# Get the cell value
cell = str(self.bot.go_board.board.get((col, row)))
# Replace values with numbers
                    # Value will be 'w', 'b' or None
cell = cell.replace("None", "0")
cell = cell.replace("b", "1")
cell = cell.replace("w", "2")
# Add cell to row
board_row[col] = int(cell) # must be an int
# Add row to board
board[row] = board_row
            board_init = str(board)  # lazily convert the dict to a JSON-like string
return open("ui/demoBot.html").read().replace('"__i__"', 'var boardInit = ' + board_init) # output the modified HTML file
@app.route('/sync', methods=['GET', 'POST'])
def exportJSON():
export = {}
export["hello"] = "yes?"
return jsonify(**export)
@app.route('/prediction', methods=['GET', 'POST'])
def next_move():
'''Predict next move and send to client.
Parses the move and hands the work off to the bot.
'''
content = request.json
col = content['i']
row = content['j']
print('Received move:')
print((col, row))
self.bot.apply_move('b', (row, col))
bot_row, bot_col = self.bot.select_move('w')
print('Prediction:')
print((bot_col, bot_row))
result = {'i': bot_col, 'j': bot_row}
json_result = jsonify(**result)
return json_result
self.app.run(host='0.0.0.0', port=self.port, debug=True, use_reloader=False)
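# Usage sketch (hedged, not from the original source): once start_server() is
# running, a client could request the bot's reply roughly like this; host and
# port depend on how HTTPFrontend was constructed (default localhost:8080):
#
#   import requests
#   resp = requests.post('http://localhost:8080/prediction',
#                        json={'i': 3, 'j': 16})   # the human's black move: i = col, j = row
#   bot_move = resp.json()                         # e.g. {'i': 15, 'j': 3}, the bot's white reply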
class GoModel(object):
'''Tracks a board and selects moves.'''
def __init__(self, model, processor):
'''
Parameters:
-----------
processor: Instance of betago.processor.GoDataLoader, e.g. SevenPlaneProcessor
model: In principle this can be anything that can predict go moves, given data provided by the above
processor. In practice it may very well be (an extension of) a keras model plus glue code.
'''
self.model = model
self.processor = processor
self.go_board = GoBoard(19)
self.num_planes = processor.num_planes
def set_board(self, board):
'''Set the board to a specific state.'''
self.go_board = copy.deepcopy(board)
def apply_move(self, color, move):
''' Apply the human move'''
return NotImplemented
def select_move(self, bot_color):
''' Select a move for the bot'''
return NotImplemented
class KerasBot(GoModel):
'''
KerasBot takes top_n predictions of a keras model and tries to apply the best move. If that move is illegal,
choose the next best, until the list is exhausted. If no more moves are left to play, continue with random
moves until a legal move is found.
'''
def __init__(self, model, processor, top_n=10):
super(KerasBot, self).__init__(model=model, processor=processor)
self.top_n = top_n
def apply_move(self, color, move):
# Apply human move
self.go_board.apply_move(color, move)
def select_move(self, bot_color):
move = get_first_valid_move(self.go_board, bot_color,
self._move_generator(bot_color))
if move is not None:
self.go_board.apply_move(bot_color, move)
return move
def _move_generator(self, bot_color):
return chain(
# First try the model.
self._model_moves(bot_color),
# If none of the model moves are valid, fill in a random
# dame point. This is probably not a very good move, but
# it's better than randomly filling in our own eyes.
fill_dame(self.go_board),
# Lastly just try any open space.
generate_in_random_order(all_empty_points(self.go_board)),
)
def _model_moves(self, bot_color):
# Turn the board into a feature vector.
# The (0, 0) is for generating the label, which we ignore.
X, label = self.processor.feature_and_label(
bot_color, (0, 0), self.go_board, self.num_planes)
X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
# Generate bot move.
pred = np.squeeze(self.model.predict(X))
top_n_pred_idx = pred.argsort()[-self.top_n:][::-1]
for idx in top_n_pred_idx:
prediction = int(idx)
pred_row = prediction // 19
pred_col = prediction % 19
pred_move = (pred_row, pred_col)
yield pred_move
class RandomizedKerasBot(GoModel):
'''
Takes a weighted sample from the predictions of a keras model. If none of those moves is legal,
pick a random move.
'''
def __init__(self, model, processor):
super(RandomizedKerasBot, self).__init__(model=model, processor=processor)
def apply_move(self, color, move):
# Apply human move
self.go_board.apply_move(color, move)
def select_move(self, bot_color):
move = get_first_valid_move(self.go_board, bot_color,
self._move_generator(bot_color))
if move is not None:
self.go_board.apply_move(bot_color, move)
return move
def _move_generator(self, bot_color):
return chain(
# First try the model.
self._model_moves(bot_color),
# If none of the model moves are valid, fill in a random
# dame point. This is probably not a very good move, but
# it's better than randomly filling in our own eyes.
fill_dame(self.go_board),
# Lastly just try any open space.
generate_in_random_order(all_empty_points(self.go_board)),
)
def _model_moves(self, bot_color):
# Turn the board into a feature vector.
# The (0, 0) is for generating the label, which we ignore.
X, label = self.processor.feature_and_label(
bot_color, (0, 0), self.go_board, self.num_planes)
X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
# Generate moves from the keras model.
n_samples = 20
pred = np.squeeze(self.model.predict(X))
# Cube the predictions to increase the difference between the
# best and worst moves. Otherwise, it will make too many
# nonsense moves. (There's no scientific basis for this, it's
# just an ad-hoc adjustment)
pred = pred * pred * pred
pred /= pred.sum()
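        # Worked example (illustrative): two moves with probabilities 0.4 and 0.1 start
        # at a 4:1 ratio; after cubing (0.064 vs 0.001) and renormalising, the ratio
        # becomes 64:1, so weak moves are sampled far less often.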
moves = np.random.choice(19 * 19, size=n_samples, replace=False, p=pred)
for prediction in moves:
pred_row = prediction // 19
pred_col = prediction % 19
yield (pred_row, pred_col)
class IdiotBot(GoModel):
'''
Play random moves, like a good 30k bot.
'''
def __init__(self, model=None, processor=ThreePlaneProcessor()):
super(IdiotBot, self).__init__(model=model, processor=processor)
def apply_move(self, color, move):
self.go_board.apply_move(color, move)
def select_move(self, bot_color):
move = get_first_valid_move(
self.go_board,
bot_color,
            # fall back to any empty point, in random order (same helper the other bots use)
            generate_in_random_order(all_empty_points(self.go_board))
)
if move is not None:
self.go_board.apply_move(bot_color, move)
return move
def get_first_valid_move(board, color, move_generator):
for move in move_generator:
if move is None or board.is_move_legal(color, move):
return move
return None
def generate_in_random_order(point_list):
"""Yield all points in the list in a random order."""
point_list = copy.copy(point_list)
random.shuffle(point_list)
for candidate in point_list:
yield candidate
def all_empty_points(board):
"""Return all empty positions on the board."""
empty_points = []
for point in product(list(range(board.board_size)), list(range(board.board_size))):
if point not in board.board:
empty_points.append(point)
return empty_points
def fill_dame(board):
status = scoring.evaluate_territory(board)
# Pass when all dame are filled.
if status.num_dame == 0:
yield None
for dame_point in generate_in_random_order(status.dame_points):
yield dame_point
|
sample_bootstrap.py
|
import sys, time
from threading import Thread
sys.path.insert(0, './../')
import PoP
import myGetMVP, importGameRes, myGetRating # function written by the user
# the nodeID config is already generated, please refer to sample_setup.py
nodeID, matchID = "Alice", 1
myPoP = PoP.handler(nodeID=nodeID, winnerFunc=myGetMVP.getMVP, ratingFunc=myGetRating.getRating)
def run_match(thisMatchID):
@myPoP.run_conn(matchID=thisMatchID)
def this_match():
########## API entry and p2p entry threads are running, you can execute any code here ##########
while len(myPoP.return_plyrList()) < 2: time.sleep(1)
plyrList = myPoP.return_plyrList()
# assume a match record has been produced from a match: 1533081738_4035507616_match.data
with open('1533081738_4035507616_match.data', 'r') as f: rawGameRec = f.read()
        gamePlyrList = sorted([item for key, item in plyrList.items()])  # assume the match record's player order corresponds to this sorted player list
gameRec = importGameRes.importGameResult(rawGameRec, gamePlyrList)
myPoP.verify_game(gameRec) # this will return (gameRec, MVP) for user operations if required
res = myPoP.broadcast_gameRec()
print(res)
        myPoP.terminate()  # PoP only terminates if this is run, otherwise it freezes at the end of this function
return
this_match()
waitingMatch = True
@myPoP.run_blockchain(saveState=False)
def run_blockchain():
################ blockchain entry is running, you can execute any code here ################
print("blockchain is running")
while waitingMatch: time.sleep(1)
print(myPoP.return_chain_status())
myPoP.terminate()
if __name__ == '__main__':
try:
blockchain = Thread(target=run_blockchain)
blockchain.daemon = True
blockchain.start()
run_match(thisMatchID=matchID)
time.sleep(1)
waitingMatch = False
        while blockchain.is_alive(): time.sleep(1)
except (KeyboardInterrupt, SystemExit):
print("example completed")
|
main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import configargparse
import cv2 as cv
from gestures.tello_gesture_controller import TelloGestureController
from gestures.swarm_gesture_controller import SwarmGestureController
from utils import CvFpsCalc
from djitellopy import Tello
from djitellopy import TelloSwarm
from gestures import *
import threading
def get_args():
print('## Reading configuration ##')
parser = configargparse.ArgParser(default_config_files=['config.txt'])
parser.add('-c', '--my-config', required=False, is_config_file=True, help='config file path')
parser.add("--device", type=int)
parser.add("--width", help='cap width', type=int)
parser.add("--height", help='cap height', type=int)
parser.add("--is_keyboard", help='To use Keyboard control by default', type=bool)
parser.add('--use_static_image_mode', action='store_true', help='True if running on photos')
parser.add("--min_detection_confidence",
help='min_detection_confidence',
type=float)
parser.add("--min_tracking_confidence",
help='min_tracking_confidence',
type=float)
parser.add("--buffer_len",
help='Length of gesture buffer',
type=int)
args = parser.parse_args()
return args
def select_mode(key, mode):
number = -1
if 48 <= key <= 57: # 0 ~ 9
number = key - 48
if key == 110: # n
mode = 0
if key == 107: # k
mode = 1
if key == 104: # h
mode = 2
return number, mode
def main():
# init global vars
global gesture_buffer
global gesture_id
global battery_status
global swarm_behavior # If program will command swarm or individual tello
swarm_behavior = True
if swarm_behavior:
global swarm_bat_stat # PATRICK
swarm_bat_stat = [] # PATRICK
# Argument parsing
args = get_args()
KEYBOARD_CONTROL = args.is_keyboard
WRITE_CONTROL = False
in_flight = False
# Camera preparation
if not swarm_behavior: # ORIGINAL (used for single drone)
tello = Tello()
tello.connect()
# Use tello's camera - ORIGINAL
cap = tello.get_frame_read()
tello.streamon()
# Init Tello Controllers
gesture_controller = TelloGestureController(tello) # ORIGINAL
keyboard_controller = TelloKeyboardController(tello) # ORIGINAL
else:
# Multi Drone Control - PATRICK
Tello1_IP = "192.168.1.100"
Tello2_IP = "192.168.1.200"
drone1 = Tello(Tello1_IP)
drone2 = Tello(Tello2_IP)
swarm = TelloSwarm([drone1,drone2])
swarm.connect()
# Use computer's camera - PATRICK
cap = cv.VideoCapture(0)
# Init Swarm Controller
gesture_controller = SwarmGestureController(swarm) # PATRICK
gesture_detector = GestureRecognition(args.use_static_image_mode, args.min_detection_confidence,
args.min_tracking_confidence)
gesture_buffer = GestureBuffer(buffer_len=args.buffer_len)
def tello_control(key, keyboard_controller, gesture_controller): # ORIGINAL
global gesture_buffer
if KEYBOARD_CONTROL:
keyboard_controller.control(key)
else:
gesture_controller.gesture_control(gesture_buffer)
def swarm_control(gesture_controller): # PATRICK
global gesture_buffer
if not KEYBOARD_CONTROL: # Wait to press G key
gesture_controller.gesture_control(gesture_buffer)
def tello_battery(tello): # ORIGINAL
global battery_status
try:
battery_status = tello.get_battery() # had [:-2] at end that caused type error
except:
battery_status = -1
def swarm_battery(swarm): # PATRICK
global swarm_bat_stat
try:
for count,tello in enumerate(swarm):
swarm_bat_stat[count] = tello.get_battery()
except:
for count,tello in enumerate(swarm):
swarm_bat_stat[count] = -1
# FPS Measurement
cv_fps_calc = CvFpsCalc(buffer_len=10)
mode = 0
number = -1
if not swarm_behavior:
battery_status = -1 # ORIGINAL
else:
for tello in swarm: # PATRICK
swarm_bat_stat.append(-2) # Set to -2 here to be different from error value in swarm_battery()
while True:
fps = cv_fps_calc.get()
# Process Key (ESC: end)
key = cv.waitKey(1) & 0xff
if key == 27: # ESC
break
elif key == 32: # Space
if not in_flight:
if not swarm_behavior:
tello.takeoff() # Take-off drone ORIGINAL
else:
swarm.takeoff() # Take-off drone PATRICK
in_flight = True
elif in_flight:
if not swarm_behavior:
tello.land() # Land tello ORIGINAL
else:
swarm.land() # Land tello PATRICK
in_flight = False
elif key == ord('k'): # Keyboard Control
mode = 0
KEYBOARD_CONTROL = True
WRITE_CONTROL = False
# Stop moving
if not swarm_behavior:
tello.send_rc_control(0, 0, 0, 0)
else:
swarm.parallel(lambda i, tello: tello.send_rc_control(0,0,0,0) )
elif key == ord('g'): # Gesture Control
KEYBOARD_CONTROL = False
elif key == ord('n'): # Save Key Points
mode = 1
WRITE_CONTROL = True
KEYBOARD_CONTROL = True
if WRITE_CONTROL: # Generate Training Data For Gesture
number = -1
if 48 <= key <= 57: # 0 ~ 9
number = key - 48
# Camera capture
if not swarm_behavior:
image = cap.frame # ORIGINAL - Use tello's camera
else:
success, image = cap.read() # PATRICK - Use computer's camera
if not success:
continue # try to capture another frame successfully
debug_image, gesture_id = gesture_detector.recognize(image, number, mode)
gesture_buffer.add_gesture(gesture_id)
# Start control threads
if not swarm_behavior:
threading.Thread(target=tello_control, args=(key, keyboard_controller, gesture_controller,)).start() # ORIGINAL
threading.Thread(target=tello_battery, args=(tello,)).start() # ORIGINAL
else:
threading.Thread(target=swarm_control, args=(gesture_controller,)).start() # PATRICK
threading.Thread(target=swarm_battery, args=(swarm,)).start() # PATRICK
debug_image = gesture_detector.draw_info(debug_image, fps, mode, number)
# Battery status and image rendering
if not swarm_behavior:
cv.putText(debug_image, "Battery: {}".format(battery_status), (5, 720 - 5), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) # ORIGINAL
else:
cv.putText(debug_image, "Allow Flip: {}".format(gesture_controller.AllowF), (5, 480 - 5), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) # PATRICK
for count,tello in enumerate(swarm):
cv.putText(debug_image, f"tello {count+1} Battery: {swarm_bat_stat[count]}", (5, 400 - count * 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) # PATRICK
cv.imshow('Tello Gesture Recognition', debug_image)
    if not swarm_behavior:
tello.land()
tello.end()
else:
swarm.parallel(lambda i, tello: tello.land() )
swarm.end()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
database_screen.py
|
import pathlib
import threading
import traceback
import json
from colorama import Style, Fore
from cryptography.fernet import InvalidToken, Fernet
from YeetsMenu.menu import Menu
from YeetsMenu.option import Option
from pw_manager.utils import utils, constants, decorators, errors
from pw_manager.db import Database
from pw_manager.utils.legacy_encryption import encryptor
from pw_manager.ui import db_sync_screen
@decorators.catch_ctrl_c
def create_database():
utils.clear_screen()
utils.print_noice("Database creation")
print(f"{constants.colors[1]}Where should we create the database? If the folder doesn't exist, it will be created. Press ctrl+c to abort the creation.")
print()
try:
path_str = utils.ask_till_input(f"{constants.colors[1]}Please enter a folder to store the database!\n > {constants.colors[0]}")
except KeyboardInterrupt:
print(f"{Fore.RED}Creation aborted!{Style.RESET_ALL}")
return
try:
file_name = input(f"{constants.colors[1]}Please enter a filename. Leave it empty for the default (database.db)\n > {constants.colors[0]}")
except KeyboardInterrupt:
print(f"{Fore.RED}Creation aborted!{Style.RESET_ALL}")
return
try:
while True:
password = utils.ask_till_input_secret(f"{constants.colors[1]}Please enter a password for this database!\n > {constants.colors[0]}")
confirmation_password = utils.ask_till_input_secret(f"{constants.colors[1]}Please confirm the password for this database!\n > {constants.colors[0]}")
if password != confirmation_password:
yes_no = utils.ask_till_input(f"{constants.colors[1]}Passwords don't match! Do you want to try again? y/N\n > {constants.colors[0]}")
if yes_no.lower() == "n":
return
else:
break
except KeyboardInterrupt:
print(f"{Fore.RED}Creation aborted!{Style.RESET_ALL}")
return
utils.reset_style()
folder = pathlib.Path(path_str)
if not folder.exists():
folder.mkdir(parents=True)
if not file_name:
file_name = "database.db"
full_db_path = pathlib.Path(str(folder.absolute()) + "/" + file_name)
db: Database = Database(str(full_db_path), password)
done_event = threading.Event()
try:
# threading.Thread(target=utils.run_spinning_animation_till_event, args=["Creating database...", done_event]).start()
db.create()
done_event.set()
except errors.DatabaseAlreadyFoundException:
done_event.set()
print(f"{Fore.RED}A database already exists at that path!{Style.RESET_ALL}")
return
except Exception:
done_event.set()
print(f"{Fore.RED}An error has occurred!{Style.RESET_ALL}")
traceback.print_exc()
return
done_event.set()
print(f"{Fore.GREEN}Successfully created a database at {constants.colors[0]}{str(full_db_path.absolute())}{Fore.GREEN}!\n\n{constants.colors[1]}Note: You still have to select the database in order to use it!{Style.RESET_ALL}")
@decorators.catch_ctrl_c
def select_database():
utils.clear_screen()
cache_file_path = pathlib.Path(utils.get_cache_file())
if not cache_file_path.exists():
print(f"{constants.colors[1]}No previously created databases found! You might want to create one or add an already existing one!")
return
with open(str(cache_file_path.absolute())) as f:
cache_file: dict = json.load(f)
tmp_dict: dict = dict()
for key, value in cache_file.items():
if pathlib.Path(value).exists():
tmp_dict[len(tmp_dict.keys())] = value
cache_file = tmp_dict
with open(str(cache_file_path.absolute()), "w") as f:
json.dump(cache_file, f, indent=2)
db_selection_menu = Menu(utils.get_noice_text("Which database do you want to use?"), colors=constants.colors)
def real_select_database(path: str):
while True:
utils.clear_screen()
utils.print_noice(f"Currently selecting database: {path}")
password = utils.ask_till_input_secret(f"{constants.colors[1]}Password for the database: {constants.colors[0]}")
utils.reset_style()
try:
db: Database = Database(path, password)
db.read()
constants.db_file = db
break
except InvalidToken:
print(f"{Fore.RED}Invalid password!{Style.RESET_ALL}")
try_again = utils.ask_till_input("Do you want to try again? y/n: ")
if try_again.lower() == "y":
continue
else:
return
print(f"{Fore.GREEN}Successfully selected {constants.colors[1]}{constants.db_file.path}{Fore.GREEN} as the database!{Style.RESET_ALL}")
for i in cache_file.values():
db_selection_menu.add_selectable(Option(f"- {i}", real_select_database, i, return_after_execution=True, skip_enter_confirmation=True))
db_selection_menu.run()
@decorators.catch_ctrl_c
def add_existing_database():
utils.print_noice("Add existing database")
db_path = utils.ask_till_input(f"{constants.colors[1]}Please enter the path of the existing database\n > {constants.colors[0]}")
utils.reset_style()
db_path = pathlib.Path(db_path)
if not db_path.exists():
print(f"{Fore.RED}The directory {constants.colors[0]}{str(db_path.absolute())}{Fore.RED} doesn't exist!")
return
if db_path.is_dir() and not db_path.is_file():
print(f"{Fore.RED}The path has to point to the database file and not a directory!{Style.RESET_ALL}")
return
success = utils.add_db_path_to_cache(str(db_path.absolute()))
if success:
print(f"{Fore.GREEN}Successfully added path {constants.colors[0]}{str(db_path.absolute())}{Fore.GREEN} to the list of known databases!{Style.RESET_ALL}")
else:
print(f"{Fore.RED}Failed to add the path {constants.colors[0]}{str(db_path.absolute())}{Fore.RED} to the list of known databases because it already is an entry!{Style.RESET_ALL}")
@decorators.catch_ctrl_c
@decorators.require_valid_db
def import_v1_database():
utils.clear_screen()
utils.print_noice("Import a v1 database")
print(f"{constants.colors[1]}This will import all the entries from the specified database into the currently loaded one!")
path = utils.ask_till_input(f"{constants.colors[1]}Please enter the directory path of the v1 database!\n > {constants.colors[0]}")
path = pathlib.Path(path)
if not path.exists():
print(f"{Fore.RED}That path doesn't exist!{Style.RESET_ALL}")
return
if path.is_dir() and not pathlib.Path(str(path.absolute()) + "/database.db").exists():
print(f"{Fore.RED}That path doesn't have a database.db file!")
return
if path.is_file() and path.name == "database.db":
path = path.parent
db_password = utils.ask_till_input_secret(f"{constants.colors[1]}Enter the password of the v1 database file!\n > {constants.colors[0]}")
with open(str(path.absolute()) + "/database.db.salt", "rb") as f:
salt = f.read()
with open(str(path.absolute()) + "/database.db", "rb") as f:
data = f.read()
key, salt = encryptor.generate_key_using_password(db_password, salt)
fernet = Fernet(key)
try:
unencrypted_content = fernet.decrypt(data)
except InvalidToken:
print(f"{Fore.RED}Wrong password!{Style.RESET_ALL}")
return
event = threading.Event()
threading.Thread(target=utils.run_spinning_animation_till_event, args=["Importing database...", event]).start()
json_content: dict = json.loads(unencrypted_content.decode())
db: Database = constants.db_file
for key, value in json_content.items():
website_or_usage = key
description = value.get("description")
username = value.get("username")
password = value.get("password")
db.add_database_entry(website_or_usage=website_or_usage, description=description, username=username, password=password, should_write=False)
db.write()
event.set()
def show():
utils.clear_screen()
menu = Menu(utils.get_noice_text("Database menu"), colors=constants.colors)
menu.add_selectable(Option("Create database", create_database))
menu.add_selectable(Option("Select database", select_database, return_after_execution=True))
menu.add_selectable(Option("Add already existing database", add_existing_database))
menu.add_selectable(Option("Import v1 database", import_v1_database))
menu.add_selectable(Option("Sync settings", db_sync_screen.show, skip_enter_confirmation=True))
menu.run()
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
# PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
DIR = os.path.dirname(os.path.abspath(__file__))
DIR_TO_LABELS = '/'.join(DIR.split("/")[0:-1]) + "/" + MODEL_NAME
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map
try:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
except:
label_map = label_map_util.load_labelmap(os.path.join(DIR_TO_LABELS, 'hand_label_map.pbtxt'))
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
class Params:
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np,
img_mid_point, hand_buffer):
err = None
width_cur = 0
i = 0
if (scores[i] > score_thresh):
## if greater than threshold
## calculate the boxes
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
## make the tuples of left top and right bottom
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
## calculate the width of the boxes
width_cur = abs(left - right)
## get the mid point of boxes
mid_cur = mid(p1, p2)
# if hand buffer is not None calculate the mid point of previous box
if not None in hand_buffer:
mid_buf = mid(hand_buffer[0], hand_buffer[1])
width_buf = hand_buffer[1][0] - hand_buffer[0][0]
        # accept the detection if there is no previous box, or if the current box is close
        # to the previous one (small euclidean distance between midpoints and similar width)
if None in hand_buffer or (euclidean(mid_cur, mid_buf) < 8.5 and abs(width_buf - width_cur) < 30):
if not None in hand_buffer:
print('eu' + str(euclidean(mid_cur, mid_buf)))
hand_buffer[0] = p1
hand_buffer[1] = p2
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
err = get_err(left, right, top, bottom, img_mid_point)
hand_buffer[2] = err
else:
hand_buffer = [None, None, None]
else:
## the score not pass the threshold
hand_buffer = [None, None, None]
return hand_buffer, width_cur
def euclidean(pt1, pt2):
""" Calculate the euclidean distance """
    return np.sqrt((pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2)
def mid(p1, p2):
    """Calculate the midpoint (x, y) of the box spanned by p1 and p2."""
    mid_x = (p1[0] + p2[0]) / 2
    mid_y = (p1[1] + p2[1]) / 2
    return mid_x, mid_y
def get_err(left, right, top, bottom, img_mid_point):
""" get the error position of hand with consider mid point"""
target_mid_p = np.abs(left - right) / 2
offset = (left + target_mid_p)
print(offset)
err = offset - img_mid_point
return err
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
""" Draw fps """
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
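# Usage sketch (hedged, not from the original source): a caller would typically
# combine the pieces above roughly like this; width/height/mid-point values are
# illustrative only.
#
#   detection_graph, sess = load_inference_graph()
#   stream = WebcamVideoStream(src=0, width=320, height=180).start()
#   hand_buffer = [None, None, None]
#   while True:
#       frame = stream.read()
#       boxes, scores = detect_objects(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),
#                                      detection_graph, sess)
#       hand_buffer, width = draw_box_on_image(1, _score_thresh, scores, boxes,
#                                              320, 180, frame, 160, hand_buffer)
#       cv2.imshow('output', frame)
#       if cv2.waitKey(1) & 0xFF == ord('q'):
#           break
#   stream.stop()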
|
cli.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import click
import copy
from functools import wraps
import glob
import io
import json
import logging
import netrc
import os
import random
import re
import requests
import shlex
import signal
import socket
import stat
import subprocess
import sys
import textwrap
import time
import traceback
import yaml
import threading
# pycreds has a find_executable that works in windows
from dockerpycreds.utils import find_executable
from wandb import util
from click.utils import LazyFile
from click.exceptions import BadParameter, ClickException, Abort
# whaaaaat depends on prompt_toolkit < 2, ipython now uses > 2 so we vendored for now
# DANGER this changes the sys.path so we should never do this in a user script
whaaaaat = util.vendor_import("whaaaaat")
import six
from six.moves import BaseHTTPServer, urllib, configparser
from .core import termlog
import wandb
from wandb.apis import InternalApi
from wandb.wandb_config import Config
from wandb import wandb_agent
from wandb import wandb_controller
from wandb import env
from wandb import wandb_run
from wandb import wandb_dir
from wandb import run_manager
from wandb import Error
from wandb.magic_impl import magic_install
DOCS_URL = 'http://docs.wandb.com/'
logger = logging.getLogger(__name__)
class ClickWandbException(ClickException):
def format_message(self):
log_file = util.get_log_file_path()
orig_type = '{}.{}'.format(self.orig_type.__module__,
self.orig_type.__name__)
if issubclass(self.orig_type, Error):
return click.style(str(self.message), fg="red")
else:
return ('An Exception was raised, see %s for full traceback.\n'
'%s: %s' % (log_file, orig_type, self.message))
class CallbackHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple callback handler that stores query string parameters and
shuts down the server.
"""
def do_GET(self):
self.server.result = urllib.parse.parse_qs(
self.path.split("?")[-1])
self.send_response(200)
self.end_headers()
self.wfile.write(b'Success')
self.server.stop()
def log_message(self, format, *args):
pass
class LocalServer():
"""A local HTTP server that finds an open port and listens for a callback.
The urlencoded callback url is accessed via `.qs` the query parameters passed
to the callback are accessed via `.result`
"""
def __init__(self):
self.blocking = True
self.port = 8666
self.connect()
self._server.result = {}
self._server.stop = self.stop
def connect(self, attempts=1):
try:
self._server = BaseHTTPServer.HTTPServer(
('127.0.0.1', self.port), CallbackHandler)
except socket.error:
if attempts < 5:
self.port += random.randint(1, 1000)
self.connect(attempts + 1)
else:
logging.info(
"Unable to start local server, proceeding manually")
class FakeServer():
def serve_forever(self):
pass
self._server = FakeServer()
def qs(self):
return urllib.parse.urlencode({
"callback": "http://127.0.0.1:{}/callback".format(self.port)})
@property
def result(self):
return self._server.result
def start(self, blocking=True):
self.blocking = blocking
if self.blocking:
self._server.serve_forever()
else:
t = threading.Thread(target=self._server.serve_forever)
t.daemon = True
t.start()
def stop(self, *args):
t = threading.Thread(target=self._server.shutdown)
t.daemon = True
t.start()
if not self.blocking:
os.kill(os.getpid(), signal.SIGINT)
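# Usage sketch (hedged): login() below builds a LocalServer and appends server.qs()
# to the authorize URL; if the callback flow were re-enabled (see the commented-out
# lines in get_api_key_from_browser), it would look roughly like:
#
#   server = LocalServer()
#   webbrowser.open_new_tab('{}/authorize?{}'.format(api.app_url, server.qs()))
#   server.start(blocking=False)               # CallbackHandler stores the query string
#   key = server.result.get("key", [None])[0]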
def display_error(func):
"""Function decorator for catching common errors and re-raising as wandb.Error"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except wandb.Error as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(
exc_type, exc_value, exc_traceback)
logger.error(''.join(lines))
click_exc = ClickWandbException(e)
click_exc.orig_type = exc_type
six.reraise(ClickWandbException, click_exc, sys.exc_info()[2])
return wrapper
def prompt_for_project(ctx, entity):
"""Ask the user for a project, creating one if necessary."""
result = ctx.invoke(projects, entity=entity, display=False)
try:
if len(result) == 0:
project = click.prompt("Enter a name for your first project")
#description = editor()
project = api.upsert_project(project, entity=entity)["name"]
else:
project_names = [project["name"] for project in result]
question = {
'type': 'list',
'name': 'project_name',
'message': "Which project should we use?",
'choices': project_names + ["Create New"]
}
result = whaaaaat.prompt([question])
if result:
project = result['project_name']
else:
project = "Create New"
# TODO: check with the server if the project exists
if project == "Create New":
project = click.prompt(
"Enter a name for your new project", value_proc=api.format_project)
#description = editor()
project = api.upsert_project(project, entity=entity)["name"]
except wandb.apis.CommError as e:
raise ClickException(str(e))
return project
def editor(content='', marker='# Enter a description, markdown is allowed!\n'):
message = click.edit(content + '\n\n' + marker)
if message is not None:
return message.split(marker, 1)[0].rstrip('\n')
api = InternalApi()
# Some commands take project/entity etc. as arguments. We provide default
# values for those arguments from the current project configuration, as
# returned by api.settings()
CONTEXT = dict(default_map=api.settings())
class RunGroup(click.Group):
@display_error
def get_command(self, ctx, cmd_name):
# TODO: check if cmd_name is a file in the current dir and not require `run`?
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
return None
@click.command(cls=RunGroup, invoke_without_command=True)
@click.version_option(version=wandb.__version__)
@click.pass_context
def cli(ctx):
"""Weights & Biases.
Run "wandb docs" for full documentation.
"""
wandb.try_to_set_up_global_logging()
if ctx.invoked_subcommand is None:
click.echo(ctx.get_help())
@cli.command(context_settings=CONTEXT, help="List projects")
@click.option("--entity", "-e", default=None, envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def projects(entity, display=True):
projects = api.list_projects(entity=entity)
if len(projects) == 0:
message = "No projects found for %s" % entity
else:
message = 'Latest projects for "%s"' % entity
if display:
click.echo(click.style(message, bold=True))
for project in projects:
click.echo("".join(
(click.style(project['name'], fg="blue", bold=True),
" - ",
str(project['description'] or "").split("\n")[0])
))
return projects
@cli.command(context_settings=CONTEXT, help="List runs in a project")
@click.pass_context
@click.option("--project", "-p", default=None, envvar=env.PROJECT, help="The project you wish to list runs from.")
@click.option("--entity", "-e", default=None, envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def runs(ctx, project, entity):
click.echo(click.style('Latest runs for project "%s"' %
project, bold=True))
if project is None:
project = prompt_for_project(ctx, project)
runs = api.list_runs(project, entity=entity)
for run in runs:
click.echo("".join(
(click.style(run['name'], fg="blue", bold=True),
" - ",
(run['description'] or "").split("\n")[0])
))
@cli.command(context_settings=CONTEXT, help="List local & remote file status")
@click.argument("run", envvar=env.RUN_ID)
@click.option("--settings/--no-settings", help="Show the current settings", default=True)
@click.option("--project", "-p", envvar=env.PROJECT, help="The project you wish to upload to.")
@display_error
def status(run, settings, project):
logged_in = bool(api.api_key)
if not os.path.isdir(wandb_dir()):
if logged_in:
msg = "Directory not initialized. Please run %s to get started." % click.style(
"wandb init", bold=True)
else:
msg = "You are not logged in. Please run %s to get started." % click.style(
"wandb login", bold=True)
termlog(msg)
elif settings:
click.echo(click.style("Logged in?", bold=True) + " %s" % logged_in)
click.echo(click.style("Current Settings", bold=True))
settings = api.settings()
click.echo(json.dumps(
settings,
sort_keys=True,
indent=2,
separators=(',', ': ')
))
@cli.command(context_settings=CONTEXT, help="Restore code, config and docker state for a run")
@click.pass_context
@click.argument("run", envvar=env.RUN_ID)
@click.option("--no-git", is_flag=True, default=False, help="Skupp")
@click.option("--branch/--no-branch", default=True, help="Whether to create a branch or checkout detached")
@click.option("--project", "-p", envvar=env.PROJECT, help="The project you wish to upload to.")
@click.option("--entity", "-e", envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def restore(ctx, run, no_git, branch, project, entity):
if ":" in run:
if "/" in run:
entity, rest = run.split("/", 1)
else:
rest = run
project, run = rest.split(":", 1)
elif run.count("/") > 1:
entity, run = run.split("/", 1)
project, run = api.parse_slug(run, project=project)
commit, json_config, patch_content, metadata = api.run_config(
project, run=run, entity=entity)
repo = metadata.get("git", {}).get("repo")
image = metadata.get("docker")
RESTORE_MESSAGE = """`wandb restore` needs to be run from the same git repository as the original run.
Run `git clone %s` and restore from there or pass the --no-git flag.""" % repo
if no_git:
commit = None
elif not api.git.enabled:
if repo:
raise ClickException(RESTORE_MESSAGE)
elif image:
wandb.termlog("Original run has no git history. Just restoring config and docker")
if commit and api.git.enabled:
subprocess.check_call(['git', 'fetch', '--all'])
try:
api.git.repo.commit(commit)
except ValueError:
wandb.termlog("Couldn't find original commit: {}".format(commit))
commit = None
files = api.download_urls(project, run=run, entity=entity)
for filename in files:
if filename.startswith('upstream_diff_') and filename.endswith('.patch'):
commit = filename[len('upstream_diff_'):-len('.patch')]
try:
api.git.repo.commit(commit)
except ValueError:
commit = None
else:
break
if commit:
wandb.termlog(
"Falling back to upstream commit: {}".format(commit))
patch_path, _ = api.download_write_file(files[filename])
else:
raise ClickException(RESTORE_MESSAGE)
else:
if patch_content:
patch_path = os.path.join(wandb.wandb_dir(), 'diff.patch')
with open(patch_path, "w") as f:
f.write(patch_content)
else:
patch_path = None
branch_name = "wandb/%s" % run
if branch and branch_name not in api.git.repo.branches:
api.git.repo.git.checkout(commit, b=branch_name)
wandb.termlog("Created branch %s" %
click.style(branch_name, bold=True))
elif branch:
wandb.termlog(
"Using existing branch, run `git branch -D %s` from master for a clean checkout" % branch_name)
api.git.repo.git.checkout(branch_name)
else:
wandb.termlog("Checking out %s in detached mode" % commit)
api.git.repo.git.checkout(commit)
if patch_path:
# we apply the patch from the repository root so git doesn't exclude
# things outside the current directory
root = api.git.root
patch_rel_path = os.path.relpath(patch_path, start=root)
# --reject is necessary or else this fails any time a binary file
# occurs in the diff
# we use .call() instead of .check_call() for the same reason
# TODO(adrian): this means there is no error checking here
subprocess.call(['git', 'apply', '--reject',
patch_rel_path], cwd=root)
wandb.termlog("Applied patch")
# TODO: we should likely respect WANDB_DIR here.
util.mkdir_exists_ok("wandb")
config = Config(run_dir="wandb")
config.load_json(json_config)
config.persist()
wandb.termlog("Restored config variables to %s" % config._config_path())
if image:
if not metadata["program"].startswith("<") and metadata.get("args") is not None:
# TODO: we may not want to default to python here.
runner = util.find_runner(metadata["program"]) or ["python"]
command = runner + [metadata["program"]] + metadata["args"]
cmd = " ".join(command)
else:
wandb.termlog("Couldn't find original command, just restoring environment")
cmd = None
wandb.termlog("Docker image found, attempting to start")
ctx.invoke(docker, docker_run_args=[image], cmd=cmd)
return commit, json_config, patch_content, repo, metadata
@cli.command(context_settings=CONTEXT, help="Upload an offline training directory to W&B")
@click.pass_context
@click.argument("path", nargs=-1, type=click.Path(exists=True))
@click.option("--id", envvar=env.RUN_ID, help="The run you want to upload to.")
@click.option("--project", "-p", envvar=env.PROJECT, help="The project you want to upload to.")
@click.option("--entity", "-e", envvar=env.ENTITY, help="The entity to scope to.")
@click.option("--ignore", help="A comma seperated list of globs to ignore syncing with wandb.")
@display_error
def sync(ctx, path, id, project, entity, ignore):
if api.api_key is None:
ctx.invoke(login)
if ignore:
globs = ignore.split(",")
else:
globs = None
path = path[0] if len(path) > 0 else os.getcwd()
if os.path.isfile(path):
raise ClickException("path must be a directory")
wandb_dir = os.path.join(path, "wandb")
run_paths = glob.glob(os.path.join(wandb_dir, "*run-*"))
if len(run_paths) == 0:
run_paths = glob.glob(os.path.join(path, "*run-*"))
if len(run_paths) > 0:
for run_path in run_paths:
wandb_run.Run.from_directory(run_path,
run_id=run_path.split("-")[-1], project=project, entity=entity, ignore_globs=globs)
else:
wandb_run.Run.from_directory(
path, run_id=id, project=project, entity=entity, ignore_globs=globs)
@cli.command(context_settings=CONTEXT, help="Pull files from Weights & Biases")
@click.argument("run", envvar=env.RUN_ID)
@click.option("--project", "-p", envvar=env.PROJECT, help="The project you want to download.")
@click.option("--entity", "-e", default="models", envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def pull(run, project, entity):
project, run = api.parse_slug(run, project=project)
urls = api.download_urls(project, run=run, entity=entity)
if len(urls) == 0:
raise ClickException("Run has no files")
click.echo("Downloading: {project}/{run}".format(
project=click.style(project, bold=True), run=run
))
for name in urls:
if api.file_current(name, urls[name]['md5']):
click.echo("File %s is up to date" % name)
else:
length, response = api.download_file(urls[name]['url'])
# TODO: I had to add this because some versions in CI broke click.progressbar
sys.stdout.write("File %s\r" % name)
dirname = os.path.dirname(name)
if dirname != '':
wandb.util.mkdir_exists_ok(dirname)
with click.progressbar(length=length, label='File %s' % name,
fill_char=click.style('&', fg='green')) as bar:
with open(name, "wb") as f:
for data in response.iter_content(chunk_size=4096):
f.write(data)
bar.update(len(data))
@cli.command(context_settings=CONTEXT, help="Login to Weights & Biases")
@click.argument("key", nargs=-1)
@click.option("--browser/--no-browser", default=True, help="Attempt to launch a browser for login")
@click.option("--anonymous/--no-anonymous", default=False, help="Login anonymously")
@display_error
def login(key, server=LocalServer(), browser=True, anonymous=False):
global api
key = key[0] if len(key) > 0 else None
# Import in here for performance reasons
import webbrowser
browser = util.launch_browser(browser)
def get_api_key_from_browser():
if not browser:
return None
launched = webbrowser.open_new_tab('{}/authorize?{}'.format(api.app_url, server.qs()))
#Getting rid of the server for now. We would need to catch Abort from server.stop and deal accordingly
#server.start(blocking=False)
#if server.result.get("key"):
# return server.result["key"][0]
return None
if key:
util.set_api_key(api, key)
else:
if anonymous:
os.environ[env.ANONYMOUS] = "must"
key = util.prompt_api_key(api, browser_callback=get_api_key_from_browser)
if key:
api.clear_setting('disabled')
click.secho("Successfully logged in to Weights & Biases!", fg="green")
else:
api.set_setting('disabled', 'true')
click.echo("Disabling Weights & Biases. Run 'wandb login' again to re-enable.")
# reinitialize API to create the new client
api = InternalApi()
return key
@cli.command(context_settings=CONTEXT, help="Configure a directory with Weights & Biases")
@click.pass_context
@display_error
def init(ctx):
from wandb import _set_stage_dir, __stage_dir__, wandb_dir
if __stage_dir__ is None:
_set_stage_dir('wandb')
if os.path.isdir(wandb_dir()) and os.path.exists(os.path.join(wandb_dir(), "settings")):
click.confirm(click.style(
"This directory has been configured previously, should we re-configure it?", bold=True), abort=True)
else:
click.echo(click.style(
"Let's setup this directory for W&B!", fg="green", bold=True))
if api.api_key is None:
ctx.invoke(login)
viewer = api.viewer()
# Viewer can be `None` in case your API information became invalid, or
# in testing if you switch hosts.
if not viewer:
click.echo(click.style(
"Your login information seems to be invalid: can you log in again please?", fg="red", bold=True))
ctx.invoke(login)
# This shouldn't happen.
viewer = api.viewer()
if not viewer:
click.echo(click.style(
"We're sorry, there was a problem logging you in. Please send us a note at support@wandb.com and tell us how this happened.", fg="red", bold=True))
sys.exit(1)
# At this point we should be logged in successfully.
if len(viewer["teams"]["edges"]) > 1:
team_names = [e["node"]["name"] for e in viewer["teams"]["edges"]]
question = {
'type': 'list',
'name': 'team_name',
'message': "Which team should we use?",
'choices': team_names + ["Manual Entry"]
}
result = whaaaaat.prompt([question])
# result can be empty on click
if result:
entity = result['team_name']
else:
entity = "Manual Entry"
if entity == "Manual Entry":
entity = click.prompt("Enter the name of the team you want to use")
else:
entity = click.prompt("What username or team should we use?",
default=viewer.get('entity', 'models'))
# TODO: this error handling sucks and the output isn't pretty
try:
project = prompt_for_project(ctx, entity)
except wandb.cli.ClickWandbException:
raise ClickException('Could not find team: %s' % entity)
api.set_setting('entity', entity)
api.set_setting('project', project)
api.set_setting('base_url', api.settings().get('base_url'))
util.mkdir_exists_ok(wandb_dir())
with open(os.path.join(wandb_dir(), '.gitignore'), "w") as file:
file.write("*\n!settings")
click.echo(click.style("This directory is configured! Next, track a run:\n", fg="green") +
textwrap.dedent("""\
* In your training script:
{code1}
{code2}
* then `{run}`.
""").format(
code1=click.style("import wandb", bold=True),
code2=click.style("wandb.init(project=\"%s\")" % project, bold=True),
run=click.style("python <train.py>", bold=True),
# saving this here so I can easily put it back when we re-enable
# push/pull
# """
# * Run `{push}` to manually add a file.
# * Pull popular models into your project with: `{pull}`.
# """
# push=click.style("wandb push run_id weights.h5", bold=True),
# pull=click.style("wandb pull models/inception-v4", bold=True)
))
@cli.command(context_settings=CONTEXT, help="Open documentation in a browser")
@click.pass_context
@display_error
def docs(ctx):
import webbrowser
if util.launch_browser():
launched = webbrowser.open_new_tab(DOCS_URL)
else:
launched = False
if launched:
click.echo(click.style(
"Opening %s in your default browser" % DOCS_URL, fg="green"))
else:
click.echo(click.style(
"You can find our documentation here: %s" % DOCS_URL, fg="green"))
@cli.command("on", help="Ensure W&B is enabled in this directory")
@display_error
def on():
wandb.ensure_configured()
api = InternalApi()
try:
api.clear_setting('disabled')
except configparser.Error:
pass
click.echo(
"W&B enabled, running your script from this directory will now sync to the cloud.")
@cli.command("off", help="Disable W&B in this directory, useful for testing")
@display_error
def off():
wandb.ensure_configured()
api = InternalApi()
try:
api.set_setting('disabled', 'true')
click.echo(
"W&B disabled, running your script from this directory will only write metadata locally.")
except configparser.Error as e:
click.echo(
'Unable to write config, copy and paste the following in your terminal to turn off W&B:\nexport WANDB_MODE=dryrun')
RUN_CONTEXT = copy.copy(CONTEXT)
RUN_CONTEXT['allow_extra_args'] = True
RUN_CONTEXT['ignore_unknown_options'] = True
@cli.command(context_settings=RUN_CONTEXT, help="Launch a job")
@click.pass_context
@click.argument('program')
@click.argument('args', nargs=-1)
@click.option('--id', default=None,
help='Run id to use, default is to generate.')
@click.option('--resume', default='never', type=click.Choice(['never', 'must', 'allow']),
help='Resume strategy, default is never')
@click.option('--dir', default=None,
help='Files in this directory will be saved to wandb, defaults to wandb')
@click.option('--configs', default=None,
help='Config file paths to load')
@click.option('--message', '-m', default=None, hidden=True,
help='Message to associate with the run.')
@click.option('--name', default=None,
help='Name of the run, default is auto generated.')
@click.option('--notes', default=None,
help='Notes to associate with the run.')
@click.option("--show/--no-show", default=False,
help="Open the run page in your default browser.")
@click.option('--tags', default=None,
              help='Tags to associate with the run (comma separated).')
@click.option('--run_group', default=None,
help='Run group to associate with the run.')
@click.option('--job_type', default=None,
help='Job type to associate with the run.')
@display_error
def run(ctx, program, args, id, resume, dir, configs, message, name, notes, show, tags, run_group, job_type):
wandb.ensure_configured()
if configs:
config_paths = configs.split(',')
else:
config_paths = []
config = Config(config_paths=config_paths,
wandb_dir=dir or wandb.wandb_dir())
tags = [tag for tag in tags.split(",") if tag] if tags else None
run = wandb_run.Run(run_id=id, mode='clirun',
config=config, description=message,
program=program, tags=tags,
group=run_group, job_type=job_type,
name=name, notes=notes,
resume=resume)
run.enable_logging()
environ = dict(os.environ)
if configs:
environ[env.CONFIG_PATHS] = configs
if show:
environ[env.SHOW_RUN] = 'True'
if not run.api.api_key:
util.prompt_api_key(run.api)
try:
rm = run_manager.RunManager(run)
rm.init_run(environ)
except run_manager.Error:
exc_type, exc_value, exc_traceback = sys.exc_info()
wandb.termerror('An Exception was raised during setup, see %s for full traceback.' %
util.get_log_file_path())
wandb.termerror(str(exc_value))
if 'permission' in str(exc_value):
wandb.termerror(
'Are you sure you provided the correct API key to "wandb login"?')
lines = traceback.format_exception(
exc_type, exc_value, exc_traceback)
logger.error('\n'.join(lines))
sys.exit(1)
rm.run_user_process(program, args, environ)
@cli.command(context_settings=RUN_CONTEXT, name="docker-run")
@click.pass_context
@click.argument('docker_run_args', nargs=-1)
@click.option('--help')
def docker_run(ctx, docker_run_args, help):
"""Simple docker wrapper that adds WANDB_API_KEY and WANDB_DOCKER to any docker run command.
This will also set the runtime to nvidia if the nvidia-docker executable is present on the system
and --runtime wasn't set.
"""
args = list(docker_run_args)
if len(args) > 0 and args[0] == "run":
args.pop(0)
if help or len(args) == 0:
wandb.termlog("This commands adds wandb env variables to your docker run calls")
subprocess.call(['docker', 'run'] + args + ['--help'])
exit()
#TODO: is this what we want?
if len([a for a in args if a.startswith("--runtime")]) == 0 and find_executable('nvidia-docker'):
args = ["--runtime", "nvidia"] + args
#TODO: image_from_docker_args uses heuristics to find the docker image arg, there are likely cases
#where this won't work
image = util.image_from_docker_args(args)
resolved_image = None
if image:
resolved_image = wandb.docker.image_id(image)
if resolved_image:
args = ['-e', 'WANDB_DOCKER=%s' % resolved_image] + args
else:
wandb.termlog("Couldn't detect image argument, running command without the WANDB_DOCKER env variable")
if api.api_key:
args = ['-e', 'WANDB_API_KEY=%s' % api.api_key] + args
else:
wandb.termlog("Not logged in, run `wandb login` from the host machine to enable result logging")
subprocess.call(['docker', 'run'] + args)
@cli.command(context_settings=RUN_CONTEXT)
@click.pass_context
@click.argument('docker_run_args', nargs=-1)
@click.argument('docker_image', required=False)
@click.option('--nvidia/--no-nvidia', default=find_executable('nvidia-docker') is not None,
help='Use the nvidia runtime, defaults to nvidia if nvidia-docker is present')
@click.option('--digest', is_flag=True, default=False, help="Output the image digest and exit")
@click.option('--jupyter/--no-jupyter', default=False, help="Run jupyter lab in the container")
@click.option('--dir', default="/app", help="Which directory to mount the code in the container")
@click.option('--no-dir', is_flag=True, help="Don't mount the current directory")
@click.option('--shell', default="/bin/bash", help="The shell to start the container with")
@click.option('--port', default="8888", help="The hot port to bind jupyter on")
@click.option('--cmd', help="The command to run in the container")
@click.option('--no-tty', is_flag=True, default=False, help="Run the command without a tty")
@display_error
def docker(ctx, docker_run_args, docker_image, nvidia, digest, jupyter, dir, no_dir, shell, port, cmd, no_tty):
"""W&B docker lets you run your code in a docker image ensuring wandb is configured. It adds the WANDB_DOCKER and WANDB_API_KEY
environment variables to your container and mounts the current directory in /app by default. You can pass additional
args which will be added to `docker run` before the image name is declared, we'll choose a default image for you if
one isn't passed:
wandb docker -v /mnt/dataset:/app/data
wandb docker gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.4.0 --jupyter
wandb docker wandb/deepo:keras-gpu --no-tty --cmd "python train.py --epochs=5"
    By default we override the entrypoint to check for the existence of wandb and install it if not present. If you pass the --jupyter
flag we will ensure jupyter is installed and start jupyter lab on port 8888. If we detect nvidia-docker on your system we will use
the nvidia runtime. If you just want wandb to set environment variable to an existing docker run command, see the wandb docker-run
command.
"""
if not find_executable('docker'):
raise ClickException(
"Docker not installed, install it from https://docker.com" )
args = list(docker_run_args)
image = docker_image or ""
# remove run for users used to nvidia-docker
if len(args) > 0 and args[0] == "run":
args.pop(0)
if image == "" and len(args) > 0:
image = args.pop(0)
# If the user adds docker args without specifying an image (should be rare)
if not util.docker_image_regex(image.split("@")[0]):
if image:
args = args + [image]
image = wandb.docker.default_image(gpu=nvidia)
subprocess.call(["docker", "pull", image])
_, repo_name, tag = wandb.docker.parse(image)
resolved_image = wandb.docker.image_id(image)
if resolved_image is None:
raise ClickException(
"Couldn't find image locally or in a registry, try running `docker pull %s`" % image)
if digest:
sys.stdout.write(resolved_image)
exit(0)
existing = wandb.docker.shell(
["ps", "-f", "ancestor=%s" % resolved_image, "-q"])
if existing:
question = {
'type': 'confirm',
'name': 'attach',
'message': "Found running container with the same image, do you want to attach?",
}
result = whaaaaat.prompt([question])
if result and result['attach']:
subprocess.call(['docker', 'attach', existing.split("\n")[0]])
exit(0)
cwd = os.getcwd()
command = ['docker', 'run', '-e', 'LANG=C.UTF-8', '-e', 'WANDB_DOCKER=%s' % resolved_image, '--ipc=host',
'-v', wandb.docker.entrypoint+':/wandb-entrypoint.sh', '--entrypoint', '/wandb-entrypoint.sh']
if nvidia:
command.extend(['--runtime', 'nvidia'])
if not no_dir:
#TODO: We should default to the working directory if defined
command.extend(['-v', cwd+":"+dir, '-w', dir])
if api.api_key:
command.extend(['-e', 'WANDB_API_KEY=%s' % api.api_key])
else:
wandb.termlog("Couldn't find WANDB_API_KEY, run `wandb login` to enable streaming metrics")
if jupyter:
command.extend(['-e', 'WANDB_ENSURE_JUPYTER=1', '-p', port+':8888'])
no_tty = True
cmd = "jupyter lab --no-browser --ip=0.0.0.0 --allow-root --NotebookApp.token= --notebook-dir %s" % dir
command.extend(args)
if no_tty:
command.extend([image, shell, "-c", cmd])
else:
if cmd:
command.extend(['-e', 'WANDB_COMMAND=%s' % cmd])
command.extend(['-it', image, shell])
wandb.termlog("Launching docker container \U0001F6A2")
subprocess.call(command)
MONKEY_CONTEXT = copy.copy(CONTEXT)
MONKEY_CONTEXT['allow_extra_args'] = True
MONKEY_CONTEXT['ignore_unknown_options'] = True
@cli.command(context_settings=MONKEY_CONTEXT, help="Run any script with wandb", hidden=True)
@click.pass_context
@click.argument('program')
@click.argument('args', nargs=-1)
@display_error
def magic(ctx, program, args):
def magic_run(cmd, globals, locals):
try:
exec(cmd, globals, locals)
finally:
pass
sys.argv[:] = args
sys.argv.insert(0, program)
sys.path.insert(0, os.path.dirname(program))
try:
with open(program, 'rb') as fp:
code = compile(fp.read(), program, 'exec')
except IOError:
click.echo(click.style("Could not launch program: %s" % program, fg="red"))
sys.exit(1)
globs = {
'__file__': program,
'__name__': '__main__',
'__package__': None,
'wandb_magic_install': magic_install,
}
prep = '''
import __main__
__main__.__file__ = "%s"
wandb_magic_install()
''' % program
magic_run(prep, globs, None)
magic_run(code, globs, None)
@cli.command(context_settings=CONTEXT, help="Create a sweep")
@click.pass_context
@click.option('--controller', is_flag=True, default=False, help="Run local controller")
@click.option('--verbose', is_flag=True, default=False, help="Display verbose output")
@click.argument('config_yaml')
@display_error
def sweep(ctx, controller, verbose, config_yaml):
click.echo('Creating sweep from: %s' % config_yaml)
try:
yaml_file = open(config_yaml)
except (OSError, IOError):
wandb.termerror('Couldn\'t open sweep file: %s' % config_yaml)
return
try:
config = util.load_yaml(yaml_file)
except yaml.YAMLError as err:
wandb.termerror('Error in configuration file: %s' % err)
return
if config is None:
wandb.termerror('Configuration file is empty')
return
is_local = config.get('controller', {}).get('type') == 'local'
if is_local:
tuner = wandb_controller.controller()
err = tuner._validate(config)
if err:
wandb.termerror('Error in sweep file: %s' % err)
return
else:
if controller:
wandb.termerror('Option "controller" only permitted for controller type "local"')
return
sweep_id = api.upsert_sweep(config)
    print('Created sweep with ID:', sweep_id)
sweep_url = wandb_controller._get_sweep_url(api, sweep_id)
if sweep_url:
print('Sweep URL:', sweep_url)
if controller:
click.echo('Starting wandb controller...')
tuner = wandb_controller.controller(sweep_id)
tuner.run(verbose=verbose)
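# Hedged illustration (comment only, not part of the original CLI): a minimal config_yaml
# that this command could consume might look like the sketch below; the program name and
# parameter values are hypothetical. Adding a `controller: {type: local}` section is what
# makes the `--controller` flag permissible per the check above.
#   program: train.py
#   method: random
#   metric:
#     name: loss
#     goal: minimize
#   parameters:
#     learning_rate:
#       min: 0.0001
#       max: 0.1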
@cli.command(context_settings=CONTEXT, help="Run the W&B agent")
@click.argument('sweep_id')
@display_error
def agent(sweep_id):
if sys.platform == 'win32':
wandb.termerror('Agent is not supported on Windows')
sys.exit(1)
click.echo('Starting wandb agent 🕵️')
wandb_agent.run_agent(sweep_id)
# you can send local commands like so:
# agent_api.command({'type': 'run', 'program': 'train.py',
# 'args': ['--max_epochs=10']})
@cli.command(context_settings=CONTEXT, help="Run the W&B local sweep controller")
@click.option('--verbose', is_flag=True, default=False, help="Display verbose output")
@click.argument('sweep_id')
@display_error
def controller(verbose, sweep_id):
click.echo('Starting wandb controller...')
tuner = wandb_controller.controller(sweep_id)
tuner.run(verbose=verbose)
if __name__ == "__main__":
cli()
|
ai_flow_client.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import threading
import time
from functools import wraps
from random import shuffle
from typing import Text
import grpc
from notification_service.base_notification import BaseEvent
from notification_service.client import NotificationClient
from ai_flow.context.project_context import current_project_config
from ai_flow.endpoint.client.metadata_client import MetadataClient
from ai_flow.endpoint.client.metric_client import MetricClient
from ai_flow.endpoint.client.model_center_client import ModelCenterClient
from ai_flow.endpoint.client.scheduler_client import SchedulerClient
from ai_flow.endpoint.server.high_availability import proto_to_member, sleep_and_detecting_running
from ai_flow.project.project_config import ProjectConfig
from ai_flow.protobuf.high_availability_pb2 import ListMembersRequest, ReturnStatus
from ai_flow.protobuf.high_availability_pb2_grpc import HighAvailabilityManagerStub
from ai_flow.protobuf.metadata_service_pb2_grpc import MetadataServiceStub
from ai_flow.protobuf.metric_service_pb2_grpc import MetricServiceStub
from ai_flow.protobuf.model_center_service_pb2_grpc import ModelCenterServiceStub
from ai_flow.protobuf.scheduling_service_pb2_grpc import SchedulingServiceStub
if not hasattr(time, 'time_ns'):
time.time_ns = lambda: int(time.time() * 1e9)
AI_FLOW_TYPE = "AI_FLOW"
_SERVER_URI = 'localhost:50051'
_default_ai_flow_client = None
_default_server_uri = 'localhost:50051'
_default_airflow_operation_client = None
def get_ai_flow_client():
""" Get AI flow Client. """
global _default_ai_flow_client, _default_server_uri
if _default_ai_flow_client is None:
current_uri = current_project_config().get_server_uri()
if current_uri is None:
return None
else:
_default_server_uri = current_uri
_default_ai_flow_client \
= AIFlowClient(server_uri=_default_server_uri,
notification_server_uri=current_project_config().get_notification_server_uri(),
project_config=current_project_config())
return _default_ai_flow_client
else:
current_uri = current_project_config().get_server_uri()
if current_uri != _default_server_uri:
_default_server_uri = current_uri
_default_ai_flow_client \
= AIFlowClient(server_uri=_default_server_uri,
notification_server_uri=current_project_config().get_notification_server_uri(),
project_config=current_project_config())
else:
            # when reusing the previous client, confirm once that the server is available
_default_ai_flow_client.wait_for_ready_and_throw_error()
return _default_ai_flow_client
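# Hedged usage sketch (not part of the original module): get_ai_flow_client() behaves as a
# lazy singleton keyed on the project's configured server_uri, rebuilding the client only
# when that URI changes. The helper below is illustrative only; the event key and value are
# hypothetical placeholders and a loaded project context is assumed.
def _example_publish_event():
    """Illustrative only: publish an event through the cached client."""
    client = get_ai_flow_client()
    if client is not None:
        client.publish_event(key='example_key', value='example_value')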
class AIFlowClient(NotificationClient, MetadataClient, ModelCenterClient, MetricClient, SchedulerClient):
"""
Client of an AIFlow Server that manages metadata store, model center and notification service.
"""
CLIENT_INIT_WAIT_READY_TIMEOUT = 5.
def __init__(self,
server_uri=_SERVER_URI,
notification_server_uri=None,
project_config: ProjectConfig = None):
MetadataClient.__init__(self, server_uri)
ModelCenterClient.__init__(self, server_uri)
MetricClient.__init__(self, server_uri)
SchedulerClient.__init__(self, server_uri)
self.aiflow_ha_enabled = False
self.list_member_interval_ms = 5000
self.retry_interval_ms = 1000
self.retry_timeout_ms = 10000
project_name = None
if project_config is not None:
if server_uri is None:
server_uri = project_config.get_server_uri()
if notification_server_uri is None:
notification_server_uri = project_config.get_notification_server_uri()
project_name = project_config.get_project_name()
self.aiflow_ha_enabled = project_config.get_enable_ha()
self.list_member_interval_ms = project_config.get_list_member_interval_ms()
self.retry_interval_ms = project_config.get_retry_interval_ms()
self.retry_timeout_ms = project_config.get_retry_timeout_ms()
self.server_uri = server_uri
self.wait_for_ready_and_throw_error()
if notification_server_uri is None:
raise Exception('Config notification_server_uri not set.')
NotificationClient.__init__(
self,
notification_server_uri,
list_member_interval_ms=self.list_member_interval_ms,
retry_interval_ms=self.retry_interval_ms,
retry_timeout_ms=self.retry_timeout_ms,
default_namespace=project_name)
if self.aiflow_ha_enabled:
server_uris = server_uri.split(",")
self.living_aiflow_members = []
self.current_aiflow_uri = None
last_error = None
for server_uri in server_uris:
channel = grpc.insecure_channel(server_uri)
high_availability_stub = HighAvailabilityManagerStub(channel)
try:
request = ListMembersRequest(timeout_seconds=0)
response = high_availability_stub.listMembers(request)
if response.return_code == ReturnStatus.CALL_SUCCESS:
self.living_aiflow_members = [proto_to_member(proto).server_uri
for proto in response.members]
else:
raise Exception(response.return_msg)
self.current_aiflow_uri = server_uri
self.high_availability_stub = high_availability_stub
break
except grpc.RpcError as e:
last_error = e
if self.current_aiflow_uri is None:
raise Exception("No available aiflow server uri!") from last_error
self.aiflow_ha_change_lock = threading.Lock()
self.aiflow_ha_running = True
self._replace_aiflow_stubs(self.current_aiflow_uri)
self.list_aiflow_member_thread = threading.Thread(target=self._list_aiflow_members, daemon=True)
self.list_aiflow_member_thread.start()
def wait_for_ready_and_throw_error(self):
server_uris = self.server_uri.split(",")
available = False
for uri in server_uris:
try:
channel = grpc.insecure_channel(uri)
fut = grpc.channel_ready_future(channel)
fut.result(self.CLIENT_INIT_WAIT_READY_TIMEOUT)
available = True
break
            except Exception:  # broad catch: any failure just means this URI is not ready yet
                pass
if not available:
raise Exception(f"Client connection to server({self.server_uri}) is not ready. Please confirm the status of the process for `AIFlowServer`.")
def publish_event(self, key: str, value: str, event_type: str = AI_FLOW_TYPE) -> BaseEvent:
return self.send_event(BaseEvent(key, value, event_type))
def _list_aiflow_members(self):
while self.aiflow_ha_running:
# refresh the living members
request = ListMembersRequest(timeout_seconds=int(self.list_member_interval_ms / 1000))
response = self.high_availability_stub.listMembers(request)
if response.return_code == ReturnStatus.CALL_SUCCESS:
with self.aiflow_ha_change_lock:
self.living_aiflow_members = [proto_to_member(proto).server_uri
for proto in response.members]
else:
logging.error("Exception thrown when updating the living members: %s" %
response.return_msg)
def _aiflow_ha_wrapper(self, func, stub_name):
@wraps(func)
def call_with_retry(*args, **kwargs):
current_stub = getattr(self, stub_name)
current_func = getattr(current_stub, func.__name__).inner_func
start_time = time.time_ns() / 1000000
failed_members = set()
while True:
try:
return current_func(*args, **kwargs)
except grpc.RpcError:
logging.error("Exception thrown when calling rpc, change the connection.",
exc_info=True)
with self.aiflow_ha_change_lock:
# check the current_uri to ensure thread safety
if current_func.server_uri == self.current_aiflow_uri:
living_members = list(self.living_aiflow_members)
failed_members.add(self.current_aiflow_uri)
shuffle(living_members)
found_new_member = False
for server_uri in living_members:
if server_uri in failed_members:
continue
next_uri = server_uri
self._replace_aiflow_stubs(next_uri)
current_func = getattr(getattr(self, stub_name),
current_func.__name__).inner_func
self.current_aiflow_uri = next_uri
                                found_new_member = True
                                break  # use the first healthy member found
if not found_new_member:
logging.error("No available living members currently. Sleep and retry.")
failed_members.clear()
sleep_and_detecting_running(self.retry_interval_ms,
lambda: self.aiflow_ha_running)
# break if stopped or timeout
if not self.aiflow_ha_running or \
time.time_ns() / 1000000 > start_time + self.retry_timeout_ms:
if not self.aiflow_ha_running:
raise Exception("HA has been disabled.")
else:
raise Exception("Rpc retry timeout!")
call_with_retry.inner_func = func
return call_with_retry
def _wrap_aiflow_rpcs(self, stub, server_uri, stub_name):
for method_name, method in dict(stub.__dict__).items():
method.__name__ = method_name
method.server_uri = server_uri
setattr(stub, method_name, self._aiflow_ha_wrapper(method, stub_name))
return stub
def _replace_aiflow_stubs(self, server_uri):
high_availability_channel = grpc.insecure_channel(server_uri)
high_availability_stub = self._wrap_aiflow_rpcs(
HighAvailabilityManagerStub(high_availability_channel),
server_uri,
"high_availability_stub")
self.high_availability_stub = high_availability_stub
metadata_channel = grpc.insecure_channel(server_uri)
metadata_store_stub = self._wrap_aiflow_rpcs(
MetadataServiceStub(metadata_channel),
server_uri,
"metadata_store_stub")
self.metadata_store_stub = metadata_store_stub
model_center_channel = grpc.insecure_channel(server_uri)
model_center_stub = self._wrap_aiflow_rpcs(
ModelCenterServiceStub(model_center_channel),
server_uri,
"model_center_stub")
self.model_center_stub = model_center_stub
metric_channel = grpc.insecure_channel(server_uri)
metric_stub = self._wrap_aiflow_rpcs(
MetricServiceStub(metric_channel),
server_uri,
"metric_stub")
self.metric_stub = metric_stub
scheduling_channel = grpc.insecure_channel(server_uri)
scheduling_stub = self._wrap_aiflow_rpcs(
SchedulingServiceStub(scheduling_channel),
server_uri,
"scheduling_stub")
self.scheduling_stub = scheduling_stub
def disable_high_availability(self):
if hasattr(self, "aiflow_ha_running"):
self.aiflow_ha_running = False
NotificationClient.disable_high_availability(self)
if hasattr(self, "aiflow_ha_running"):
self.list_aiflow_member_thread.join()
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
from datetime import timedelta
import functools as ft
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import core as ha, loader, config_entries
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
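# Hedged usage sketch (not part of the original helpers): a synchronous test typically
# drives the instance returned above like this; the entity_id below is hypothetical.
def _example_sync_test():
    """Illustrative only: start a test instance, set a state, then stop it."""
    hass = get_test_home_assistant()
    try:
        hass.start()
        hass.states.set('light.example', STATE_ON)
        hass.block_till_done()
        assert hass.states.get('light.example').state == STATE_ON
    finally:
        hass.stop()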
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config.async_load = Mock()
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
def async_add_job(target, *args):
"""Add a magic mock."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
hass.async_add_job = async_add_job
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
if 'custom_components.test' not in loader.AVAILABLE_COMPONENTS:
yield from loop.run_in_executor(None, loader.prepare, hass)
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
yield from orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@asyncio.coroutine
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
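# Hedged usage sketch (not part of the original helpers): async_mock_service returns a list
# that accumulates the calls made to the fake service. The domain, service name and service
# data below are hypothetical, and a running test hass instance is assumed.
def _example_mock_service_usage(hass):
    """Illustrative only: register a fake service, call it, then inspect the calls."""
    calls = mock_service(hass, 'test_domain', 'test_service')
    hass.services.call('test_domain', 'test_service', {'some_key': 1})
    hass.block_till_done()
    assert len(calls) == 1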
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': time})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
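# Hedged usage sketch (not part of the original helpers): the topic and payload below are
# hypothetical; a test normally mocks the MQTT component once and then fires messages at it
# with the threadsafe helper defined earlier in this module.
def _example_mqtt_usage(hass):
    """Illustrative only: set up the mocked broker and deliver one message."""
    mock_mqtt_component(hass)
    fire_mqtt_message(hass, 'some/topic', '{"state": "on"}')
    hass.block_till_done()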
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or {}
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
class MockModule(object):
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None, async_setup_entry=None,
async_unload_entry=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
class MockPlatform(object):
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
elif method is None:
return self.calls[-1]
else:
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
source=config_entries.SOURCE_USER, title='Mock Title',
state=None):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
    matchlist = sorted(list(files_dict.keys()), key=len, reverse=True) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
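# Hedged usage sketch (not part of the original helpers): patch_yaml_files substitutes
# in-memory YAML for real files whenever homeassistant.util.yaml opens them; the file name
# and contents below are hypothetical.
def _example_patch_yaml_files_usage():
    """Illustrative only: load a fake YAML file through the patched open()."""
    with patch_yaml_files({'test.yaml': 'hello: world'}):
        assert yaml.load_yaml('test.yaml') == {'hello': 'world'}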
def mock_coro(return_value=None):
"""Return a coro that returns a value."""
return mock_coro_func(return_value)()
def mock_coro_func(return_value=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
- count: The amount of valid platforms that should be setup
    - domain: The domain to count. It is optional and can be automatically
      determined most of the time.
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_CACHE
hass.data[key] = {
state.entity_id: state for state in states}
_LOGGER.debug('Restore cache: %s', hass.data[key])
assert len(hass.data[key]) == len(states), \
"Duplicate entity_id? {}".format(states)
hass.state = ha.CoreState.starting
mock_component(hass, recorder.DOMAIN)
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
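# Hedged usage sketch (not part of the original helpers): MockDependency works both as a
# context manager and as a decorator; the module path 'some_lib.submodule' below is
# hypothetical.
def _example_mock_dependency_usage():
    """Illustrative only: fake an import that the code under test performs."""
    with MockDependency('some_lib', 'submodule') as some_lib:
        some_lib.submodule.fetch.return_value = 42
        # code under test that imports some_lib.submodule would now receive the mock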
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
def _handle(self, attr):
"""Helper for the attributes."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
|